jackkuo committed (verified)
Commit: c2c6e10
Parent(s): 5f12779

Add files using upload-large-folder tool

Files changed (50):
  1. -9E1T4oBgHgl3EQf8gV6/content/2301.03546v1.pdf +3 -0
  2. -9E1T4oBgHgl3EQf8gV6/vector_store/index.faiss +3 -0
  3. -9E1T4oBgHgl3EQf8gV6/vector_store/index.pkl +3 -0
  4. .gitattributes +53 -0
  5. 0tAyT4oBgHgl3EQfbfeR/vector_store/index.faiss +3 -0
  6. 19FQT4oBgHgl3EQf1zZe/vector_store/index.faiss +3 -0
  7. 2NE0T4oBgHgl3EQfdwA8/vector_store/index.pkl +3 -0
  8. 2tAzT4oBgHgl3EQfuP3I/content/2301.01689v1.pdf +3 -0
  9. 3dAzT4oBgHgl3EQfR_t3/content/tmp_files/2301.01225v1.pdf.txt +2853 -0
  10. 3dAzT4oBgHgl3EQfR_t3/content/tmp_files/load_file.txt +0 -0
  11. 4dE4T4oBgHgl3EQfBAsA/content/2301.04847v1.pdf +3 -0
  12. 4dE4T4oBgHgl3EQfBAsA/vector_store/index.faiss +3 -0
  13. 4dE4T4oBgHgl3EQfBAsA/vector_store/index.pkl +3 -0
  14. 5dAzT4oBgHgl3EQf9_4T/content/2301.01926v1.pdf +3 -0
  15. 5dAzT4oBgHgl3EQf9_4T/vector_store/index.faiss +3 -0
  16. 5dE1T4oBgHgl3EQfBAJm/vector_store/index.pkl +3 -0
  17. 5dE2T4oBgHgl3EQfOgbi/vector_store/index.faiss +3 -0
  18. 5tAzT4oBgHgl3EQf9_5e/vector_store/index.faiss +3 -0
  19. 79E4T4oBgHgl3EQfdAwp/content/tmp_files/2301.05087v1.pdf.txt +2672 -0
  20. 79E4T4oBgHgl3EQfdAwp/content/tmp_files/load_file.txt +0 -0
  21. 7NAyT4oBgHgl3EQfcvfE/content/tmp_files/2301.00290v1.pdf.txt +1045 -0
  22. 7NAyT4oBgHgl3EQfcvfE/content/tmp_files/load_file.txt +0 -0
  23. 7NE1T4oBgHgl3EQf7QUc/content/tmp_files/2301.03531v1.pdf.txt +1181 -0
  24. 7NE1T4oBgHgl3EQf7QUc/content/tmp_files/load_file.txt +0 -0
  25. 8NE1T4oBgHgl3EQfBwLj/content/2301.02857v1.pdf +3 -0
  26. 8NE1T4oBgHgl3EQfBwLj/vector_store/index.pkl +3 -0
  27. 8tAyT4oBgHgl3EQfQ_YL/content/tmp_files/2301.00055v1.pdf.txt +2822 -0
  28. 8tAyT4oBgHgl3EQfQ_YL/content/tmp_files/load_file.txt +0 -0
  29. 99FLT4oBgHgl3EQfui_z/content/tmp_files/2301.12156v1.pdf.txt +1775 -0
  30. 99FLT4oBgHgl3EQfui_z/content/tmp_files/load_file.txt +0 -0
  31. 9tFAT4oBgHgl3EQfpx3L/content/tmp_files/2301.08642v1.pdf.txt +2672 -0
  32. 9tFAT4oBgHgl3EQfpx3L/content/tmp_files/load_file.txt +0 -0
  33. BNE0T4oBgHgl3EQfPwB8/content/2301.02183v1.pdf +3 -0
  34. BdE1T4oBgHgl3EQfpQWt/content/tmp_files/2301.03330v1.pdf.txt +3626 -0
  35. BdE1T4oBgHgl3EQfpQWt/content/tmp_files/load_file.txt +0 -0
  36. CNAzT4oBgHgl3EQfh_3R/vector_store/index.faiss +3 -0
  37. CNE1T4oBgHgl3EQfpgW7/content/2301.03333v1.pdf +3 -0
  38. CNE1T4oBgHgl3EQfpgW7/vector_store/index.faiss +3 -0
  39. CNE1T4oBgHgl3EQfpgW7/vector_store/index.pkl +3 -0
  40. ENE1T4oBgHgl3EQf-QbO/content/tmp_files/2301.03567v1.pdf.txt +4048 -0
  41. ENE1T4oBgHgl3EQf-QbO/content/tmp_files/load_file.txt +0 -0
  42. EtAzT4oBgHgl3EQfw_5J/content/tmp_files/2301.01730v1.pdf.txt +925 -0
  43. EtAzT4oBgHgl3EQfw_5J/content/tmp_files/load_file.txt +0 -0
  44. F9E4T4oBgHgl3EQfgA1N/content/tmp_files/2301.05112v1.pdf.txt +175 -0
  45. F9E4T4oBgHgl3EQfgA1N/content/tmp_files/load_file.txt +128 -0
  46. FdE0T4oBgHgl3EQfzALi/content/2301.02668v1.pdf +3 -0
  47. FdE1T4oBgHgl3EQfEwOD/content/2301.02894v1.pdf +3 -0
  48. FdE1T4oBgHgl3EQfEwOD/vector_store/index.pkl +3 -0
  49. H9E0T4oBgHgl3EQfhwGK/content/tmp_files/load_file.txt +509 -0
  50. I9AzT4oBgHgl3EQfjv2R/content/2301.01521v1.pdf +3 -0
-9E1T4oBgHgl3EQf8gV6/content/2301.03546v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfb11e4348c9751f7ca6bd003ce33df11e941a2c8b3934ca31d087ea793bb6ed
+ size 705495
-9E1T4oBgHgl3EQf8gV6/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce0a95976dd7ae93815e99353955c749ac42464f9d13b7f505d6493e080e2e64
+ size 4653101
-9E1T4oBgHgl3EQf8gV6/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:546213c0bf04d94043fb125e40180786b37447518a74ca10a6ceb4810c9fb566
+ size 160743
.gitattributes CHANGED
@@ -7680,3 +7680,56 @@ FdE0T4oBgHgl3EQfzALi/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
  a9FIT4oBgHgl3EQfmCsU/content/2301.11307v1.pdf filter=lfs diff=lfs merge=lfs -text
  itFPT4oBgHgl3EQf0DX1/content/2301.13178v1.pdf filter=lfs diff=lfs merge=lfs -text
  BNE0T4oBgHgl3EQfPwB8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ BNE0T4oBgHgl3EQfPwB8/content/2301.02183v1.pdf filter=lfs diff=lfs merge=lfs -text
+ lNE4T4oBgHgl3EQftQ2k/content/2301.05223v1.pdf filter=lfs diff=lfs merge=lfs -text
+ XNFJT4oBgHgl3EQfQCwC/content/2301.11488v1.pdf filter=lfs diff=lfs merge=lfs -text
+ I9AzT4oBgHgl3EQfjv2R/content/2301.01521v1.pdf filter=lfs diff=lfs merge=lfs -text
+ I9AzT4oBgHgl3EQfjv2R/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ CNAzT4oBgHgl3EQfh_3R/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ k9AzT4oBgHgl3EQfbvzP/content/2301.01392v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 5dAzT4oBgHgl3EQf9_4T/content/2301.01926v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 5dAzT4oBgHgl3EQf9_4T/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ itE0T4oBgHgl3EQf7AJH/content/2301.02770v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 2tAzT4oBgHgl3EQfuP3I/content/2301.01689v1.pdf filter=lfs diff=lfs merge=lfs -text
+ QdAyT4oBgHgl3EQftvmX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ itE0T4oBgHgl3EQf7AJH/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ xtE4T4oBgHgl3EQfYAyx/content/2301.05046v1.pdf filter=lfs diff=lfs merge=lfs -text
+ XdFJT4oBgHgl3EQfQCyF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ kNFAT4oBgHgl3EQfbB0a/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ONE1T4oBgHgl3EQfZwTX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ jNE0T4oBgHgl3EQf7ALm/content/2301.02772v1.pdf filter=lfs diff=lfs merge=lfs -text
+ P9AyT4oBgHgl3EQfUve2/content/2301.00132v1.pdf filter=lfs diff=lfs merge=lfs -text
+ stAzT4oBgHgl3EQfPPv2/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ FdE1T4oBgHgl3EQfEwOD/content/2301.02894v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 5dE2T4oBgHgl3EQfOgbi/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ kb_27/content/kb_27.pdf filter=lfs diff=lfs merge=lfs -text
+ FdE0T4oBgHgl3EQfzALi/content/2301.02668v1.pdf filter=lfs diff=lfs merge=lfs -text
+ adE3T4oBgHgl3EQf2ws3/content/2301.04757v1.pdf filter=lfs diff=lfs merge=lfs -text
+ -9E1T4oBgHgl3EQf8gV6/content/2301.03546v1.pdf filter=lfs diff=lfs merge=lfs -text
+ NNFAT4oBgHgl3EQfxx6m/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ CNE1T4oBgHgl3EQfpgW7/content/2301.03333v1.pdf filter=lfs diff=lfs merge=lfs -text
+ rNE2T4oBgHgl3EQf1Aga/content/2301.04146v1.pdf filter=lfs diff=lfs merge=lfs -text
+ kNAzT4oBgHgl3EQfNfsi/content/2301.01148v1.pdf filter=lfs diff=lfs merge=lfs -text
+ kNAzT4oBgHgl3EQfNfsi/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ eNE4T4oBgHgl3EQfpw2r/content/2301.05195v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 4dE4T4oBgHgl3EQfBAsA/content/2301.04847v1.pdf filter=lfs diff=lfs merge=lfs -text
+ jNE2T4oBgHgl3EQfIAbu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 19FQT4oBgHgl3EQf1zZe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ CNE1T4oBgHgl3EQfpgW7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ eNE4T4oBgHgl3EQfpw2r/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ pdAzT4oBgHgl3EQfAfrM/content/2301.00928v1.pdf filter=lfs diff=lfs merge=lfs -text
+ k9AzT4oBgHgl3EQfbvzP/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ jNE0T4oBgHgl3EQf7ALm/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ jNE2T4oBgHgl3EQfIAbu/content/2301.03676v1.pdf filter=lfs diff=lfs merge=lfs -text
+ -9E1T4oBgHgl3EQf8gV6/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ bdE1T4oBgHgl3EQfKwMf/content/2301.02967v1.pdf filter=lfs diff=lfs merge=lfs -text
+ RdAzT4oBgHgl3EQf0P6C/content/2301.01781v1.pdf filter=lfs diff=lfs merge=lfs -text
+ i9E3T4oBgHgl3EQf5AsY/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ jdE3T4oBgHgl3EQf5Au-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ dtFJT4oBgHgl3EQfSSx0/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 0tAyT4oBgHgl3EQfbfeR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 4dE4T4oBgHgl3EQfBAsA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ m9E0T4oBgHgl3EQfpwHn/content/2301.02545v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 8NE1T4oBgHgl3EQfBwLj/content/2301.02857v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 5tAzT4oBgHgl3EQf9_5e/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ M9E1T4oBgHgl3EQfZgQt/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
0tAyT4oBgHgl3EQfbfeR/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4e4d1cabe13d1410f34dcc14fb68f8708baba9da6cc1495b68e9c7129d39dfd2
+ size 2949165
19FQT4oBgHgl3EQf1zZe/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa0834eb787769e5086a4d081a954b3ec801fcb2f7a39c1473884457c11ad6e8
+ size 4915245
2NE0T4oBgHgl3EQfdwA8/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afea43c78e67936970af0030452446e85e6751a0808e44af96fb37d3d4d9d77f
+ size 81140
2tAzT4oBgHgl3EQfuP3I/content/2301.01689v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23abe30b145b16ed70baa15fb6c390ce2c646035862db6c3114731868af3977d
+ size 13553799
3dAzT4oBgHgl3EQfR_t3/content/tmp_files/2301.01225v1.pdf.txt ADDED
@@ -0,0 +1,2853 @@
Two-Dimensional Golay Complementary Array Sets With Arbitrary Lengths for Omnidirectional MIMO Transmission

You-Qi Zhao, Cheng-Yu Pai, Zhen-Ming Huang, Zilong Liu, Senior Member, IEEE, and Chao-Yu Chen, Member, IEEE

Abstract
This paper presents a coding approach for achieving omnidirectional transmission of certain common signals in massive multi-input multi-output (MIMO) networks, such that the received power in any direction of a cell remains constant for any given distance. Specifically, two-dimensional (2D) Golay complementary array sets (GCASs) can be used to design optimal massive MIMO precoding matrices that achieve omnidirectional transmission, thanks to their complementary autocorrelation property. In this paper, novel constructions of new 2D GCASs with arbitrary array lengths are proposed. Our key idea is to carefully truncate the columns of certain larger arrays generated by 2D generalized Boolean functions. Finally, power radiation patterns and numerical results are provided to verify the omnidirectional property of the GCAS-based precoding. The error performance of the proposed precoding scheme is presented to validate its superiority over existing alternatives.

Index Terms
Generalized Boolean function (GBF), Golay complementary array pair (GCAP), Golay complementary array set (GCAS), omnidirectional precoding (OP), uniform rectangular array (URA).

This work was supported by the Ministry of Science and Technology, Taiwan, R.O.C., under Grant MOST 109-2628-E-006-008-MY3 and MOST 111-2218-E-305-002.
You-Qi Zhao and C.-Y. Pai are with the Department of Engineering Science, National Cheng Kung University, Tainan 701, Taiwan, R.O.C. (e-mail: n98081505@gs.ncku.edu.tw).
Z.-M. Huang is with the Institute of Computer and Communication Engineering, National Cheng Kung University, Tainan 701, Taiwan, R.O.C. (e-mail: n98101012@gs.ncku.edu.tw).
Zilong Liu is with the School of Computer Science and Electronic Engineering, University of Essex, United Kingdom (e-mail: zilong.liu@essex.ac.uk).
C.-Y. Chen is with the Department of Electrical Engineering and the Institute of Computer and Communication Engineering, National Cheng Kung University, Tainan 701, Taiwan, R.O.C. (e-mail: super@mail.ncku.edu.tw).

arXiv:2301.01225v1 [cs.IT] 3 Jan 2023
I. INTRODUCTION

Complementary pairs and sets of sequences have attracted sustained research interest owing to their zero aperiodic correlation sum properties. To be specific, a Golay complementary pair (GCP) refers to a pair of equal-length sequences whose aperiodic autocorrelations sum to zero except at the zero time-shift [1]. This concept was extended to the Golay complementary set (GCS), with more than two constituent sequences, by Tseng and Liu in [2]. Furthermore, a maximum collection of GCSs is called a set of complete complementary codes (CCC) [3] if any two different GCSs have zero aperiodic cross-correlation sums for all time-shifts. In the literature, GCSs and CCCs have been widely used for radar sensing [4], channel estimation [5], precoding for massive multi-input multi-output (MIMO) systems [6], peak-to-average power ratio (PAPR) reduction in orthogonal frequency division multiplexing (OFDM) [7]-[13], interference-free multicarrier code division multiple access [14]-[17], and many other applications [18], [19].

Recently, there has been a surge of research attention on two-dimensional (2D) Golay complementary array sets (GCASs) [18]-[23], each having the zero aperiodic autocorrelation sum property in two directions of shifts (compared to conventional GCSs and CCCs with time-shifts only). An important application of 2D GCASs is omnidirectional transmission in MIMO communication systems with a uniform rectangular array (URA) configuration [20], [21]. In massive MIMO systems, some common messages (e.g., reference signals, synchronization signals, control signals, etc.) need to be broadcast with uniform power to all angles within the whole cell. In this paper, we consider a space-time block code (STBC) for harvesting the diversity gain. At the base station (BS), the STBC-encoded symbols are assigned to several streams and then mapped onto the URA antennas by certain 2D-GCAS-assisted precoding matrices to achieve uniform power radiation at any angle.

On the other hand, since a large number of antennas is deployed in massive MIMO systems, a huge pilot overhead may be needed to acquire the channel state information (CSI). As pointed out in [22], this can be alleviated by omnidirectional precoding (OP) based transmission. For uniform linear arrays (ULAs), Zadoff-Chu (ZC) sequences were adopted to satisfy the requirements of the omnidirectional property. However, [22] only considered omnidirectional transmission in certain directions. Later in [6], GCS- and CCC-based OP matrices were proposed to meet the requirement of omnidirectional transmission across all directions.

In [20], [21], [23], [24], 2D GCASs were employed as precoding matrices for URAs by applying interleaving and Kronecker products to existing 1D sequences or 2D arrays. As a result, the array sizes of these 2D GCASs are only feasible for certain lengths. A construction of 2D GCASs of array size p^n x p^m was proposed in [25] by using permutation polynomials (PPs) and 2-level autocorrelation sequences, where p is a prime number and m, n are positive integers. Furthermore, a unifying construction framework for 2D GCASs was developed in [26] via multivariate polynomial matrices obtained from certain seed para-unitary (PU) matrices. In [27], [28], Pai and Chen proposed direct constructions of 2D Golay complementary array pairs (GCAPs) and GCASs with array size 2^n x 2^m from 2D generalized Boolean functions (GBFs) [29], where n, m are integers with n, m >= 2. A 2D GCAP can be regarded as a 2D GCAS with set size 2. Moreover, Pai et al. [30] proposed a direct construction of 2D CCCs with array size 2^n x 2^m, which have ideal autocorrelations and cross-correlations. Later, Liu et al. [31] proposed a construction of GCASs with array size p^n x p^m by using 2D multivariable functions, where p is a prime number and n, m are integers with n, m >= 2. Based on [27], the work in [32] developed a direct construction of GCASs with set size 4 and array size 2^n x (2^{m-1} + 2^v) by using 2D GBFs, where n, m, v are positive integers with n, m >= 2 and 0 <= v <= m - 1.

The aforementioned research efforts are generally driven by the need for highly flexible array sizes of 2D GCASs. Motivated by this, we aim at generating new GCASs with arbitrary array lengths. The key idea of our proposed constructions is to carefully truncate some columns of certain larger arrays generated by 2D GBFs. Thus, our proposed GCASs can be applied to URAs with various array sizes. In addition, the proposed GCASs can be directly generated from 2D GBFs without the need for any specific sequences or tedious sequence operations. In Table I, we compare the parameters of existing 2D GCASs with our proposed ones.

The remainder of this paper is organized as follows. Section II introduces notations, definitions, the system model, and omnidirectional transmission in MIMO systems. Section III describes our proposed constructions of 2D GCASs. Section IV shows the power radiation pattern and bit error rate (BER) performance based on the proposed 2D GCAS precoding. Finally, Section V presents the conclusion.
TABLE I
A COMPARISON OF CONSTRUCTIONS FOR 2D GCASs

Construction      | Parameters                                                                    | Approach
[26, Th. 5]       | (N, N^n, N^m), N, n, m > 0                                                    | Seed PU matrices
[26, Th. 7]       | (2^k, 2^{kn}, 2^{km}), n, m, k > 0                                            | Seed PU matrices
[25, Th. 4]       | (p, p^n, p^m), prime p, n, m > 0                                              | PPs and 2-level autocorrelation sequences
[25, Th. 6]       | (p^k, p^{kn}, p^{km}), prime p, k, n, m > 0                                   | PPs and 2-level autocorrelation sequences
[31, Th. 1]       | (p_1^{k_1} p_2^{k_2}, p_1^n, p_2^m), primes p_1, p_2                          | 2D multivariable functions
[31, Th. 2]       | (p^k, p^n, p^m), prime p, n + m >= k > 0                                      | 2D multivariable functions
[27], [28], [30]  | (2^k, 2^n, 2^m), n, m >= k > 0                                                | 2D GBFs
[32]              | (4, 2^n, 2^{m-1} + 2^v), n, m >= 2                                            | 2D GBFs
Th. 1             | (2^{k+1}, 2^n, 2^{m-1} + sum_{a=1}^{k-1} d_a 2^{m-k+a-1} + d_0 2^v),          | 2D GBFs
                  |   k < m, 0 <= v <= m - k, d_a in {0, 1}                                       |
Th. 2             | (2^{k+1}, 2^n, 2^{m-1} + sum_{a=1}^{k-1} d_a 2^{pi_1(m-k+a)-1} + d_0 2^v),    | 2D GBFs
                  |   k < m, 0 <= v <= m - k, d_a in {0, 1}                                       |
II. PRELIMINARIES AND DEFINITIONS

A. Notations

Throughout this paper, we use the following notations:
• (a)_i refers to the i-th element of the vector a.
• (A)_{i,j} denotes the (i, j)-th element of the array A.
• (.)^H refers to the conjugate transpose.
• diag(A) refers to the column vector composed of the main diagonal of A.
• (.)^* refers to the complex conjugate of an element.
• (.)^T refers to the transpose.
• vec(.) denotes stacking the columns of a matrix on top of one another into a single column vector.
• 1 is the all-ones column vector.
• \xi = e^{2\pi\sqrt{-1}/q}.
• In this paper, q is an even number.

Let X and Y be two arrays of size L_1 x L_2. Then X and Y can be written as

    X = (X_{g,i}),   Y = (Y_{g,i}),                                                           (1)

where g = 0, 1, ..., L_1 - 1 and i = 0, 1, ..., L_2 - 1.

Definition 1: Given two arrays X and Y of size L_1 x L_2, the 2D aperiodic cross-correlation function (AACF) is defined by

    \rho(X, Y; u_1, u_2) =
      \sum_{g=0}^{L_1-1-u_1} \sum_{i=0}^{L_2-1-u_2} Y_{g+u_1, i+u_2} X^*_{g,i},       0 <= u_1 < L_1,  0 <= u_2 < L_2;
      \sum_{g=0}^{L_1-1-u_1} \sum_{i=0}^{L_2-1+u_2} Y_{g+u_1, i} X^*_{g, i-u_2},      0 <  u_1 < L_1,  -L_2 < u_2 < 0;
      \sum_{g=0}^{L_1-1+u_1} \sum_{i=0}^{L_2-1+u_2} Y_{g, i} X^*_{g-u_1, i-u_2},      -L_1 < u_1 < 0,  -L_2 < u_2 < 0;
      \sum_{g=0}^{L_1-1+u_1} \sum_{i=0}^{L_2-1-u_2} Y_{g, i+u_2} X^*_{g-u_1, i},      -L_1 < u_1 < 0,  0 < u_2 < L_2.     (2)

When X = Y, it is called the 2D aperiodic autocorrelation function (AACF) and denoted by \rho(X; u_1, u_2). If L_1 = 1, the two 2D arrays X and Y degrade to 1D sequences X = (X_i) and Y = (Y_i) for i = 0, 1, ..., L_2 - 1, respectively. The 1D AACF of the sequence X is then given by

    \rho(X; u) =
      \sum_{i=0}^{L_2-1-u} X_{i+u} X^*_i,     0 <= u <= L_2 - 1;
      \sum_{i=0}^{L_2-1+u} X_i X^*_{i-u},     -L_2 + 1 <= u < 0.                              (3)
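As a concrete companion to Definition 1, the following NumPy sketch evaluates the 2D AACF by direct summation; the four sign cases of (2) collapse into one bounds-checked loop. The function name and style are our own illustrative choices, not part of the paper.

import numpy as np

def aacf2d(X, Y, u1, u2):
    # 2D aperiodic cross-correlation of Definition 1: sum of
    # Y[g+u1, i+u2] * conj(X[g, i]) over all index pairs where both entries exist.
    L1, L2 = X.shape
    total = 0.0 + 0.0j
    for g in range(L1):
        for i in range(L2):
            gg, ii = g + u1, i + u2
            if 0 <= gg < L1 and 0 <= ii < L2:
                total += Y[gg, ii] * np.conj(X[g, i])
    return total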
In this paper, q-PSK modulation is employed. Thus, x and y denote q-ary arrays and (1) is expressed as

    X = (X_{g,i}) = (\xi^{x_{g,i}}) = \xi^x;   Y = (Y_{g,i}) = (\xi^{y_{g,i}}) = \xi^y,        (4)

where x = (x_{g,i}), y = (y_{g,i}), and x_{g,i}, y_{g,i} \in Z_q = {0, 1, ..., q-1} for 0 <= g < L_1, 0 <= i < L_2.

A set of N length-L sequences can be represented as

    C = {X_0, X_1, ..., X_{N-1}},  where  X_n = (X_{n,0}, X_{n,1}, ..., X_{n,L-1})  for n = 0, 1, ..., N - 1.

Definition 2: [19] If a set C consisting of N sequences of length L satisfies

    \sum_{k=0}^{N-1} \rho(X_k; u) = NL  for u = 0,  and  0  for u != 0,                        (5)

then the set C is called a Golay complementary set of size N, denoted by (N, L)-GCS. The GCP can be regarded as a special case of the GCS obtained by setting N = 2.

Definition 3: For a GCP (X_0, X_1), if another GCP (Y_0, Y_1) satisfies

    \rho(X_0, Y_0; u) + \rho(X_1, Y_1; u) = 0  for all u,                                      (6)

then the two GCPs are called Golay complementary mates of each other.

Definition 4: A pair of arrays X and Y of size L_1 x L_2 is called a 2D Golay complementary array pair (GCAP) if

    \rho(X; u_1, u_2) + \rho(Y; u_1, u_2) = 2 L_1 L_2  for u_1 = u_2 = 0,  and  0  for u_1 != 0 or u_2 != 0.      (7)

Definition 5: Let G = {X_0, X_1, ..., X_{N-1}} be an array set in which each array is of size L_1 x L_2. If the array set G satisfies

    \sum_{k=0}^{N-1} \rho(X_k; u_1, u_2) = N L_1 L_2  for u_1 = u_2 = 0,  and  0  for u_1 != 0 or u_2 != 0,       (8)

then G is called a Golay complementary array set of set size N, denoted by (N, L_1, L_2)-GCAS, where L_2 is defined as the length of the GCAS. If N = 2, the GCAS degrades to a GCAP.
346
+ a GCAP.
347
+ B. Generalized Boolean Functions
348
+ A 2D generalized Boolean function (GBF) f in n + m binary variables y1, y2, · · · , yn,
349
+ x1, x2, · · · , xm, is a function mapping: Zn
350
+ 2 ×Zm
351
+ 2 → Zq, where xi, yg ∈ {0, 1} for i = 1, 2, · · · , m
352
+ and g = 1, 2, · · · , n. A monomial of degree r is given by any product of r distinct variables
353
+ among y1, y2, · · · , yn, x1, x2, · · · , xm. For instance, x1x3y1y2 is a monomial of degree 4. Next,
354
+ the variables z1, z2, · · · , zn+m are defined as
355
+ zl =
356
+
357
+
358
+
359
+
360
+
361
+ yl
362
+ if 1 ≤ l ≤ n;
363
+ xl−n
364
+ if n < l ≤ m + n,
365
+ (9)
366
+ January 4, 2023
367
+ DRAFT
368
+
369
+ 7
370
+ which are useful for our proposed constructions. For a 2D GBF with n + m variables, the 2D
371
+ Zq-valued array
372
+ f =
373
+
374
+
375
+
376
+
377
+
378
+
379
+
380
+ f0,0
381
+ f0,1
382
+ · · ·
383
+ f0,2m−1
384
+ f1,0
385
+ f1,1
386
+ · · ·
387
+ f1,2m−1
388
+ ...
389
+ ...
390
+ ...
391
+ ...
392
+ f2n−1,0
393
+ f2n−1,1
394
+ · · ·
395
+ f2n−1,2m−1
396
+
397
+
398
+
399
+
400
+
401
+
402
+
403
+ (10)
404
+ of size 2n×2m is given by letting fg,i = f((g1, g2, · · · , gn), (i1, i2, · · · , im)), where (g1, g2, · · · , gn)
405
+ and (i1, i2, · · · , im) are binary vector representations of integers g = �n
406
+ h=1 gh2h−1 and i =
407
+ �n
408
+ j=1 ij2j−1, respectively.
409
+ Example 1: Taking q = 4, n = 2, and m = 3 for example, the 2D GBF is given as f =
410
+ 3z5z4 + z2z3 + 2z2. Then the array f of size 4 × 8 corresponding to f can be obtained, i.e.,
411
+ f =
412
+
413
+
414
+
415
+
416
+
417
+
418
+
419
+ 0
420
+ 0
421
+ 0
422
+ 0
423
+ 0
424
+ 0
425
+ 3
426
+ 3
427
+ 0
428
+ 0
429
+ 0
430
+ 2
431
+ 1
432
+ 1
433
+ 3
434
+ 3
435
+ 2
436
+ 3
437
+ 2
438
+ 3
439
+ 2
440
+ 3
441
+ 1
442
+ 2
443
+ 2
444
+ 3
445
+ 2
446
+ 3
447
+ 2
448
+ 3
449
+ 1
450
+ 2
451
+
452
+
453
+
454
+
455
+
456
+
457
+
458
+ .
459
+ (11)
460
+ The GBF f can be rewritten as f = 3x3x2 + y2x1 + 2y2. In this paper, we consider the array
461
+ size ̸= 2n × 2m. Hence, we define the truncated array f (L) corresponding to the 2D GBF f by
462
+ ignoring the last 2m − L columns of the corresponding array f.
463
+ Example 2: Following the same notations given in Example 1, the truncated array f (6) is
464
+ given by
465
+ f (6) =
466
+
467
+
468
+
469
+
470
+
471
+
472
+
473
+ 0
474
+ 0
475
+ 0
476
+ 0
477
+ 0
478
+ 0
479
+ 0
480
+ 0
481
+ 0
482
+ 2
483
+ 1
484
+ 1
485
+ 2
486
+ 3
487
+ 2
488
+ 3
489
+ 2
490
+ 3
491
+ 2
492
+ 3
493
+ 2
494
+ 3
495
+ 2
496
+ 3
497
+
498
+
499
+
500
+
501
+
502
+
503
+
504
+ .
505
+ (12)
506
+ For simplicity, we use f to stand for f (L) when L is known.
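The mapping from a 2D GBF to its array in (10), together with the column truncation, can be sketched as below. The LSB-first bit conventions and names are our reading of (10), used only for illustration; the complex-valued array is then obtained as \xi^f.

def gbf_to_array(f, n, m, q):
    # Evaluate f((g1,...,gn), (i1,...,im)) for every row index g and column index i,
    # with g_h and i_j taken as the LSB-first binary digits of g and i (cf. (10)).
    rows, cols = 2 ** n, 2 ** m
    A = np.zeros((rows, cols), dtype=int)
    for g in range(rows):
        gbits = [(g >> h) & 1 for h in range(n)]
        for i in range(cols):
            ibits = [(i >> j) & 1 for j in range(m)]
            A[g, i] = f(gbits, ibits) % q
    return A

# Example 1's function (q = 4, n = 2, m = 3): f = 3*x3*x2 + y2*x1 + 2*y2.
f_ex1 = lambda y, x: 3 * x[2] * x[1] + y[1] * x[0] + 2 * y[1]
arr = gbf_to_array(f_ex1, n=2, m=3, q=4)
arr_trunc = arr[:, :6]    # the truncated array f^(6): keep only the first L = 6 columns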
C. System Model

We consider downlink transmission from a BS to UEs, each equipped with a single antenna, and suppose that the number of antennas at the BS is M = L_1 x L_2, i.e., the URA consists of L_1 rows and L_2 columns. Fig. 1 illustrates the data downlink transmission. For an L_1 x L_2 URA, the steering matrix A(\phi, \theta) at the direction (\phi, \theta) has (g, i)-th entry

    (A(\phi, \theta))_{g,i} = e^{-j (2\pi/\lambda) g d_y \sin\phi \sin\theta - j (2\pi/\lambda) i d_x \sin\phi \cos\theta},
    for g = 0, 1, ..., L_1 - 1,  i = 0, 1, ..., L_2 - 1,  \theta \in [0, 2\pi],  \phi \in [0, \pi/2],                    (13)

where d_x and d_y denote the vertical and horizontal antenna inter-element spacings of the URA, respectively, and \lambda denotes the carrier wavelength. To enhance the spatial diversity and communication reliability, the STBC signal transmission scheme is used. The N x M STBC is given by

    S \triangleq [ s_0(0)      s_0(1)      ...  s_0(M-1)
                   s_1(0)      s_1(1)      ...  s_1(M-1)
                   ...
                   s_{N-1}(0)  s_{N-1}(1)  ...  s_{N-1}(M-1) ]  \in C^{N x M},                                           (14)

where C^{N x M} refers to the N-by-M complex space and s_n(t) denotes the (n, t)-th element of the STBC at time instant t for t = 0, 1, ..., M - 1. We define the precoding matrix W_n of size L_1 x L_2. The encoded symbols are given by

    x(t) = (x_0(t), x_1(t), ..., x_{L_1 L_2 - 1}(t))^T = vec( \sum_{n=0}^{N-1} W_n s_n(t) ),  for t = 0, 1, ..., M - 1,  (15)

which are transmitted by the L_1 L_2 antennas of the URA. In the line-of-sight (LOS) channel without multipath, the received signal at the direction (\phi, \theta) can be written as

    y(t) = \sum_{n=0}^{N-1} ( vec(A(\phi, \theta))^T vec(W_n) ) s_n(t) + \eta(t),   t = 0, ..., M - 1,                   (16)

where \eta(t) is the additive white Gaussian noise (AWGN) at time instant t.

[Fig. 1. Diagram of data transmission through STBC encoding and omnidirectional precoding.]
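The steering matrix (13) and the precoded transmit vector (15) translate directly into a few lines of NumPy. The sketch below uses column-major stacking for vec(.) and parameter names of our choosing; it is a hedged illustration, not the authors' code.

def steering_matrix(L1, L2, phi, theta, dx, dy, lam):
    # (A(phi, theta))_{g,i} per (13)
    g = np.arange(L1).reshape(-1, 1)
    i = np.arange(L2).reshape(1, -1)
    phase = (2 * np.pi / lam) * (g * dy * np.sin(phi) * np.sin(theta)
                                 + i * dx * np.sin(phi) * np.cos(theta))
    return np.exp(-1j * phase)

def precode(W_list, s_t):
    # x(t) of (15): vec of the weighted sum of the N precoding matrices,
    # with s_t = (s_0(t), ..., s_{N-1}(t)).
    acc = sum(W * s for W, s in zip(W_list, s_t))
    return acc.reshape(-1, order='F')    # vec(.) stacks columns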
D. Omnidirectional Precoding Matrices Based on 2D Arrays

In this subsection, we list two necessary requirements for the design of OP matrices and then connect these requirements with conditions on 2D arrays.

Requirement 1 (R1): Omnidirectional transmission.

We consider the MIMO system with a URA. Following (16), the received power E at the angle (\phi, \theta) is

    E = \sum_{n=0}^{N-1} | vec(A(\phi, \theta))^T vec(W_n) |^2.                               (17)

Therefore, to achieve omnidirectional transmission in the whole cell, (17) must be constant for all \phi and \theta.

Requirement 2 (R2): Equal average power on each antenna.

To enhance the efficiency of the power amplifiers, the average transmission power on all L_1 x L_2 antennas is required to be equal. We define

    W = ( vec(W_0), vec(W_1), ..., vec(W_{N-1}) ),                                            (18)

where the size of W is L_1 L_2 x N. Hence, (15) can be rewritten as

    X = ( x(0), x(1), ..., x(M-1) ) = W S.                                                    (19)

Let s(t) be the t-th column of S. Throughout this paper, we assume E[ s(t) s(t)^H ] = I_N. The transmitted signal on the (l_1, l_2)-th antenna is (W s)_{l_2 L_1 + l_1}, and its average power can be expressed as

    E[ |(W s)_{l_2 L_1 + l_1}|^2 ] = ( W E[ s(t) s(t)^H ] W^H )_{l_2 L_1 + l_1, l_2 L_1 + l_1}
                                   = ( W W^H )_{l_2 L_1 + l_1, l_2 L_1 + l_1}.                (20)

Therefore, the condition guaranteeing equal power on each antenna is equivalent to

    diag( W W^H ) = N 1.                                                                      (21)

Next, we derive two sufficient conditions on the precoding matrices to fulfill requirements R1 and R2.

Lemma 1: [21] For an L_1 x L_2 URA, if the precoding matrices W_0, W_1, ..., W_{N-1} of size L_1 x L_2 form an (N, L_1, L_2)-GCAS, then omnidirectional transmission is achieved.

Lemma 2: For an L_1 x L_2 URA, if the precoding matrices W_0, W_1, ..., W_{N-1} of size L_1 x L_2 are unimodular, then the average power on each antenna is equal.

Proof: To meet the requirement of equal average power on each antenna, the precoding matrix W must satisfy (21). Let w_i = vec(W_i) for i = 0, 1, ..., N - 1. Then

    diag( W W^H ) = ( \sum_{i=0}^{N-1} |(w_i)_0|^2, \sum_{i=0}^{N-1} |(w_i)_1|^2, ..., \sum_{i=0}^{N-1} |(w_i)_{L_1 L_2 - 1}|^2 )^T = N 1    (22)

since we have

    |(w_i)_n|^2 = 1  for i = 0, 1, ..., N - 1 and n = 0, 1, ..., L_1 L_2 - 1.                 (23)

According to (21), requirement (R2) is fulfilled.

In the sequel, the design of the OP matrices W_0, W_1, ..., W_{N-1} is based on Lemma 1 and Lemma 2. That is, our goal is to construct unimodular GCASs with flexible sizes.
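Both requirements can be probed numerically with the helpers sketched earlier: (17) gives the radiated power in a direction, and (21) the per-antenna power balance. The functions below are hedged illustrations with names and signatures of our own.

def radiated_power(W_list, phi, theta, dx, dy, lam):
    # Received power E of (17) at direction (phi, theta) for precoders W_0, ..., W_{N-1}.
    L1, L2 = W_list[0].shape
    a = steering_matrix(L1, L2, phi, theta, dx, dy, lam).reshape(-1, order='F')
    return sum(abs(a @ W.reshape(-1, order='F')) ** 2 for W in W_list)

def equal_power_per_antenna(W_list, tol=1e-9):
    # Requirement R2 via (21): diag(W W^H) must equal N on every antenna.
    W = np.column_stack([Wn.reshape(-1, order='F') for Wn in W_list])
    return np.allclose(np.diag(W @ W.conj().T).real, len(W_list), atol=tol)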
III. GCASs WITH FLEXIBLE ARRAY SIZE

In this section, two constructions of 2D GCASs with arbitrary array lengths based on 2D GBFs are proposed. Recalling the variable mapping in (9), we present our first theorem as follows.

Theorem 1: For any integers q, m, n >= 2 and k < m, let v be an integer satisfying 0 <= v <= m - k, and let \pi be a permutation of {1, 2, ..., m + n - k} satisfying {z_{\pi(1)}, z_{\pi(2)}, ..., z_{\pi(v+n)}} = {z_1, z_2, ..., z_{v+n}}. Let the 2D generalized Boolean function be

    f = (q/2) \sum_{l=1}^{m+n-k-1} z_{\pi(l)} z_{\pi(l+1)} + \sum_{s=1}^{m+n} p_s z_s + p_0,                                         (24)

where p_s \in Z_q. The array set

    G = { f + (q/2) \sum_{\alpha=1}^{k} \lambda_\alpha z_{m+n-k+\alpha} + (q/2) \lambda_{k+1} z_{\pi(1)} : \lambda_\alpha \in {0, 1} }    (25)

is a q-ary (2^{k+1}, 2^n, 2^{m-1} + \sum_{\alpha=1}^{k-1} d_\alpha 2^{m-k+\alpha-1} + d_0 2^v)-GCAS, where d_\alpha \in {0, 1}.

Proof: Without loss of generality, we consider L_1 = 2^n and L_2 = 2^{m-1} + \sum_{\alpha=1}^{k-1} 2^{m-k+\alpha-1} + 2^v. We need to show that

    \sum_{c \in G} \sum_{g=0}^{L_1-1-u_1} \sum_{i=0}^{L_2-1-u_2} \xi^{c_{g+u_1, i+u_2} - c_{g,i}} = 0                                (26)

for 0 <= u_1 < 2^n, 0 <= u_2 < 2^{m-1} + \sum_{\alpha=1}^{k-1} 2^{m-k+\alpha-1} + 2^v and (u_1, u_2) != (0, 0). Let h = g + u_1 and j = i + u_2 for any integers g and i. We also let (g_1, g_2, ..., g_n), (i_1, i_2, ..., i_m), (h_1, h_2, ..., h_n), and (j_1, j_2, ..., j_m) be the binary representations of g, i, h, and j, respectively. For ease of presentation, we denote

    a_l = g_l      for 1 <= l <= n;         b_l = h_l      for 1 <= l <= n;
          i_{l-n}  for n < l <= n + m;            j_{l-n}  for n < l <= n + m.                                                       (27)

In what follows, we consider four cases to show that the above formula holds.

Case 1: If a_{\pi(1)} != b_{\pi(1)}, then for any array c \in G the array c' = c + (q/2) z_{\pi(1)} \in G satisfies

    c_{h,j} - c_{g,i} - c'_{h,j} + c'_{g,i} = (q/2)( a_{\pi(1)} - b_{\pi(1)} ) \equiv q/2  (mod q).                                  (28)

Therefore, we have

    \xi^{c_{h,j} - c_{g,i}} + \xi^{c'_{h,j} - c'_{g,i}} = 0.                                                                         (29)

Case 2: If a_{m+n-k+\alpha} != b_{m+n-k+\alpha}, then for any array c \in G we can take c' = c + (q/2) z_{m+n-k+\alpha} \in G. Similar to Case 1, we have

    \xi^{c_{h,j} - c_{g,i}} + \xi^{c'_{h,j} - c'_{g,i}} = 0.                                                                         (30)

Case 3: Suppose a_{\pi(1)} = b_{\pi(1)} and a_{m+n-k+\alpha} = b_{m+n-k+\alpha} for \alpha = 1, 2, ..., k. Suppose that \alpha' is the largest integer satisfying a_{m+n-k+\alpha'} = b_{m+n-k+\alpha'} = 0 with \alpha' <= k, and let \beta be the smallest integer satisfying a_{\pi(\beta)} != b_{\pi(\beta)}. Let a' and b' be distinct from a and b, respectively, only in the position \pi(\beta - 1); in other words, a'_{\pi(\beta-1)} = 1 - a_{\pi(\beta-1)} and b'_{\pi(\beta-1)} = 1 - b_{\pi(\beta-1)}. If 1 <= \pi(\beta - 1) <= n, by the above definitions we have

    c_{g',i} - c_{g,i} = (q/2)( a_{\pi(\beta-2)} g'_{\pi(\beta-1)} - a_{\pi(\beta-2)} g_{\pi(\beta-1)} + g'_{\pi(\beta-1)} a_{\pi(\beta)} - g_{\pi(\beta-1)} a_{\pi(\beta)} ) + p_{\pi(\beta-1)} g'_{\pi(\beta-1)} - p_{\pi(\beta-1)} g_{\pi(\beta-1)}
                       \equiv (q/2)( a_{\pi(\beta-2)} + a_{\pi(\beta)} ) + p_{\pi(\beta-1)}( 1 - 2 g_{\pi(\beta-1)} )  (mod q),      (31)

where a'_{\pi(\beta-1)} = g'_{\pi(\beta-1)} and a_{\pi(\beta-1)} = g_{\pi(\beta-1)}. Since a_{\pi(\beta-2)} = b_{\pi(\beta-2)} and a_{\pi(\beta-1)} = b_{\pi(\beta-1)}, we have

    c_{h,j} - c_{g,i} - c_{h',j} + c_{g',i} \equiv (q/2)( a_{\pi(\beta-2)} - b_{\pi(\beta-2)} + a_{\pi(\beta)} - b_{\pi(\beta)} ) + p_{\pi(\beta-1)}( 2 h_{\pi(\beta-1)} - 2 g_{\pi(\beta-1)} )
                                            \equiv (q/2)( a_{\pi(\beta)} - b_{\pi(\beta)} ) \equiv q/2  (mod q),                     (32)

implying \xi^{c_{h,j} - c_{g,i}} / \xi^{c_{h',j} - c_{g',i}} = -1. We therefore obtain

    \xi^{c_{h,j} - c_{g,i}} + \xi^{c_{h',j} - c_{g',i}} = 0.                                                                         (33)

If n < \pi(\beta - 1) <= n + m, note that a'_{\pi(\beta-1)} = i'_{\pi(\beta-1)-n} and a_{\pi(\beta-1)} = i_{\pi(\beta-1)-n} according to (27). Following a similar argument to the one above, we get \xi^{c_{h,j} - c_{g,i}} + \xi^{c_{h,j'} - c_{g,i'}} = 0.

Case 4: Suppose a_{\pi(1)} = b_{\pi(1)} and a_{m+n-k+\alpha} = b_{m+n-k+\alpha} = 1 for \alpha = 1, 2, ..., k. Let \beta be the smallest integer such that a_{\pi(\beta)} != b_{\pi(\beta)}. Since a_s = b_s = 0 for s = v+n+1, v+n+2, ..., m+n-k, we obtain \pi(\beta) <= v + n, implying \pi(\beta - 1) <= v + n. If 1 <= \pi(\beta - 1) <= n, by a similar argument to the one above we have \xi^{c_{h,j} - c_{g,i}} + \xi^{c_{h',j} - c_{g',i}} = 0. If n < \pi(\beta - 1) <= v + n, we have \xi^{c_{h,j} - c_{g,i}} + \xi^{c_{h,j'} - c_{g,i'}} = 0.

From Cases 1 to 4, the theorem is proved.

Remark 1: The length parameter 2^{m-1} + \sum_{\alpha=1}^{k-1} d_\alpha 2^{m-k+\alpha-1} + d_0 2^v of the proposed GCASs in Theorem 1 can be any arbitrary length since m, k, v are flexible and d_\alpha \in {0, 1}.
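To see Remark 1 concretely, the short sketch below enumerates the column lengths reachable for fixed m, k, v; it is our own illustration of the length formula, not code from the paper.

from itertools import product

def theorem1_lengths(m, k, v):
    # L2 = 2^(m-1) + sum_{a=1}^{k-1} d_a 2^(m-k+a-1) + d_0 2^v over all d_a in {0, 1}.
    lengths = set()
    for bits in product((0, 1), repeat=k):           # bits = (d_1, ..., d_{k-1}, d_0)
        d_alpha, d0 = bits[:-1], bits[-1]
        L = 2 ** (m - 1) + sum(d * 2 ** (m - k + a - 1)
                               for a, d in enumerate(d_alpha, start=1)) + d0 * 2 ** v
        lengths.add(L)
    return sorted(lengths)

# e.g. m = 6, k = 3, v = 0 yields [32, 33, 40, 41, 48, 49, 56, 57]; sweeping m, k, v over
# their allowed ranges covers every length, which is the point of Remark 1.
print(theorem1_lengths(6, 3, 0))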
Example 3: Taking q = 2, m = 6, n = 2, k = 1, and v = 0, we let \pi = (1, 2, 3, 4, 5, 6, 7). The generalized Boolean function is f = z_1 z_2 + z_2 z_3 + z_3 z_4 + z_4 z_5 + z_5 z_6 + z_6 z_7 = x_1 x_2 + x_2 x_3 + x_3 x_4 + x_4 x_5 + y_1 y_2 + y_2 x_1, obtained by setting p_k = 0 for k = 0, 1, ..., m + n. The array set G = {f, f + x_6, f + y_1, f + x_6 + y_1} (i.e., adding (q/2) z_8 and (q/2) z_{\pi(1)} as in (25)) is a GCAS of set size 4 with array size 4 x 33. We let G = {c_0, c_1, c_2, c_3} and list the constituent arrays in Table II. Fig. 2 shows that the AACF sum of the set G is zero at every shift with u_1 != 0 or u_2 != 0. Thus, the array set G is a (4, 4, 33)-GCAS.

[TABLE II. THE CONSTRUCTED (4, 4, 33)-GCAS IN EXAMPLE 3: the four binary 4 x 33 constituent arrays c_0, c_1, c_2, c_3 (entries not reproduced here).]

[Fig. 2. The summation of autocorrelations of the constituent arrays in the GCAS in Example 3.]
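Under our reading of the index conventions, Example 3 can be checked end-to-end with the helper sketches above; the snippet below builds the four truncated arrays and calls is_gcas. It is an illustration only, expected to return True if the construction holds.

# Example 3: q = 2, n = 2, m = 6, truncated length L2 = 33.
q, n, m, L2 = 2, 2, 6, 33

def f_ex3(y, x):
    # f = x1*x2 + x2*x3 + x3*x4 + x4*x5 + y1*y2 + y2*x1 (all p_s = 0)
    return (x[0]*x[1] + x[1]*x[2] + x[2]*x[3] + x[3]*x[4] + y[0]*y[1] + y[1]*x[0])

base = gbf_to_array(f_ex3, n, m, q)[:, :L2]
x6   = gbf_to_array(lambda y, x: x[5], n, m, q)[:, :L2]   # z_8 = x_6 as an array
y1   = gbf_to_array(lambda y, x: y[0], n, m, q)[:, :L2]
G = [np.exp(2j * np.pi * ((base + a * x6 + b * y1) % q) / q)   # q = 2: entries are +/-1
     for a in (0, 1) for b in (0, 1)]
print(is_gcas(G))    # expected: True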
1465
+ 14
1466
+ Next, we introduce a lemma which illustrates a construction of (4, 2n, 2m−1 +2v)-GCAS from
1467
+ 2D GBFs.
1468
+ Lemma 3: [32, Th. 1] For nonnegative integers m, n, and v with 0 ≤ v < m − 1, let π1 be
1469
+ a permutation of {1, 2, · · · , m − 1} and π2 be a permutation of {1, 2, · · · , n}. The 2D GBF is
1470
+ given by
1471
+ f =q
1472
+ 2
1473
+ �m−2
1474
+
1475
+ k=1
1476
+ xπ1(k)xπ1(k+1) +
1477
+ n−1
1478
+
1479
+ k=1
1480
+ yπ2(k)yπ2(k+1) + xπ1(m−1)xm + xmyπ2(1)
1481
+
1482
+ +
1483
+ m
1484
+
1485
+ l=1
1486
+ plxl +
1487
+ n
1488
+
1489
+ s=1
1490
+ κsys + p0
1491
+ (34)
1492
+ where pl, κs ∈ Zq. Then the array set
1493
+ G =
1494
+
1495
+ f, f + q
1496
+ 2xπ1(1), f + q
1497
+ 2yπ2(n), f + q
1498
+ 2xπ1(1) + q
1499
+ 2yπ2(n)
1500
+
1501
+ is a (4, 2n, 2m−1 + 2v)-GCAS.
1502
+ Since the set size of the GCAS from Lemma 3 is limited to 4, we propose a general
1503
+ construction of 2D GCASs with more flexible array sizes and set sizes which can include Lemma
1504
+ 3 as a special case.
1505
+ Theorem 2: For any integers q, m, n ≥ 2, and k < m, v is an integer satisfies 0 ≤ v ≤ m−k.
1506
+ Assume that π1 is a permutation of {1, 2, · · · m} and π2 is a permutation of {1, 2, · · · n}. The
1507
+ 2D generalized Boolean function can be written as
1508
+ f =q
1509
+ 2
1510
+ �m−k−1
1511
+
1512
+ l=1
1513
+ xπ1(l)xπ1(l+1) +
1514
+ n−1
1515
+
1516
+ s=1
1517
+ yπ2(s)yπ2(s+1) + xπ1(m)yπ2(n)
1518
+
1519
+ +
1520
+ m−k
1521
+
1522
+ l=1
1523
+ µlxπ1(l)xπ1(m) +
1524
+ m
1525
+
1526
+ l=1
1527
+ plxk +
1528
+ n
1529
+
1530
+ s=1
1531
+ κsys + p0
1532
+ (35)
1533
+ where µl, pl, κs, ∈ Zq. The array set
1534
+ G =
1535
+
1536
+ f + q
1537
+ 2
1538
+ k−1
1539
+
1540
+ α=1
1541
+ λαxπ1(m−k+α) + q
1542
+ 2λkyπ2(1) + q
1543
+ 2λk+1xπ1(1) : λα ∈ {0, 1}
1544
+
1545
+ (36)
1546
+ is a q-ary (2k+1, 2n, 2m−1 + �k−1
1547
+ α=1 dα2π1(m−k+α)−1 + d02v)-GCAS where dα ∈ {0, 1} if the
1548
+ following three conditions hold.
1549
+ (C1) {π1(1), π1(2), · · · , π1(v)} = {1, 2, · · · , v} if v > 0;
1550
+ (C2) π1(m − k + α) < π1(m − k + α + 1) for 1 ≤ α ≤ k − 1 where π1(m) = m;
1551
+ (C3) For 1 ≤ α ≤ k − 1 and 2 ≤ β ≤ m − k, if π1(β) < π1(m − k + α), then π1(β − 1) <
1552
+ π1(m − k + α).
1553
+ January 4, 2023
1554
+ DRAFT
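Conditions (C1)-(C3) are purely combinatorial, so they are easy to test for a candidate permutation. The helper below (1-based permutation passed as a tuple) is a hedged illustration of ours, not part of the paper.

def theorem2_conditions_ok(pi1, m, k, v):
    # Check (C1)-(C3) of Theorem 2 for a permutation pi1 of {1, ..., m}.
    assert sorted(pi1) == list(range(1, m + 1))
    p = lambda idx: pi1[idx - 1]
    # (C1): the first v images are exactly {1, ..., v} (vacuous if v = 0).
    if v > 0 and set(pi1[:v]) != set(range(1, v + 1)):
        return False
    # (C2): pi1(m-k+1) < ... < pi1(m), with pi1(m) = m.
    tail = [p(m - k + a) for a in range(1, k + 1)]
    if tail != sorted(tail) or p(m) != m:
        return False
    # (C3): pi1(beta) < pi1(m-k+alpha) implies pi1(beta-1) < pi1(m-k+alpha).
    for a in range(1, k):
        for beta in range(2, m - k + 1):
            if p(beta) < p(m - k + a) and not p(beta - 1) < p(m - k + a):
                return False
    return True

print(theorem2_conditions_ok((1, 2, 4, 3, 5), m=5, k=2, v=0))   # Example 4's pi1 -> True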
Proof: Similarly, we consider L_1 = 2^n and L_2 = 2^{m-1} + \sum_{\alpha=1}^{k-1} 2^{\pi_1(m-k+\alpha)-1} + 2^v. We would like to prove that

    \sum_{C \in G} \rho(C; u_1, u_2) = \sum_{c \in G} \sum_{g=0}^{L_1-1-u_1} \sum_{i=0}^{L_2-1-u_2} \xi^{c_{g+u_1, i+u_2} - c_{g,i}} = 0      (37)

for 0 <= u_1 < 2^n, 0 <= u_2 < 2^{m-1} + \sum_{\alpha=1}^{k-1} 2^{\pi_1(m-k+\alpha)-1} + 2^v and (u_1, u_2) != (0, 0). From (4) we have

    c = (q/2) ( \sum_{l=1}^{m-k-1} x_{\pi_1(l)} x_{\pi_1(l+1)} + \sum_{s=1}^{n-1} y_{\pi_2(s)} y_{\pi_2(s+1)} + x_{\pi_1(m)} y_{\pi_2(n)} )
        + \sum_{l=1}^{m-k} \mu_l x_{\pi_1(l)} x_{\pi_1(m)} + \sum_{l=1}^{m} p_l x_l + \sum_{s=1}^{n} \kappa_s y_s + p_0 \cdot 1.              (38)

Let h = g + u_1 and j = i + u_2 for any integers g and i. Next, we discuss six cases to complete the proof.

Case 1: Assume u_1 > 0, u_2 >= 0, and g_{\pi_2(1)} != h_{\pi_2(1)}. For any array c \in G we can find the array c' = c + (q/2) y_{\pi_2(1)} \in G, for which

    c_{h,j} - c_{g,i} - c'_{h,j} + c'_{g,i} = (q/2)( g_{\pi_2(1)} - h_{\pi_2(1)} ) \equiv q/2  (mod q).                                       (39)

Since g_{\pi_2(1)} != h_{\pi_2(1)}, we have

    \xi^{c_{h,j} - c_{g,i}} / \xi^{c'_{h,j} - c'_{g,i}} = \xi^{q/2} = -1.                                                                     (40)

Thus,

    \xi^{c_{h,j} - c_{g,i}} + \xi^{c'_{h,j} - c'_{g,i}} = 0.                                                                                  (41)

Case 2: Suppose u_1 > 0, u_2 >= 0, and g_{\pi_2(1)} = h_{\pi_2(1)}. Let \beta be the smallest integer such that g_{\pi_2(\beta)} != h_{\pi_2(\beta)}. We define g' and h' as two integers which are distinct from g and h, respectively, only in the position \pi_2(\beta - 1). Then, similar to Case 2 of Theorem 1, we have

    \xi^{c_{h,j} - c_{g,i}} + \xi^{c_{h',j} - c_{g',i}} = 0.                                                                                  (42)

Case 3: Suppose i_m != j_m, u_1 = 0, and u_2 > 0. We let g' be an integer distinct from g only in one position, i.e., g'_{\pi_2(n)} = 1 - g_{\pi_2(n)}. Similar to Case 3 of Theorem 1, we have \xi^{c_{g,j} - c_{g,i}} + \xi^{c_{g',j} - c_{g',i}} = 0.

Case 4: If u_1 = 0, u_2 > 0, and i_{\pi_1(1)} != j_{\pi_1(1)} or i_{\pi_1(m-k+\alpha)} != j_{\pi_1(m-k+\alpha)}, we can find the array c' = c + (q/2) x_{\pi_1(1)} \in G or c' = c + (q/2) x_{\pi_1(m-k+\alpha)} \in G for any array c \in G. Similar to Case 1, we obtain \xi^{c_{g,j} - c_{g,i}} + \xi^{c'_{g,j} - c'_{g,i}} = 0.

Case 5: Suppose u_1 = 0, u_2 > 0, i_{\pi_1(1)} = j_{\pi_1(1)}, and i_{\pi_1(m-k+\alpha)} = j_{\pi_1(m-k+\alpha)} for all \alpha = 1, 2, ..., k. Suppose that \alpha' is the largest non-negative integer satisfying i_{\pi_1(m-k+\alpha')} = j_{\pi_1(m-k+\alpha')} = 0, and let \beta be the smallest integer satisfying i_{\pi_1(\beta)} != j_{\pi_1(\beta)}. Here, we have i_s = j_s = 0 for s = \pi_1(m-k+\alpha') + 1, \pi_1(m-k+\alpha') + 2, ..., m - 1 with s != \pi_1(m-k+\alpha) for \alpha = \alpha' + 1, \alpha' + 2, ..., k. Hence, \pi_1(\beta) < \pi_1(m-k+\alpha') and \pi_1(\beta-1) < \pi_1(m-k+\alpha') according to condition (C3). Let i' and j' be integers that differ from i and j, respectively, in the position \pi_1(\beta - 1). Similar to Case 2, we have

    \xi^{c_{g,j} - c_{g,i}} + \xi^{c_{g,j'} - c_{g,i'}} = 0.                                                                                  (43)

Case 6: Suppose u_1 = 0, u_2 > 0, i_{\pi_1(1)} = j_{\pi_1(1)}, and i_{\pi_1(m-k+\alpha)} = j_{\pi_1(m-k+\alpha)} = 1 for all \alpha = 1, 2, ..., k. Let \beta be the smallest integer satisfying i_{\pi_1(\beta)} != j_{\pi_1(\beta)}. Since i_s = j_s = 0 for s = v+1, v+2, ..., m-k with s != \pi_1(m-k+\alpha) for \alpha = 1, 2, ..., k-1, we obtain \pi_1(\beta) <= v, implying \pi_1(\beta-1) <= v. Similar to Case 2, we have

    \xi^{c_{g,j} - c_{g,i}} + \xi^{c_{g,j'} - c_{g,i'}} = 0.                                                                                  (44)

From Cases 1 to 6, the theorem is proved.

Remark 2: Taking \sigma_2(l) = \pi_2(n - l + 1) for l = 1, 2, ..., n and \pi_1(m-k+\alpha) = m - k + \alpha for \alpha = 1, 2, ..., k in Theorem 2, (35) can be represented as

    f = (q/2) ( \sum_{l=1}^{m-k-1} x_{\pi_1(l)} x_{\pi_1(l+1)} + \sum_{s=1}^{n-1} y_{\sigma_2(s)} y_{\sigma_2(s+1)} + x_m y_{\sigma_2(1)} )
        + \sum_{l=1}^{m-k} \mu_l x_{\pi_1(l)} x_m + \sum_{l=1}^{m} p_l x_l + \sum_{s=1}^{n} \kappa_s y_s + p_0,                               (45)

where p_l, \kappa_s \in Z_q. We can see that Lemma 3 is a special case of Theorem 2 obtained by simply setting k = 1, \mu_{m-1} = q/2, and \mu_l = 0 for l = 1, ..., m - 2.

Example 4: Taking q = 2, m = 5, n = 2, k = 2, and v = 0, we let \pi_1 = (1, 2, 4, 3, 5) and \pi_2 = (1, 2). The generalized Boolean function is f = x_1 x_2 + x_2 x_4 + y_1 y_2 + x_5 y_1, obtained by setting p_l = \kappa_s = 0. The array set G from (36) is a GCAS of set size 8 when the truncated size is L_1 = 4 and L_2 = 21. We let G = {c_0, c_1, ..., c_7} and list the constituent arrays in Table III; their AACF sum is shown in Fig. 3.
1695
+ shown as Fig. 3.
1696
+ IV. SIMULATION RESULTS
1697
+ In this section, we present the numerical results including the power radiation pattern and
1698
+ BER performance by using our proposed 2D GCASs for massive MIMO systems with URA.
1699
+ January 4, 2023
1700
+ DRAFT
1701
+
1702
+ 17
1703
+ TABLE III
+ THE CONSTRUCTED (8, 4, 21)-GCAS IN EXAMPLE 4
+ [The eight constituent arrays c0, c1, . . . , c7 of the (8, 4, 21)-GCAS, each a 4 × 21 binary array over Z2, are listed entrywise in the original table.]
+ A. Power Radiation Pattern
2498
+ According to (16), the power radiation pattern Σ_{n=0}^{N−1} |vec(A(ϕ, θ))^T vec(Wn)|^2 can be
+ obtained. We first consider the massive MIMO system equipped with a URA of size 4 × 33,
2503
+ Fig. 3. The summation of autocorrelations of constituent arrays in the GCAS in Example 4.
2522
+ i.e., L1 = 4 and L2 = 33. We take the GCAS G = {c0, c1, c2, c3} listed in Table II to
+ generate the precoding matrices {W0, W1, W2, W3} = {(−1)^{c0}, (−1)^{c1}, (−1)^{c2}, (−1)^{c3}} with
+ the omnidirectional property. The power radiation pattern of the GCAS-based scheme with array
+ size 4 × 33 is perfectly omnidirectional, as illustrated in Fig. 4(a).
2526
+ For comparison, we also show the power radiation patterns of precoding matrices based on
+ Zadoff-Chu (ZC) sequences and on random matrices whose elements are randomly drawn from
+ {+1, −1}. The ZC-based precoder consists of four 4 × 33 precoding matrices obtained from a ZC
+ sequence of length 4 and a ZC sequence of length 33 [21]. Fig. 4(b) illustrates the power radiation
+ pattern of the ZC-based precoder, which is not omnidirectional. The random-matrix-based precoder
+ also consists of four 4 × 33 precoding matrices, whose elements are selected from {+1, −1} with
+ equal probability. Fig. 4(c) shows its power radiation pattern, which again is not omnidirectional.
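For readers who want to reproduce such pattern plots, the following sketch is an added illustration and not part of the paper; the paper's exact array-response model is fixed by its equation (16), so the half-wavelength URA steering matrix and the angle convention used below are assumptions made only for this example.

    import numpy as np

    def steering_matrix(phi, theta, L1, L2):
        # Assumed URA steering matrix A(phi, theta) with half-wavelength spacing.
        a1 = np.exp(1j * np.pi * np.arange(L1) * np.sin(theta) * np.cos(phi))
        a2 = np.exp(1j * np.pi * np.arange(L2) * np.sin(theta) * np.sin(phi))
        return np.outer(a1, a2)

    def radiation_pattern(Ws, phis, thetas):
        # Power radiation pattern sum_n |vec(A)^T vec(W_n)|^2 over an angle grid.
        L1, L2 = Ws[0].shape
        P = np.zeros((len(thetas), len(phis)))
        for it, theta in enumerate(thetas):
            for ip, phi in enumerate(phis):
                a = steering_matrix(phi, theta, L1, L2).ravel()
                P[it, ip] = sum(abs(a @ W.ravel()) ** 2 for W in Ws)
        return P

    # Usage example with random +/-1 precoders; for a GCAS-based precoder the
    # summed pattern is angle-independent.
    rng = np.random.default_rng(0)
    Ws = [np.where(rng.random((4, 33)) < 0.5, 1.0, -1.0) for _ in range(4)]
    P = radiation_pattern(Ws, np.linspace(0, 2 * np.pi, 60), np.linspace(0, np.pi / 2, 30))
    print(P.shape)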
2536
+ Next, we consider the massive MIMO system equipped with a URA of size 4 × 21, i.e.,
2537
+ (a) GCAS-based precoding.
2542
+ (b) ZC-based precoding.
2543
+ (c) Random-matrix-based precoding.
2544
+ Fig. 4. Power radiation pattern with 4 × 33 URA and 4 × 4 STBC.
2545
+ L1 = 4 and L2 = 21. We use the GCAS G = {c0, c1, · · · , c7} listed in Table III to form the precoding
+ matrices {W0, W1, · · · , W7} = {(−1)^{c0}, (−1)^{c1}, · · · , (−1)^{c7}}. The power radiation pattern of the
+ GCAS-based scheme with array size 4 × 21 is shown in Fig. 5(a), where the perfect omnidirectional
+ property can be observed. We also see that the power radiation patterns of the ZC-based precoder
+ and the random-matrix-based precoder shown in Fig. 5(b) and Fig. 5(c) are not omnidirectional. The
+ ZC-based precoding matrices are obtained from a ZC sequence of length 4 and a ZC sequence of
+ length 21 [21].
2552
+ B. Bit Error Rate Performance
2553
+ In this subsection, we present the BER performance of our proposed 2D GCAS-based schemes.
2554
+ We first consider the massive MIMO system equipped with a URA of size 4×33. We let N = 4
2555
+ (a) GCAS-based precoding.
2562
+ (b) ZC-based precoding.
2563
+ (c) Random-matrix-based precoding.
2564
+ Fig. 5. Power radiation pattern with 4 × 21 URA and 8 × 8 STBC.
2565
+ and the 4 × 4 orthogonal real STBC is given by
+       ⎡  s0  −s1  −s2  −s3 ⎤
+   S = ⎢  s1   s0   s3  −s2 ⎥
+       ⎢  s2  −s3   s0   s1 ⎥
+       ⎣  s3   s2  −s1   s0 ⎦ ,        (46)
2599
+ where s0, s1, s2, s3 are binary phase shift keying (BPSK) modulated symbols. We employ
+ maximum likelihood (ML) decoding here. For each realization, the elevation and azimuth
+ angles are uniformly distributed over [0, π/2] and [0, 2π], respectively. For com-
+ parison, the ZC-based precoder and the random-matrix-based precoder are the same as in
+ Section IV-A. The BER performances of the three schemes are depicted in Fig. 6. We can
+ see that the 2D GCAS-based scheme outperforms the others. At a BER of 10^−4, it provides 1.6 dB
+ and 3.6 dB gains over the ZC-based scheme and the random-matrix-based scheme, respectively.
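As a quick sanity check on (46), the sketch below (an added illustration, not from the paper) builds the 4 × 4 real orthogonal STBC for arbitrary BPSK symbols and verifies that S Sᵀ equals (s0² + s1² + s2² + s3²) times the identity, which is the property that makes symbol-wise ML decoding simple.

    import numpy as np

    def stbc4(s):
        # 4 x 4 real orthogonal space-time block code from equation (46).
        s0, s1, s2, s3 = s
        return np.array([
            [s0, -s1, -s2, -s3],
            [s1,  s0,  s3, -s2],
            [s2, -s3,  s0,  s1],
            [s3,  s2, -s1,  s0],
        ])

    s = np.array([1, -1, 1, 1], dtype=float)   # BPSK symbols
    S = stbc4(s)
    # Orthogonality: S @ S.T should equal (sum of squares) * identity
    print(np.allclose(S @ S.T, np.sum(s**2) * np.eye(4)))   # True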
2606
+ [Figure: BER versus SNR (dB) curves for GCAS-based, ZC-based, and random-matrix-based precoding (N = 4).]
+ Fig. 6. BER performance of the different schemes for a 4 × 33 URA.
2630
+ Next, we consider the massive MIMO system equipped with a URA of size 4 × 21. We now
+ employ an 8 × 8 STBC; the 8 × 8 orthogonal real STBC is given by
+       ⎡  s0   s1   s2   s3   s4   s5   s6   s7 ⎤
+       ⎢ −s1   s0   s3  −s2   s5  −s4  −s7   s6 ⎥
+       ⎢ −s2  −s3   s0   s1   s6   s7  −s4  −s5 ⎥
+   S = ⎢ −s3   s2  −s1   s0   s7  −s6   s5  −s4 ⎥
+       ⎢ −s4  −s5  −s6  −s7   s0   s1   s2   s3 ⎥
+       ⎢ −s5   s4  −s7   s6  −s1   s0  −s3   s2 ⎥
+       ⎢ −s6   s7   s4  −s5  −s2   s3   s0  −s1 ⎥
+       ⎣ −s7  −s6   s5   s4  −s3  −s2   s1   s0 ⎦ ,        (47)
2736
+ where s0, s1, · · · , s7 are BPSK modulated symbols. We also take the ZC-based precoding and
2737
+ random-matrix-based precoding for comparison. The BER comparison of these
+ three schemes is depicted in Fig. 7. At a BER of 10^−4, the GCAS-based scheme provides 0.2 dB and 1.8 dB
+ gains over the ZC-based scheme and the random-matrix-based scheme, respectively. As a result,
2740
+ the 2D GCASs are good candidates as precoding matrices for omnidirectional transmission in
2741
+ [Figure: BER versus SNR (dB) curves for GCAS-based, ZC-based, and random-matrix-based precoding (N = 8).]
+ Fig. 7. BER performance of the different schemes for a 4 × 21 URA.
2765
+ massive MIMO systems.
2766
+ V. CONCLUSION
2767
+ In this paper, constructions of 2D GCASs with flexible array sizes have been proposed in
2768
+ Theorems 1 and 2. Our constructions can be obtained directly from 2D GBFs without the aid
2769
+ of special sequences. Besides, our proposed GCASs have flexible array sizes which can fit
+ more antenna configurations. Furthermore, Theorem 2 includes the results in [32] as a special
+ case. Simulation results showed that omnidirectional transmission can be achieved when
+ the precoding matrices are based on the proposed GCASs. Owing to their omnidirectional power
+ radiation patterns, the proposed schemes also achieve better BER performance, whereas the
+ ZC-based and random-matrix-based schemes perform worse because their power radiation patterns
+ are not ideally omnidirectional.
2775
+ Although Theorems 1 and 2 provide direct constructions of 2D GCASs, the first dimension
+ has size L1 limited to 2^n. Therefore, future work includes extending the constructions to
+ 2D GCASs in which both dimensions have non-power-of-two sizes.
2778
+ REFERENCES
2783
+ [1] M. J. E. Golay, “Complementary series,” IRE Trans. Inf. Theory, vol. IT-7, pp. 82–87, Apr. 1961.
2784
+ [2] C.-C. Tseng and C. L. Liu, “Complementary sets of sequences,” IEEE Trans. Inf. Theory, vol. IT-18, no. 5, pp. 644–652,
2785
+ Sep. 1972.
2786
+ [3] N. Suehiro and M. Hatori, “N-shift cross-orthogonal sequences,” IEEE Trans. Inf. Theory, vol. 34, no. 1, pp. 143–146,
2787
+ Jan. 1988.
2788
+ [4] A. Pezeshki, A. R. Calderbank, W. Moran, and S. D. Howard, “Doppler resilient Golay complementary waveforms,” IEEE
2789
+ Trans. Inf. Theory, vol. 54, no. 9, pp. 4254–4266, Sep. 2008.
2790
+ [5] P. Spasojevic and C. N. Georghiades, “Complementary sequences for ISI channel estimation,” IEEE Trans. Inf. Theory,
2791
+ vol. 47, no. 3, pp. 1145–1152, Mar. 2001.
2792
+ [6] D. Su, Y. Jiang, X. Wang, and X. Gao, “Omnidirectional precoding for massive MIMO with uniform rectangular array—part
2793
+ I: complementary codes-based schemes,” IEEE Trans. Signal Process., vol. 67, no. 18, pp. 4761–4771, Sep. 2019.
2794
+ [7] Z. Liu, Y. Li, and Y. L. Guan, “New constructions of general QAM Golay complementary sequences,” IEEE Trans. Inf.
2795
+ Theory, vol. 59, no. 11, pp. 7684–7692, Nov. 2013.
2796
+ [8] R. van Nee, “OFDM codes for peak-to-average power reduction and error correction,” in Proc. IEEE Global Telecommun.
2797
+ Conf., London, U.K., Nov. 1996, pp. 740–744.
2798
+ [9] J. A. Davis and J. Jedwab, “Peak-to-mean power control in OFDM, Golay complementary sequences, and Reed-Muller
2799
+ codes,” IEEE Trans. Inf. Theory, vol. 45, no. 7, pp. 2397–2417, Nov. 1999.
2800
+ [10] K. G. Paterson, “Generalized Reed-Muller codes and power control in OFDM modulation,” IEEE Trans. Inf. Theory,
2801
+ vol. 46, no. 1, pp. 104–120, Jan. 2000.
2802
+ [11] K.-U. Schmidt, “Complementary sets, generalized Reed-Muller codes, and power control for OFDM,” IEEE Trans. Inf.
2803
+ Theory, vol. 53, no. 2, pp. 808–814, Feb. 2007.
2804
+ [12] C.-Y. Chen, C.-H. Wang, and C.-C. Chao, “Complementary sets and Reed-Muller codes for peak-to-average power ratio
2805
+ reduction in OFDM,” in Proc. 16th Int. Symp. AAECC, LNCS 3857, Las Vegas, NV, Feb. 2006, pp. 317–327.
2806
+ [13] C.-Y. Chen, “Complementary sets of non-power-of-two length for peak-to-average power ratio reduction in OFDM,” IEEE
2807
+ Trans. Inf. Theory, vol. 62, no. 12, pp. 7538–7545, Dec. 2016.
2808
+ [14] Z. Liu, Y. L. Guan, and H. H. Chen, “Fractional-delay-resilient receiver design for interference-free MC-CDMA
2809
+ communications based on complete complementary codes,” IEEE Trans. Wireless Commun., vol. 14, no. 3, pp. 1226–
2810
+ 1236, Mar. 2015.
2811
+ [15] H.-H. Chen, J.-F. Yeh, and N. Suehiro, “A multicarrier CDMA architecture based on orthogonal complete complementary
2812
+ codes for new generations of wideband wireless communications,” IEEE Commun. Mag., vol. 39, pp. 126–134, Oct. 2001.
2813
+ [16] N. Suehiro, “A signal design without co-channel interference for approximately synchronized CDMA systems,” IEEE J.
2814
+ Sel. Areas Commun., vol. 12, pp. 837–841, Jun. 1994.
2815
+ [17] S.-M. Tseng and M. R. Bell, “Asynchronous multicarrier DS-CDMA using mutually orthogonal complementary sets of
2816
+ sequences,” IEEE Trans. Commun., vol. 48, no. 1, pp. 53–59, Jan. 2000.
2817
+ [18] J. M. Groenewald and B. T. Maharaj, “MIMO channel synchronization using Golay complementary pairs,” in Proc.
2818
+ AFRICON 2007, Windhoek, South Africa, Sep. 2007, pp. 1–5.
2819
+ [19] S. Boyd, “Multitone signals with low crest factor,” IEEE Trans. Circuits Syst., vol. CAS-33, no. 10, pp. 1018–1022, Oct.
2820
+ 1986.
2821
+ [20] A.-A. Lu, X. Gao, X. Meng, and X.-G. Xia, “Omnidirectional precoding for 3D massive MIMO with uniform planar
2822
+ arrays,” IEEE Trans. Wireless Commun., vol. 19, no. 4, pp. 2628–2642, Apr. 2020.
2823
+ January 4, 2023
2824
+ DRAFT
2825
+
2826
+ 24
2827
+ [21] F. Li, Y. Jiang, C. Du, and X. Wang, “Construction of Golay complementary matrices and its applications to MIMO
2828
+ omnidirectional transmission,” IEEE Trans. Signal Process., vol. 69, pp. 2100–2113, Mar. 2021.
2829
+ [22] X. Meng, X. Gao, and X.-G. Xia, “Omnidirectional precoding based transmission in massive MIMO systems,” IEEE Trans.
2830
+ Commun., vol. 64, no. 1, pp. 174–186, Jan. 2016.
2831
+ [23] Y. Jiang, F. Li, X. Wang, and J. Li, “Autocorrelation complementary matrices,” in Proc. 53rd Asilomar Conference on
2832
+ Signals, Systems, and Computers, Pacific Grove, CA, USA, Nov. 2019, pp. 1596–1600.
2833
+ [24] S. Matsufuji, R. Shigemitsu, Y. Tanada, and N. Kuroyanagi, “Construction of complementary arrays,” in Proc. Joint 1ST
2834
+ Workshop on Mobile Future Symp. Trends Commun. (SympoTIC), Bratislave, Slovakia, Oct. 2004, pp. 78–81.
2835
+ [25] Z. Wang and G. Gong, “Constructions of complementary sequence sets and complete complementary codes by 2-level
2836
+ autocorrelation sequences and permutation polynomials,” May 2020. [Online]. Available: https://arxiv.org/abs/2005.05825
2837
+ [26] Z. Wang, D. Ma, G. Gong, and E. Xue, “New construction of complementary sequence (or array) sets and complete
2838
+ complementary codes,” IEEE Trans. Inf. Theory, vol. 67, no. 7, pp. 4902–4928, Jul. 2021.
2839
+ [27] C.-Y. Pai and C.-Y. Chen, “Constructions of two-dimensional Golay complementary array pairs based on generalized
2840
+ Boolean functions,” in Proc. IEEE Int. Symp. Inf. Theory, Los Angeles, California, USA, Jun. 2020, pp. 2931–2935.
2841
+ [28] C.-Y. Pai and C.-Y. Chen, “Two-dimensional Golay complementary array pairs/sets with bounded row and column sequence
2842
+ PAPRs,” IEEE Trans. Commun., vol. 70, no. 6, pp. 3695–3707, Jun. 2022.
2843
+ [29] Z. Liu, Y. L. Guan, and U. Parampalli, “New complete complementary codes for peak-to-mean power control in multi-
2844
+ carrier CDMA,” IEEE Trans. Commun., vol. 62, pp. 1105–1113, Mar. 2014.
2845
+ [30] C.-Y. Pai, Z. Liu, Y.-Q. Zhao, Z.-M. Huang, and C.-Y. Chen, “Designing two-dimensional complete complementary codes
2846
+ for omnidirectional transmission in massive MIMO systems,” in Proc. IEEE Int. Symp. Inf. Theory, Espoo, Finland, Jun.
2847
+ 2022, pp. 1699–1704.
2848
+ [31] T. Liu, X. Men, Y. Li, and X. Chen, “Constructions of 2-D Golay complementary array sets for MIMO omnidirectional
2849
+ transmission,” IEEE Commun. Lett., pp. 1459 – 1463, Jul. 2022.
2850
+ [32] B. Shen, Y. Yang, and R. Ren, “Three constructions of Golay complementary array sets,” Adv. Math. Commun., Oct. 2022.
2851
3dAzT4oBgHgl3EQfR_t3/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4dE4T4oBgHgl3EQfBAsA/content/2301.04847v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1816fde42b9b8e13d72fc7524241e73342542e9c9fac503e1b55e3664f4e734
3
+ size 3187519
4dE4T4oBgHgl3EQfBAsA/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5ada987d74c2b64fd024838f7106e46dc14c05a6118e8fc40f72adb5814ffb9
3
+ size 1507373
4dE4T4oBgHgl3EQfBAsA/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:181a1a843998c439b30444446338a27c2c5f58150ac94b31c078dec3a5fdbe9a
3
+ size 64074
5dAzT4oBgHgl3EQf9_4T/content/2301.01926v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6934c690326cace8a20c0d34f3cbabf1b521dd089233f0d9142e0c632d581f6
3
+ size 1148333
5dAzT4oBgHgl3EQf9_4T/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afd3b9234656631439478ba13c2bc5c123f5642056dba6f24de37c91dd97d1a2
3
+ size 4587565
5dE1T4oBgHgl3EQfBAJm/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d238a8c0c5f0261c15ba5bb91a77b8b4a3e383ccd9baf0701214bbd1278aa89e
3
+ size 144586
5dE2T4oBgHgl3EQfOgbi/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1d5badd170e0563b9d846903c829812126852732c16ccd8ebcd074c542153b0
3
+ size 11796525
5tAzT4oBgHgl3EQf9_5e/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:83dbc1813869bbf6ec09c8772343f13d49ee94a80cbd61fd4bd3d3708a1a9a23
3
+ size 6488109
79E4T4oBgHgl3EQfdAwp/content/tmp_files/2301.05087v1.pdf.txt ADDED
@@ -0,0 +1,2672 @@
 
1
+ arXiv:2301.05087v1 [math.DG] 12 Jan 2023
2
+ SCALAR CURVATURE RIGIDITY OF CONVEX
3
+ POLYTOPES
4
+ SIMON BRENDLE
5
+ Abstract. We prove a scalar curvature rigidity theorem for convex
6
+ polytopes. The proof uses the Fredholm theory for Dirac operators on
7
+ manifolds with boundary.
8
+ A variant of a theorem of Fefferman and
9
+ Phong plays a central role in our analysis.
10
+ 1. Introduction
11
+ Let Ω be a compact polytope in Rn with non-empty interior. We may
12
+ write Ω = ⋂_{α∈A} {uα ≤ 0}, where A is a finite set and the uα are linear
14
+ functions in Rn. For each α ∈ A, we denote by Nα ∈ Sn−1 the outward-
15
+ pointing unit normal vector to the halfspace {uα ≤ 0} with respect to the
16
+ Euclidean metric.
17
+ Let g be a Riemannian metric which is defined on an open set containing
18
+ Ω. For each α ∈ A, we denote by να the outward-pointing unit normal
19
+ vector to the halfspace {uα ≤ 0} with respect to the metric g. We will make
20
+ the following assumption:
21
+ Matching Angle Hypothesis. If x is a point in ∂Ω and α1, α2 ∈ A satisfy
22
+ uα1(x) = uα2(x) = 0, then ⟨να1, να2⟩ = ⟨Nα1, Nα2⟩ at the point x. Here, the
23
+ inner product ⟨να1, να2⟩ is computed with respect to the metric g, and the
24
+ inner product ⟨Nα1, Nα2⟩ is the standard inner product in Rn.
25
+ Theorem 1.1. Suppose that n ≥ 3 is an odd integer, and Ω is a compact
26
+ polytope in Rn with non-empty interior. Let g be a Riemannian metric which
27
+ is defined on an open set containing Ω and has nonnegative scalar curvature
28
+ at each point in Ω. For each α ∈ A, we assume that the mean curvature of
29
+ the hypersurface {uα = 0} with respect to g is nonnegative at each point in
30
+ Ω ∩ {uα = 0}. Moreover, we assume that the Matching Angle Hypothesis is
31
+ satisfied. Then the Ricci tensor of g vanishes at each point in Ω.
32
+ Theorem 1.1 also holds in the even-dimensional case. This can be seen
33
+ by considering the Cartesian product Ω × [0, 1] ⊂ Rn+1.
34
+ Scalar curvature comparison theorems for polytopes were first studied in
35
+ seminal work of Gromov [6],[7],[8]. Li [9] has used minimal surface techniques
36
+ to prove a scalar curvature comparison theorem for certain polytopes in
37
+ The author was supported by the National Science Foundation under grant DMS-2103573
+ and by the Simons Foundation. The author acknowledges the hospitality of Tübingen
39
+ University, where part of this work was carried out.
40
44
+ dimension 3. Wang, Xie, and Yu [10] have proposed a different approach to
45
+ this problem which is based on the study of Dirac operators on manifolds
46
+ with corners.
47
+ In this paper, we describe another approach to this problem. As in [10], we
48
+ employ a spinor approach. In contrast to [10], we work with boundary value
49
+ problems for Dirac operators on smooth domains, which are well understood
50
+ thanks to the work of Bär and Ballmann [1],[2].
51
+ In the following, we outline the main steps involved in the proof of The-
52
+ orem 1.1. We approximate a given convex polytope Ω by a one-parameter
53
+ family of smooth convex domains Ωλ. On each domain Ωλ, we solve the
54
+ Dirac equation for an m-tuple of spinors s = (s1, . . . , sm) with a suitable
55
+ local boundary condition. To prove the existence of a solution satisfying
56
+ that particular boundary condition, we use the Fredholm theory developed
57
+ by Bär and Ballmann [1],[2] together with the homotopy invariance of the
58
+ Fredholm index. Having constructed an m-tuple of harmonic spinors on Ωλ
59
+ satisfying this boundary condition, we apply a Weitzenböck formula, and
60
+ integrate over Ωλ. The resulting integral formula contains a term involving
61
+ the scalar curvature, as well as a boundary term. Unfortunately, it is not
62
+ clear if the boundary term has a favorable sign. We are able to control the
63
+ boundary integral by adapting a theorem due to Fefferman and Phong [4].
64
+ 2. A boundary value problem for the Dirac operator on a
65
+ smooth domain
66
+ Let m = 2^⌊n/2⌋ denote the dimension of the space of spinors on Rn. Let
68
+ {E1, . . . , En} denote the standard basis of Rn. Throughout this section, we
69
+ fix an orthonormal basis {¯s1, . . . , ¯sm} of the space of spinors on flat Rn.
70
+ We define ωaαβ = ⟨Ea · ¯sα, ¯sβ⟩ for a = 1, . . . , n and α, β = 1, . . . , m. The
71
+ matrices ω1, . . . , ωn ∈ End(Cm) are skew-Hermitian, so that ωaαβ = −ωaβα.
72
+ Moreover, ωaωb + ωbωa = −2δab id. In other words,
73
+ Σ_{β=1}^{m} (ωaαβ ωbβγ + ωbαβ ωaβγ) = −2 δab δαγ.
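For concreteness, the anticommutation relation is easy to check numerically. The sketch below is an added illustration; the specific matrices are one standard choice for n = 3 (ω_a = i σ_a with σ_1, σ_2, σ_3 the Pauli matrices), which is an assumption made for the example and not the paper's choice of basis.

    import numpy as np

    # One realization of the Clifford relations for n = 3: omega_a = i * sigma_a.
    sigma = [
        np.array([[0, 1], [1, 0]], dtype=complex),        # sigma_1
        np.array([[0, -1j], [1j, 0]], dtype=complex),      # sigma_2
        np.array([[1, 0], [0, -1]], dtype=complex),        # sigma_3
    ]
    omega = [1j * s for s in sigma]

    I2 = np.eye(2, dtype=complex)
    for a in range(3):
        # skew-Hermitian: omega_a^* = -omega_a
        assert np.allclose(omega[a].conj().T, -omega[a])
        for b in range(3):
            anti = omega[a] @ omega[b] + omega[b] @ omega[a]
            assert np.allclose(anti, -2 * (a == b) * I2)
    print("Clifford relations verified for n = 3")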
77
+ We begin by stating a basic algebraic fact which will be needed later.
78
+ Lemma 2.1. Assume that n is odd. Then there is no non-zero element of
79
+ End(Cm) which anti-commutes with ωa ∈ End(Cm) for each a = 1, . . . , n.
80
+ Proof. We recall the definition of the spin representation in odd dimen-
81
+ sions. Let {E1, . . . , En} denote the standard basis of Rn. For k = 1, . . . , ⌊n/2⌋,
+ we define wk = E2k−1 − iE2k ∈ Cn. The spinor space is defined as the ex-
+ terior algebra Λ∗W, where W = span{wk : k = 1, . . . , ⌊n/2⌋} ⊂ Cn. For each
+ k ∈ {1, . . . , ⌊n/2⌋}, we define a linear map Pk ∈ End(Λ∗W) by
88
+ Pk(wj1 ∧ . . . ∧ wjr) = wk ∧ wj1 ∧ . . . ∧ wjr.
89
92
+ Moreover, for each k ∈ {1, . . . , ⌊n/2⌋}, we define a linear map Qk ∈ End(Λ∗W) by
95
+ Qk(wj1 ∧ . . . ∧ wjr) = 0,
96
+ Qk(wk ∧ wj1 ∧ . . . ∧ wjr) = wj1 ∧ . . . ∧ wjr
97
+ for k /∈ {j1, . . . , jr}. Then PkPl + PlPk = QkQl + QlQk = 0 and PkQl +
98
+ QlPk = δkl id for k, l ∈ {1, . . . , ⌊n/2⌋}. Finally, we define a linear map S ∈
100
+ End(Λ∗W) so that
101
+ S(wj1 ∧ . . . ∧ wjr) = wj1 ∧ . . . ∧ wjr if r is even, and S(wj1 ∧ . . . ∧ wjr) = −wj1 ∧ . . . ∧ wjr if r is odd.
107
+ Clearly, PkS + SPk = 0, QkS + SQk = 0, and S2 = id.
108
+ Consequently,
109
+ there is a natural algebra homomorphism from the Clifford algebra ClC(n)
110
+ to End(Λ∗W) which maps wk to i
111
+
112
+ 2 Pk, ¯wk to i
113
+
114
+ 2 Qk, and En to iS. It is
115
+ well known (see [5], Lemma 20.9) that
116
+ span{Pk1 · · · PkrQl1 · · · Qls : r + s is even}
117
+ = End(ΛevenW) ⊕ End(ΛoddW)
118
+ and
119
+ span{Pk1 · · · PkrQl1 · · · Qls : r + s is odd}
120
+ = Hom(ΛevenW, ΛoddW) ⊕ Hom(ΛoddW, ΛevenW).
121
+ We claim that there is no non-zero element of End(Λ∗W) which anti-commutes
122
+ with Pk, Qk, S for each k ∈ {1, . . . , ⌊n/2⌋}.
124
+ Suppose that L ∈ End(Λ∗W)
125
+ is such an element.
126
+ Since L anti-commutes with S, it follows that L ∈
127
+ Hom(ΛevenW, ΛoddW)⊕Hom(ΛoddW, ΛevenW). Since L anti-commutes with
128
+ Pk, Qk for each k ∈ {1, . . . , ⌊n/2⌋}, it follows that L anti-commutes with every
130
+ element of Hom(ΛevenW, ΛoddW)⊕Hom(ΛoddW, ΛevenW). This implies that
131
+ L = 0. This completes the proof of Lemma 2.1.
132
+ Assume that Ω is a domain in Rn with smooth boundary ∂Ω = Σ. Let g
133
+ be a Riemannian metric on Ω. We denote by ν the outward-pointing unit
134
+ normal vector field with respect to the metric g. Let ∇ denote the spin
135
+ connection. The Dirac operator is defined by
136
+ Ds = Σ_{i=1}^{n} ei · ∇_{ei} s,
141
+ where {e1, . . . , en} is a local orthonormal frame on Ω. The boundary Dirac
142
+ operator DΣ is given by
143
+ DΣs = Σ_{i=1}^{n−1} ν · ei · ∇_{ei} s + (1/2) H s
149
+ at each point on Σ, where {e1, . . . , en−1} is a local orthonormal frame on Σ.
150
+ In the remainder of this section, we consider the Dirac operator act-
151
+ ing on m-tuples of spinors with a suitable local boundary condition of
152
155
+ Lopatinsky-Shapiro type. To formulate the boundary condition, we assume
156
+ that N : Σ → Sn−1 is a given smooth map.
157
+ Definition 2.2. Consider an m-tuple of spinors s = (s1, . . . , sm). At each
158
+ point on Σ, we define
159
+ (χs)α = − Σ_{a=1}^{n} Σ_{β=1}^{m} ⟨N, Ea⟩ ωaαβ ν · sβ
+ and
+ (Bs)α = Σ_{i=1}^{n−1} Σ_{a=1}^{n} Σ_{β=1}^{m} ⟨dN(ei), Ea⟩ ωaαβ ei · sβ,
179
+ where {e1, . . . , en−1} is a local orthonormal frame on Σ.
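Lemma 2.3 below asserts that χ is self-adjoint with χ² equal to the identity; this is easy to verify numerically in a small concrete model. The sketch below is an added illustration only: the ω_a matrices and the modeling of Clifford multiplication by the unit normal ν through the same matrices are assumptions made for the example, not the paper's construction.

    import numpy as np

    # Concrete model with n = 3, m = 2: omega_a = i * sigma_a, and chi acting on
    # m-tuples of spinors as chi = -(sum_a <N,E_a> omega_a) (x) (Clifford mult. by nu).
    sigma = [np.array([[0, 1], [1, 0]], dtype=complex),
             np.array([[0, -1j], [1j, 0]], dtype=complex),
             np.array([[1, 0], [0, -1]], dtype=complex)]
    omega = [1j * s for s in sigma]

    rng = np.random.default_rng(1)
    N = rng.normal(size=3); N /= np.linalg.norm(N)      # unit vector N
    nu = rng.normal(size=3); nu /= np.linalg.norm(nu)   # unit normal nu

    A = sum(N[a] * omega[a] for a in range(3))           # sum_a <N,E_a> omega_a
    G_nu = sum(nu[a] * omega[a] for a in range(3))       # Clifford mult. by nu
    chi = -np.kron(A, G_nu)                               # chi on m-tuples of spinors

    print(np.allclose(chi.conj().T, chi))                 # chi is self-adjoint
    print(np.allclose(chi @ chi, np.eye(4)))              # chi^2 = id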
180
+ Lemma 2.3. The map χ is self-adjoint. Moreover, χ2 is the identity.
181
+ Proof. Suppose that s = (s1, . . . , sm) and t = (t1, . . . , tm) are two m-
182
+ tuples of spinors. We compute
183
+ (χ²s)α = Σ_{a,b=1}^{n} Σ_{β,γ=1}^{m} ⟨N, Ea⟩ ⟨N, Eb⟩ ωaαβ ωbβγ ν · ν · sγ
+ = − Σ_{a,b=1}^{n} Σ_{β,γ=1}^{m} ⟨N, Ea⟩ ⟨N, Eb⟩ ωaαβ ωbβγ sγ
+ = −(1/2) Σ_{a,b=1}^{n} Σ_{β,γ=1}^{m} ⟨N, Ea⟩ ⟨N, Eb⟩ (ωaαβ ωbβγ + ωbαβ ωaβγ) sγ
+ = Σ_{a,b=1}^{n} Σ_{γ=1}^{m} ⟨N, Ea⟩ ⟨N, Eb⟩ δab δαγ sγ
+ = sα.
+ Moreover,
+ Σ_{α=1}^{m} ⟨(χs)α, tα⟩ = − Σ_{a=1}^{n} Σ_{α,β=1}^{m} ⟨N, Ea⟩ ωaαβ ⟨ν · sβ, tα⟩
+ = − Σ_{a=1}^{n} Σ_{α,β=1}^{m} ⟨N, Ea⟩ ωaβα ⟨sβ, ν · tα⟩
+ = Σ_{β=1}^{m} ⟨sβ, (χt)β⟩.
242
+ This completes the proof of Lemma 2.3.
243
246
+ Lemma 2.4. Assume that x ∈ Σ and ξ ∈ TxΣ. Then the map (s1, . . . , sm) ↦
247
+ (ν · ξ · s1, . . . , ν · ξ · sm) maps the eigenspace of χ with eigenvalue 1 to the
248
+ eigenspace of χ with eigenvalue −1, and vice versa. In particular, the two
249
+ eigenspaces have the same dimension.
250
+ Proof. For each vector ξ ∈ TxΣ, the map
251
+ (s1, . . . , sm) ↦ (ν · ξ · s1, . . . , ν · ξ · sm)
252
+ anti-commutes with χ. From this, the assertion follows.
253
+ Lemma 2.5. The map B is self-adjoint. Moreover, χ and B commute.
254
+ Proof. Let {e1, . . . , en−1} be a local orthonormal frame on Σ. Then
255
+ m
256
+
257
+ α=1
258
+ ⟨(Bs)α, tα⟩ =
259
+ n−1
260
+
261
+ i=1
262
+ n
263
+
264
+ a=1
265
+ m
266
+
267
+ α,β=1
268
+ ⟨dN(ei), Ea⟩ ωaαβ ⟨ei · sβ, tα⟩
269
+ =
270
+ n−1
271
+
272
+ i=1
273
+ n
274
+
275
+ a=1
276
+ m
277
+
278
+ α,β=1
279
+ ⟨dN(ei), Ea⟩ ωaβα ⟨sβ, ei · tα⟩
280
+ =
281
+ m
282
+
283
+ β=1
284
+ ⟨sβ, (Bt)β⟩.
285
+ This shows that B is self-adjoint. Moreover,
286
+ (χBs)α − (Bχs)α
287
+ = −
288
+ n−1
289
+
290
+ i=1
291
+ n
292
+
293
+ a,b=1
294
+ n
295
+
296
+ β,γ=1
297
+ ⟨N, Ea⟩ ⟨dN(ei), Eb⟩ ωaαβ ωbβγ ν · ei · sγ
298
+ +
299
+ n−1
300
+
301
+ i=1
302
+ n
303
+
304
+ a,b=1
305
+ n
306
+
307
+ β,γ=1
308
+ ⟨dN(ei), Ea⟩ ⟨N, Eb⟩ ωaαβ ωbβγ ei · ν · sγ
309
+ = −
310
+ n−1
311
+
312
+ i=1
313
+ n
314
+
315
+ a,b=1
316
+ n
317
+
318
+ β,γ=1
319
+ ⟨N, Ea⟩ ⟨dN(ei), Eb⟩ (ωaαβ ωbβγ + ωbαβ ωaβγ) ν · ei · sγ
320
+ = 2
321
+ n−1
322
+
323
+ i=1
324
+ n
325
+
326
+ a,b=1
327
+ n
328
+
329
+ γ=1
330
+ ⟨N, Ea⟩ ⟨dN(ei), Eb⟩ δab δαγ ν · ei · sγ
331
+ = 2
332
+ n−1
333
+
334
+ i=1
335
+ ⟨N, dN(ei)⟩ ν · ei · sα
336
+ = 0.
337
+ Thus, χ and B commute. This completes the proof of Lemma 2.5.
338
+ At this point, we recall a definition from linear algebra.
339
+
340
+ 6
341
+ SIMON BRENDLE
342
+ Definition 2.6. Let V and W be finite-dimensional vector spaces of the
343
+ same dimension, each of them equipped with an inner product. The trace norm
344
+ of a linear map L : V → W is defined by ∥L∥tr = supQ tr(QL), where the
345
+ supremum is taken over all linear isometries Q : W → V . Equivalently,
346
+ ∥L∥tr can be characterized as the sum of the singular values of L.
347
+ It is easy to see from the definition that the trace norm satisfies the tri-
348
+ angle inequality.
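Numerically, the trace norm is easy to compute from a singular value decomposition, and the characterization as a supremum over isometries can be checked directly. The sketch below is an added illustration (not part of the paper) using square real matrices, for which the isometries are simply orthogonal matrices.

    import numpy as np

    def trace_norm(L):
        # Sum of the singular values of L (the nuclear / trace norm).
        return np.linalg.svd(L, compute_uv=False).sum()

    rng = np.random.default_rng(0)
    A = rng.normal(size=(4, 4))
    B = rng.normal(size=(4, 4))

    # Triangle inequality:  ||A + B||_tr <= ||A||_tr + ||B||_tr
    print(trace_norm(A + B) <= trace_norm(A) + trace_norm(B) + 1e-12)

    # sup_Q tr(Q A) over orthogonal Q is attained at Q = V U^T when A = U S V^T:
    U, S, Vt = np.linalg.svd(A)
    Q = Vt.T @ U.T
    print(np.isclose(np.trace(Q @ A), trace_norm(A)))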
349
+ Lemma 2.7. Suppose that s = (s1, . . . , sm) is an m-tuple of spinors. Then
350
+ | Σ_{α=1}^{m} ⟨(Bs)α, sα⟩ | ≤ ∥dN∥tr ( Σ_{α=1}^{m} |sα|² )
361
+ at each point x ∈ Σ. Here, ∥dN∥tr denotes the trace norm of the differential
362
+ dN : TxΣ → TN(x)Sn−1. The tangent space TxΣ is equipped with the restric-
363
+ tion of the inner product g, and the tangent space TN(x)Sn−1 is equipped
364
+ with the restriction of the standard inner product on Rn.
365
+ Proof. Fix a point x ∈ Σ. We can find an orthonormal basis {e1, . . . , en−1}
366
+ of TxΣ so that dN(ei) = λi ˆEi, where { ˆE1, . . . , ˆEn−1} is an orthonormal ba-
367
+ sis of TN(x)Sn−1 and λ1, . . . , λn−1 ≥ 0 denote the singular values of dN.
368
+ Then
369
+ m
370
+
371
+ α=1
372
+ ����
373
+ n
374
+
375
+ a=1
376
+ m
377
+
378
+ β=1
379
+ ⟨ ˆEi, Ea⟩ ωaαβ ei · sβ
380
+ ����
381
+ 2
382
+ =
383
+ n
384
+
385
+ a,b=1
386
+ m
387
+
388
+ α,β,γ=1
389
+ ⟨ ˆEi, Ea⟩ ⟨ ˆEi, Eb⟩ ωaαβ ωbαγ ⟨ei · sβ, ei · sγ⟩
390
+ = −
391
+ n
392
+
393
+ a,b=1
394
+ m
395
+
396
+ α,β,γ=1
397
+ ⟨ ˆEi, Ea⟩ ⟨ ˆEi, Eb⟩ ωaαβ ωbγα ⟨sβ, sγ⟩
398
+ = −1
399
+ 2
400
+ n
401
+
402
+ a,b=1
403
+ m
404
+
405
+ α,β,γ=1
406
+ ⟨ ˆEi, Ea⟩ ⟨ ˆEi, Eb⟩ (ωaγα ωbαβ + ωbγα ωaαβ) ⟨sβ, sγ⟩
407
+ =
408
+ n
409
+
410
+ a,b=1
411
+ m
412
+
413
+ β,γ=1
414
+ ⟨ ˆEi, Ea⟩ ⟨ ˆEi, Eb⟩ δab δγβ ⟨sβ, sγ⟩
415
+ =
416
+ m
417
+
418
+ α=1
419
+ |sα|2
420
+
421
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
422
+ 7
423
+ for each i = 1, . . . , n − 1. Using the Cauchy-Schwarz inequality, we obtain
424
+ ����
425
+ n
426
+
427
+ a=1
428
+ m
429
+
430
+ α,β=1
431
+ ⟨ ˆEi, Ea⟩ ωaαβ ⟨ei · sβ, sα⟩
432
+ ����
433
+
434
+ � m
435
+
436
+ α=1
437
+ ����
438
+ n
439
+
440
+ a=1
441
+ m
442
+
443
+ β=1
444
+ ⟨ ˆEi, Ea⟩ ωaαβ ei · sβ
445
+ ����
446
+ 2� 1
447
+ 2 � m
448
+
449
+ α=1
450
+ |sα|2
451
+ � 1
452
+ 2
453
+ =
454
+ m
455
+
456
+ α=1
457
+ |sα|2
458
+ for each i = 1, . . . , n − 1. Summation over i = 1, . . . , n − 1 gives
459
+ ����
460
+ m
461
+
462
+ α=1
463
+ ⟨(Bs)α, sα⟩
464
+ ���� =
465
+ ����
466
+ n−1
467
+
468
+ i=1
469
+ n
470
+
471
+ a=1
472
+ m
473
+
474
+ α,β=1
475
+ ⟨dN(ei), Ea⟩ ωaαβ ⟨ei · sβ, sα⟩
476
+ ����
477
+ =
478
+ ����
479
+ n−1
480
+
481
+ i=1
482
+ λi
483
+
484
+ n
485
+
486
+ a=1
487
+ m
488
+
489
+ α,β=1
490
+ ⟨ ˆEi, Ea⟩ ωaαβ ⟨ei · sβ, sα⟩
491
+ �����
492
+
493
+ � n−1
494
+
495
+ i=1
496
+ λi
497
+ � � m
498
+
499
+ α=1
500
+ |sα|2
501
+
502
+ ,
503
+ as claimed.
504
+ Proposition 2.8. Suppose that s = (s1, . . . , sm) and t = (t1, . . . , tm) are
505
+ m-tuples of spinors. Then
506
+ 0 =
507
+
508
+ Σ
509
+ m
510
+
511
+ α=1
512
+ ⟨DΣsα, (χt)α⟩ dσg +
513
+
514
+ Σ
515
+ m
516
+
517
+ α=1
518
+ ⟨(χs)α, DΣtα⟩ dσg
519
+ +
520
+
521
+ Σ
522
+ m
523
+
524
+ α=1
525
+ ⟨(Bs)α, tα⟩ dσg.
526
+ Equivalently,
527
+ 0 =
528
+
529
+ Σ
530
+ m
531
+
532
+ α=1
533
+ ⟨(As)α, (χt)α⟩ dσg +
534
+
535
+ Σ
536
+ m
537
+
538
+ α=1
539
+ ⟨(χs)α, (At)α⟩ dσg,
540
+ where A is defined by A = DΣ + 1
541
+ 2χB.
542
+ Proof. Let {e1, . . . , en−1} be a local orthonormal frame on Σ. We define
543
+ a tangential vector field Z on Σ by
544
+ ⟨Z, ei⟩ =
545
+ n
546
+
547
+ a=1
548
+ m
549
+
550
+ α,β=1
551
+ ⟨N, Ea⟩ ωaαβ ⟨ei · sβ, tα⟩
552
+
553
+ 8
554
+ SIMON BRENDLE
555
+ for i = 1, . . . , n − 1. Then
556
+ divΣZ =
557
+ n−1
558
+
559
+ i=1
560
+ n
561
+
562
+ a=1
563
+ m
564
+
565
+ α,β=1
566
+ ⟨N, Ea⟩ ωaαβ ⟨ei · ∇eisβ, tα⟩
567
+ +
568
+ n−1
569
+
570
+ i=1
571
+ n
572
+
573
+ a=1
574
+ m
575
+
576
+ α,β=1
577
+ ⟨N, Ea⟩ ωaαβ ⟨ei · sβ, ∇eitα⟩
578
+
579
+ n
580
+
581
+ a=1
582
+ m
583
+
584
+ α,β=1
585
+ H ⟨N, Ea⟩ ωaαβ ⟨ν · sβ, tα⟩
586
+ +
587
+ n−1
588
+
589
+ i=1
590
+ n
591
+
592
+ a=1
593
+ m
594
+
595
+ α,β=1
596
+ ⟨dN(ei), Ea⟩ ωaαβ ⟨ei · sβ, tα⟩
597
+ = −
598
+ n−1
599
+
600
+ i=1
601
+ m
602
+
603
+ β=1
604
+ ⟨ei · ∇eisβ, ν · (χt)β⟩
605
+ +
606
+ n−1
607
+
608
+ i=1
609
+ m
610
+
611
+ α=1
612
+ ⟨ei · ν · (χs)α, ∇eitα⟩
613
+ +
614
+ m
615
+
616
+ α=1
617
+ H ⟨(χs)α, tα⟩ +
618
+ m
619
+
620
+ α=1
621
+ ⟨(Bs)α, tα⟩
622
+ =
623
+ m
624
+
625
+ β=1
626
+ ⟨DΣsβ, (χt)β⟩ +
627
+ m
628
+
629
+ α=1
630
+ ⟨(χs)α, DΣtα⟩ +
631
+ m
632
+
633
+ α=1
634
+ ⟨(Bs)α, tα⟩.
635
+ Integrating over Σ, we obtain
636
+ 0 =
637
+
638
+ Σ
639
+ m
640
+
641
+ β=1
642
+ ⟨DΣsβ, (χt)β⟩ dσg +
643
+
644
+ Σ
645
+ m
646
+
647
+ α=1
648
+ ⟨(χs)α, DΣtα⟩ dσg
649
+ +
650
+
651
+ Σ
652
+ m
653
+
654
+ α=1
655
+ ⟨(Bs)α, tα⟩ dσg.
656
+ This completes the proof of Proposition 2.8.
657
+ Remark 2.9. It is well known that the boundary Dirac operator DΣ is
658
+ formally self-adjoint. Moreover, it follows from Lemma 2.3 and Lemma 2.5
659
+ that χB is self-adjoint. Consequently, the operator A = DΣ+ 1
660
+ 2χB is formally
661
+ self-adjoint. Finally, Proposition 2.8 implies that A and χ anti-commute.
662
+
663
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
664
+ 9
665
+ Proposition 2.10. Suppose that s = (s1, . . . , sm) is an m-tuple of spinors.
666
+ Then
667
+
668
+
669
+
670
+ m
671
+
672
+ α=1
673
+ |Dsα|2 dvolg +
674
+
675
+
676
+ n
677
+
678
+ α=1
679
+ |∇sα|2 dvolg + 1
680
+ 4
681
+
682
+
683
+ m
684
+
685
+ α=1
686
+ R |sα|2 dvolg
687
+ ≤ 1
688
+ 2
689
+
690
+ Σ
691
+ m
692
+
693
+ α=1
694
+ ⟨DΣs��, sα − (χs)α⟩ dσg + 1
695
+ 2
696
+
697
+ Σ
698
+ m
699
+
700
+ α=1
701
+ ⟨sα − (χs)α, DΣsα⟩ dσg
702
+ − 1
703
+ 2
704
+
705
+ Σ
706
+ (H − ∥dN∥tr)
707
+ � m
708
+
709
+ α=1
710
+ |sα|2
711
+
712
+ dσg.
713
+ Proof. By the Weitzenb¨ock formula, D2sα = −∆sα + 1
714
+ 4 R sα, where ∆
715
+ denotes the connection Laplacian on the spinor bundle. Using the divergence
716
+ theorem, we obtain
717
+
718
+
719
+
720
+ m
721
+
722
+ α=1
723
+ |Dsα|2 dvolg +
724
+
725
+
726
+ m
727
+
728
+ α=1
729
+ |∇sα|2 dvolg + 1
730
+ 4
731
+
732
+
733
+ m
734
+
735
+ α=1
736
+ R |sα|2 dvolg
737
+ =
738
+
739
+ Σ
740
+ m
741
+
742
+ α=1
743
+ ⟨ν · Dsα, sα⟩ dσg +
744
+
745
+ Σ
746
+ m
747
+
748
+ α=1
749
+ ⟨∇νsα, sα⟩ dσg
750
+ =
751
+
752
+ Σ
753
+ ⟨DΣsα, sα⟩ dσg − 1
754
+ 2
755
+
756
+ Σ
757
+ m
758
+
759
+ α=1
760
+ H |sα|2 dσg.
761
+ Applying Proposition 2.8 with s = t gives
762
+ 0 =
763
+
764
+ Σ
765
+ m
766
+
767
+ α=1
768
+ ⟨DΣsα, (χs)α⟩ dσg +
769
+
770
+ Σ
771
+ m
772
+
773
+ α=1
774
+ ⟨(χs)α, DΣsα⟩ dσg
775
+ +
776
+
777
+ Σ
778
+ m
779
+
780
+ α=1
781
+ ⟨(Bs)α, sα⟩ dσg.
782
+ This gives
783
+
784
+
785
+
786
+ m
787
+
788
+ α=1
789
+ |Dsα|2 dvolg +
790
+
791
+
792
+ n
793
+
794
+ α=1
795
+ |∇sα|2 dvolg + 1
796
+ 4
797
+
798
+
799
+ m
800
+
801
+ α=1
802
+ R |sα|2 dvolg
803
+ = 1
804
+ 2
805
+
806
+ Σ
807
+ m
808
+
809
+ α=1
810
+ ⟨DΣsα, sα⟩ dσg + 1
811
+ 2
812
+
813
+ Σ
814
+ m
815
+
816
+ α=1
817
+ ⟨sα, DΣsα⟩ dσg − 1
818
+ 2
819
+
820
+ Σ
821
+ m
822
+
823
+ α=1
824
+ H |sα|2 dσg
825
+ = 1
826
+ 2
827
+
828
+ Σ
829
+ m
830
+
831
+ α=1
832
+ ⟨DΣsα, sα − (χs)α⟩ dσg + 1
833
+ 2
834
+
835
+ Σ
836
+ m
837
+
838
+ α=1
839
+ ⟨sα − (χs)α, DΣsα⟩ dσg
840
+ − 1
841
+ 2
842
+
843
+ Σ
844
+ m
845
+
846
+ α=1
847
+ ⟨(Bs)α, sα⟩ dσg − 1
848
+ 2
849
+
850
+ Σ
851
+ m
852
+
853
+ α=1
854
+ H |sα|2 dσg.
855
+ Hence, the assertion follows from Lemma 2.7.
856
+
857
+ 10
858
+ SIMON BRENDLE
859
+ Corollary 2.11. Suppose that R ≥ 0 at each point in Ω and H ≥ ∥dN∥tr at
860
+ each point on Σ. Then every m-tuple of harmonic spinors s = (s1, . . . , sm)
861
+ with χs = s is parallel.
862
+ Replacing N by −N, we can draw the following conclusion:
863
+ Corollary 2.12. Suppose that R ≥ 0 at each point in Ω and H ≥ ∥dN∥tr at
864
+ each point on Σ. Then every m-tuple of harmonic spinors s = (s1, . . . , sm)
865
+ with χs = −s is parallel.
866
+ Proposition 2.13. Suppose that Ω is a convex domain in Rn with smooth
867
+ boundary ∂Ω = Σ.
868
+ Let g be a Riemannian metric on Ω.
869
+ Suppose that
870
+ N : Σ → Sn−1 is a smooth map. Then the boundary condition χs = s is a
871
+ D-elliptic boundary condition in the sense of B¨ar and Ballmann [2].
872
+ Proof.
873
+ We apply Corollary 3.18 in [2] with E′ = ker(id − χ) and
874
+ E′′ = ker(id + χ).
875
+ Lemma 2.4 implies that, for each point x ∈ Σ and
876
+ each ξ ∈ TxΣ, the map (s1, . . . , sm) �→ (ν · ξ · s1, . . . , ν · ξ · sm) interchanges
877
+ ker(id − χ) and ker(id + χ). Therefore, the boundary condition χs = s is a
878
+ D-elliptic boundary condition in the sense of [2].
879
+ Proposition 2.14. Assume that n ≥ 3 is an odd integer. Suppose that
880
+ Ω is a convex domain in Rn with smooth boundary ∂Ω = Σ. Let g be a
881
+ Riemannian metric on Ω. Suppose that N : Σ → Sn−1 is homotopic to
882
+ the Gauss map of Σ with respect to the Euclidean metric. Then the Dirac
883
+ operator with the boundary condition χs = s has Fredholm index at least 1.
884
+ Proof. Since the Fredholm index is homotopy invariant, it suffices to
885
+ prove the assertion in the special case when g is the Euclidean metric and
886
+ N is the Gauss map of Σ with respect to the Euclidean metric.
887
+ We first analyze the kernel of the Dirac operator with the boundary con-
888
+ dition χs = s. Recall that ¯s1, . . . , ¯sm is a basis of spinors on flat Rn, and
889
+ ωaαβ = ⟨Ea · ¯sα, ¯sβ⟩. Clearly, ¯s = (¯s1, . . . , ¯sm) is an m-tuple of harmonic
890
+ spinors on Ω which satisfies the boundary condition χ¯s = ¯s. Therefore, the
891
+ kernel has dimension at least 1.
892
+ We next examine the cokernel.
893
+ The cokernel can be identified with
894
+ the space of all m-tuples of harmonic spinors s = (s1, . . . , sm) such that
895
+ ⟨ν · s, t⟩ = 0 for all points x ∈ Σ and all t ∈ ker(id − χ) (see [2], Example
896
+ 3.20). We claim that this space has dimension 0. To see this, suppose that
897
+ s = (s1, . . . , sm) is an m-tuple of harmonic spinors such that ⟨ν · s, t⟩ = 0
898
+ for all points x ∈ Σ and all t ∈ ker(id − χ). This implies s ∈ ker(id + χ) at
899
+ each point on Σ. Since H = ∥dN∥tr at each point on Σ, Corollary 2.12 im-
900
+ plies that s = (s1, . . . , sm) is parallel. In other words, s1, . . . , sm are constant
901
+ spinors. Let us write sα = �m
902
+ β=1 zαβ ¯sβ for some matrix z ∈ End(Cm). Since
903
+ χs = −s at each point on Σ, it follows that the matrix z ∈ End(Cm) anti-
904
+ commutes with the matrix �n
905
+ a=1⟨N(x), Ea⟩ ωa ∈ End(Cm) for each point
906
+ x ∈ Σ. It is easy to see that the Gauss map N : Σ → Sn−1 is surjective.
907
+
908
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
909
+ 11
910
+ Consequently, the matrix z ∈ End(Cm) anti-commutes with ωa ∈ End(Cm)
911
+ for each a = 1, . . . , n. Since n is odd, Lemma 2.1 implies that z = 0, hence
912
+ s = 0. This shows that the cokernel has dimension 0. This completes the
913
+ proof of Proposition 2.14.
914
+ 3. Approximating a compact, convex polytope by smooth
915
+ domains
916
+ Let us consider a compact, convex polytope Ω ⊂ Rn with non-empty
917
+ interior. We write Ω = ⋂_{α∈A} {uα ≤ 0}, where A is a finite set and the uα
919
+ are linear functions in Rn. After eliminating redundant inequalities, we may
920
+ assume that the following condition is satisfied.
921
+ Assumption 3.1. For each α ∈ A, the set Ω ∩ {uα = 0} is non-empty.
922
+ Let g be a Riemannian metric which is defined on an open set containing
923
+ Ω. For each α ∈ A, ∇uα will denote the gradient of uα with respect to the
924
+ metric g; |∇uα| will denote the norm of the gradient of uα with respect to
925
+ the metric g; and να = ∇uα/|∇uα| will denote the outward-pointing unit normal
928
+ vector to the halfspace {uα ≤ 0} with respect to the metric g. For each
929
+ α ∈ A, we denote by Nα ∈ Sn−1 the outward-pointing unit normal vector
930
+ to the halfspace {uα ≤ 0} with respect to the Euclidean metric.
931
+ For each λ > 0, the function Σ_{α∈A} e^{λuα} is convex with respect to the
+ Euclidean metric. Clearly, Σ_{α∈A} e^{λuα} > 1 on ∂Ω. Moreover, we can find a
+ large number λ0 such that infΩ Σ_{α∈A} e^{λuα} < 1 for each λ > λ0. For each
938
+ λ > λ0, we define
+ Ωλ = { Σ_{α∈A} e^{λuα} ≤ 1 }.
945
+ For each λ > λ0, Ωλ is a convex domain in Rn with smooth boundary
946
+ Σλ = ∂Ωλ. The sets Ωλ form an increasing family of sets in the sense that
947
+ Ωλ ⊂ Ωµ for λ0 < λ < µ. Moreover,
+ ⋃_{λ>λ0} Ωλ = ⋂_{α∈A} {uα < 0}.
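The domains Ωλ are a "log-sum-exp" smoothing of the polytope and are easy to explore numerically. The sketch below is an added illustration with a hypothetical square polytope (not an example from the paper); it checks that an interior point of Ω eventually lies in Ωλ as λ grows, while a point outside Ω never does.

    import numpy as np

    # Hypothetical example: the square {|x1| <= 1, |x2| <= 1} written as the
    # intersection of half-spaces u_alpha <= 0 with u = A x + b.
    A = np.array([[1.0, 0.0], [-1.0, 0.0], [0.0, 1.0], [0.0, -1.0]])
    b = np.array([-1.0, -1.0, -1.0, -1.0])

    def in_omega_lambda(x, lam):
        u = A @ x + b                          # values u_alpha(x)
        return np.sum(np.exp(lam * u)) <= 1.0

    x_inside = np.array([0.3, -0.5])           # interior point of the polytope
    x_outside = np.array([1.2, 0.0])           # point outside the polytope
    for lam in [1.0, 5.0, 50.0]:
        print(lam, in_omega_lambda(x_inside, lam), in_omega_lambda(x_outside, lam))
    # As lam grows, the interior point is eventually accepted while the outside
    # point never is, illustrating that the smooth convex domains Omega_lambda
    # exhaust the interior of Omega.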
954
+ Lemma 3.2. If λ is sufficiently large, then infΣλ
955
+ �� �
956
+ α∈A eλuα duα
957
+ �� ≥ C−1
958
+ for some large constant C which is independent of λ.
959
+ Proof. We argue by contradiction. Suppose that the assertion is false.
960
+ Then there exists a sequence of positive real numbers λl → ∞ and a se-
961
+ quence of points xl ∈ Σλl such that
962
+ �� �
963
+ α∈A eλuα duα
964
+ �� ≤ l−1 at the point
965
+ xl. After passing to a subsequence, we may assume that the sequence xl
966
+ converges to a point x0 ∈ Ω.
967
+ Moreover, we may assume that, for each
968
+ α ∈ A, the sequence eλluα(xl) converges to a nonnegative real number zα.
969
+ Since �
970
+ α∈A eλluα(xl) = 1 for each l, we know that �
971
+ α∈A zα > 0.
972
+ Let
973
+ A0 := {α ∈ A : zα > 0}. Clearly, A0 is non-empty, and uα(x0) = 0 for all
974
+
975
+ 12
976
+ SIMON BRENDLE
977
+ α ∈ A0. Moreover, �
978
+ α∈A0 zα duα = 0 at the point x0. On the other hand,
979
+ since Ω is a convex set with non-empty interior, we can find a tangent vector
980
+ ξ ∈ Tx0Ω such that duα(ξ) > 0 for all α ∈ A0. This is a contradiction. This
981
+ completes the proof of Lemma 3.2.
982
+ Lemma 3.3. If λ is sufficiently large, then infΣλ
983
+ �� �
984
+ α∈A eλuα |∇uα| Nα
985
+ �� ≥
986
+ C−1 for some large constant C which is independent of λ.
987
+ Proof. We argue by contradiction. Suppose that the assertion is false.
988
+ Then there exists a sequence of positive real numbers λl → ∞ and a se-
989
+ quence of points xl ∈ Σλl such that
990
+ �� �
991
+ α∈A eλuα |∇uα| Nα
992
+ �� ≤ l−1 at the
993
+ point xl. After passing to a subsequence, we may assume that the sequence
994
+ xl converges to a point x0 ∈ Ω. Moreover, we may assume that, for each
995
+ α ∈ A, the sequence eλluα(xl) |∇uα(xl)| converges to a nonnegative real num-
996
+ ber zα. Since �
997
+ α∈A eλluα(xl) = 1 for each l, we know that �
998
+ α∈A zα > 0.
999
+ Let A0 := {α ∈ A : zα > 0}. Clearly, A0 is non-empty, and uα(x0) = 0
1000
+ for all α ∈ A0. Moreover, �
1001
+ α∈A0 zαNα = 0 at the point x0. On the other
1002
+ hand, since Ω is a convex set with non-empty interior, we can find a vector
1003
+ ξ ∈ Rn such that ⟨Nα, ξ⟩ > 0 for all α ∈ A0. This is a contradiction. This
1004
+ completes the proof of Lemma 3.3.
1005
+ The outward-pointing unit normal vector to the domain Ωλ with respect
1006
+ to the metric g is given by
1007
+ ν =
1008
+
1009
+ α∈A eλuα ∇uα
1010
+ �� �
1011
+ α∈A eλuα ∇uα
1012
+ �� =
1013
+
1014
+ α∈A eλuα |∇uα| να
1015
+ �� �
1016
+ α∈A eλuα |∇uα| να
1017
+ ��.
1018
+ We define a map N : Σλ → Sn−1 by
1019
+ N =
1020
+
1021
+ α∈A eλuα |∇uα| Nα
1022
+ �� �
1023
+ α∈A eλuα |∇uα| Nα
1024
+ ��.
1025
+ Lemma 3.4. The map N : Σλ → Sn−1 is homotopic to the Gauss map of
1026
+ Σλ with respect to the Euclidean metric.
1027
+ Proof. In the special case when g is the Euclidean metric, the map N
1028
+ coincides with the Gauss map of Σλ, and the assertion is trivially true. To
1029
+ prove the assertion in general, we deform the metric g to the Euclidean met-
1030
+ ric.
1031
+ Proposition 3.5. Let x ∈ Σλ. Let π : TxΩ → TxΩ denote the orthogonal
+ projection onto the orthogonal complement of ν, and let P : Rn → Rn denote
+ the orthogonal projection onto the orthogonal complement of N. Then H −
1034
+
1035
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
1036
+ 13
1037
+ ∥dN∥tr ≥ Vλ, where the function Vλ : Σλ → R is defined by
1038
+ Vλ = λ
1039
+
1040
+ α∈A eλuα |∇uα|2 |π(να)|2
1041
+ �� �
1042
+ α∈A eλuα |∇uα| να
1043
+ ��
1044
+ − λ
1045
+
1046
+ α∈A eλuα |∇uα|2 |π(να)| |P(Nα)|
1047
+ �� �
1048
+ α∈A eλuα |∇uα| Nα
1049
+ ��
1050
+ +
1051
+
1052
+ α∈A eλuα (∆uα − (D2uα)(ν, ν))
1053
+ �� �
1054
+ α∈A eλuα |∇uα| να
1055
+ ��
1056
+
1057
+
1058
+ α∈A eλuα |∇(|∇uα|)| |P(Nα)|
1059
+ �� �
1060
+ α∈A eλuα |∇uα| Nα
1061
+ ��
1062
+ .
1063
+ Proof. Let {e1, . . . , en−1} denote a local orthonormal frame on Σλ. The
1064
+ mean curvature of Σλ is given by
1065
+ H = λ
1066
+ �n−1
1067
+ i=1
1068
+
1069
+ α∈A eλuα ⟨∇uα, ei⟩2
1070
+ �� �
1071
+ α∈A eλuα ∇uα
1072
+ ��
1073
+ +
1074
+ �n−1
1075
+ i=1
1076
+
1077
+ α∈A eλuα (D2uα)(ei, ei)
1078
+ �� �
1079
+ α∈A eλuα ∇uα
1080
+ ��
1081
+ = λ
1082
+
1083
+ α∈A eλuα |π(∇uα)|2
1084
+ �� �
1085
+ α∈A eλuα ∇uα
1086
+ ��
1087
+ +
1088
+
1089
+ α∈A eλuα (∆uα − (D2uα)(ν, ν))
1090
+ �� �
1091
+ α∈A eλuα ∇uα
1092
+ ��
1093
+ = λ
1094
+
1095
+ α∈A eλuα |∇uα|2 |π(να)|2
1096
+ �� �
1097
+ α∈A eλuα |∇uα| να
1098
+ ��
1099
+ +
1100
+
1101
+ α∈A eλuα (∆uα − (D2uα)(ν, ν))
1102
+ �� �
1103
+ α∈A eλuα |∇uα| να
1104
+ ��
1105
+ .
1106
+ If ξ is a tangent vector to Σλ, then
1107
+ dN(ξ)
1108
+ = λ
1109
+
1110
+ α∈A eλuα |∇uα| ⟨∇uα, ξ⟩ P(Nα)
1111
+ �� �
1112
+ α∈A eλuα |∇uα| Nα
1113
+ ��
1114
+ +
1115
+
1116
+ α∈A eλuα ⟨∇(|∇uα|), ξ⟩ P(Nα)
1117
+ �� �
1118
+ α∈A eλuα |∇uα| Nα
1119
+ ��
1120
+ = λ
1121
+
1122
+ α∈A eλuα |∇uα|2 ⟨π(να), ξ⟩ P(Nα)
1123
+ �� �
1124
+ α∈A eλuα |∇uα| Nα
1125
+ ��
1126
+ +
1127
+
1128
+ α∈A eλuα ⟨∇(|∇uα|), ξ⟩ P(Nα)
1129
+ �� �
1130
+ α∈A eλuα |∇uα| Nα
1131
+ ��
1132
+ .
1133
+ The trace norm of a linear transformation of the form ξ �→ ⟨X, ξ⟩ Y is given
1134
+ by |X| |Y |. Since the trace norm satisfies the triangle inequality, it follows
1135
+ that
1136
+ ∥dN∥tr
1137
+ ≤ λ
1138
+
1139
+ α∈A eλuα |∇uα|2 |π(να)| |P(Nα)|
1140
+ �� �
1141
+ α∈A eλuα |∇uα| Nα
1142
+ ��
1143
+ +
1144
+
1145
+ α∈A eλuα |∇(|∇uα|)| |P(Nα)|
1146
+ �� �
1147
+ α∈A eλuα |∇uα| Nα
1148
+ ��
1149
+ .
1150
+ Putting these facts together, the assertion follows.
1151
+ In the following, we denote by Vλ,− = max{−Vλ, 0} the negative part of
1152
+ Vλ.
1153
+ Proposition 3.6. Suppose that the Matching Angle Hypothesis is satisfied.
1154
+ Then supΣλ Vλ,− ≤ o(λ) as λ → ∞.
1155
+ Proof. We argue by contradiction. Suppose that the assertion is false.
1156
+ Then there exists a sequence of positive real numbers λl → ∞ and a se-
1157
+ quence of points xl ∈ Σλl such that lim supl→∞ λ−1
1158
+ l
1159
+ Vλl(xl) < 0.
1160
+ After
1161
+ passing to a subsequence, we may assume that the sequence xl converges
1162
+ to a point x0 ∈ Ω.
1163
+ Moreover, we may assume that, for each α ∈ A,
1164
+ the sequence eλluα(xl) |∇uα(xl)| converges to a nonnegative real number zα.
1165
+
1166
+ 14
1167
+ SIMON BRENDLE
1168
+ Since �
1169
+ α∈A eλluα(xl) = 1 for each l, we know that �
1170
+ α∈A zα > 0.
1171
+ Let
1172
+ A0 := {α ∈ A : zα > 0}. Clearly, A0 is non-empty, and uα(x0) = 0 for
1173
+ all α ∈ A0. The Matching Angle Hypothesis implies that, at the point x0,
1174
+ ⟨να1, να2⟩ = ⟨Nα1, Nα2⟩ for all α1, α2 ∈ A0. Let π : Tx0Ω → Tx0Ω denote
1175
+ the orthogonal projection to the orthogonal complement of �
1176
+ α∈A0 zανα,
1177
+ and let P : Rn → Rn denote the orthogonal projection to the orthogonal
1178
+ complement of �
1179
+ α∈A0 zαNα. For each β ∈ A0, we have
1180
+ |π(νβ)|2 = 1 −
1181
+ � �
1182
+ α∈A0 zανα, νβ
1183
+ �2
1184
+ �� �
1185
+ α∈A0 zανα
1186
+ ��2
1187
+ = 1 −
1188
+ � �
1189
+ α∈A0 zαNα, Nβ
1190
+ �2
1191
+ �� �
1192
+ α∈A0 zαNα
1193
+ ��2
1194
+ = |P(Nβ)|2
1195
+ at the point x0. Therefore, for each β ∈ A0, we obtain
1196
+ |π(νβ)|
1197
+ �� �
1198
+ α∈A0 zανα
1199
+ �� =
1200
+ |P(Nβ)|
1201
+ �� �
1202
+ α∈A0 zαNα
1203
+ ��
1204
+ at the point x0. Using Proposition 3.5, we conclude that λ−1
1205
+ l
1206
+ Vλl(xl) → 0 as
1207
+ l → ∞. This is a contradiction.
1208
+ In the remainder of this section, we will estimate the Ls-norm Vλ,− on Σλ∩
1209
+ Br(p), where s ∈ [1, 3
1210
+ 2) is a fixed exponent and Br(p) denotes a Euclidean
1211
+ ball of radius r. We begin by recalling a basic fact about the area of convex
1212
+ hypersurfaces in Rn.
1213
+ Lemma 3.7. Let Br(p) denote a Euclidean ball of radius r. Then the in-
1214
+ tersection Σλ ∩ Br(p) has area at most Crn−1.
1215
+ Proof. This follows from the fact that the hypersurface Σλ = ∂Ωλ is
1216
+ outward-minimizing with respect to the Euclidean metric.
1217
+ Definition 3.8. Consider three pairwise distinct elements α1, α2, α3 ∈ A.
1218
+ We denote by G(α1,α2,α3)
1219
+ λ
1220
+ the set of all points x ∈ Σλ with the property that
1221
+ uα1(x) ≥ uα2(x) ≥ uα3(x) and uα3(x) ≥ uα(x) for α ∈ A \ {α1, α2, α3}.
1222
+ Lemma 3.9. Assume that the mean curvature of the hypersurface {uα = 0}
1223
+ with respect to g is nonnegative at each point in Ω ∩ {uα = 0}. Let us fix an
1224
+ exponent s ∈ [1, 3
1225
+ 2), and let Br(p) denote a Euclidean ball of radius r ≤ 1.
1226
+ If λr is sufficiently large, then
1227
+
1228
+ rs+1−n
1229
+
1230
+ G(α1,α2,α3)
1231
+ λ
1232
+ ∩{uα2≤−λ− 7
1233
+ 8 r
1234
+ 1
1235
+ 8 }∩Br(p)
1236
+ V s
1237
+ λ,−
1238
+ � 1
1239
+ s
1240
+ ≤ Cλr e−(λr)
1241
+ 1
1242
+ 8
1243
+ for all pairwise distinct elements α1, α2, α3 ∈ A.
1244
+ Proof. Let us consider an arbitrary point x ∈ G(α1,α2,α3)
1245
+ λ
1246
+ with uα2(x) ≤
1247
+ −λ− 7
1248
+ 8 r
1249
+ 1
1250
+ 8 .
1251
+ By definition of G(α1,α2,α3)
1252
+ λ
1253
+ , it follows that uα(x) ≤ −λ− 7
1254
+ 8r
1255
+ 1
1256
+ 8
1257
+ for all α ∈ A \ {α1}.
1258
+ Using the identity �
1259
+ α∈A eλuα(x) = 1, we obtain
1260
+ uα1(x) ≥ −Cλ−1 e−(λr)
1261
+ 1
1262
+ 8 . Moreover, |ν − να1| ≤ C e−(λr)
1263
+ 1
1264
+ 8 and |N − Nα1| ≤
1265
+
1266
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
1267
+ 15
1268
+ C e−(λr)
1269
+ 1
1270
+ 8 at the point x. From this, we deduce that |π(να1)| ≤ C e−(λr)
1271
+ 1
1272
+ 8
1273
+ and |P(Nα1)| ≤ C e−(λr)
1274
+ 1
1275
+ 8 at the point x. Therefore,
1276
+ Vλ ≥ ∆uα1 − (D2uα1)(να1, να1)
1277
+ |∇uα1|
1278
+ − Cλ e−(λr)
1279
+ 1
1280
+ 8
1281
+ at the point x. Since uα1(x) ≥ −Cλ−1 e−(λr)
1282
+ 1
1283
+ 8 and uα(x) ≤ −λ− 7
1284
+ 8r
1285
+ 1
1286
+ 8 for
1287
+ all α ∈ A \ {α1}, we can find a point y ∈ Ω such that uα1(y) = 0 and
1288
+ d(x, y) ≤ Cλ−1 e−(λr)
1289
+ 1
1290
+ 8 . By assumption, the mean curvature of the hyper-
1291
+ surface {uα1 = 0} at the point y is nonnegative. This implies
1292
+ ∆uα1 − (D2uα1)(να1, να1)
1293
+ |∇uα1|
1294
+ ≥ 0
1295
+ at the point y. Consequently,
1296
+ ∆uα1 − (D2uα1)(να1, να1)
1297
+ |∇uα1|
1298
+ ≥ −C d(x, y)
1299
+ at the point x. Thus, we conclude that
1300
+ Vλ(x) ≥ −Cλ e−(λr)
1301
+ 1
1302
+ 8
1303
+ for each point x ∈ G(α1,α2,α3)
1304
+ λ
1305
+ ∩ {uα2 ≤ −λ− 7
1306
+ 8r
1307
+ 1
1308
+ 8}.
1309
+ On the other hand,
1310
+ Σλ ∩ Br(p) has area at most Crn−1. Consequently,
1311
+
1312
+ rs+1−n
1313
+
1314
+ G(α1,α2,α3)
1315
+ λ
1316
+ ∩{uα2≤−λ− 7
1317
+ 8 r
1318
+ 1
1319
+ 8 }∩Br(p)
1320
+ V s
1321
+ λ,−
1322
+ � 1
1323
+ s
1324
+ ≤ Cλr e−(λr)
1325
+ 1
1326
+ 8 .
1327
+ This completes the proof of Lemma 3.9.
1328
+ Lemma 3.10. Assume that the Matching Angle Hypothesis holds. Let us
1329
+ fix an exponent s ∈ [1, 3
1330
+ 2), and let Br(p) denote a Euclidean ball of radius
1331
+ r ≤ 1. If λr is sufficiently large, then
1332
+
1333
+ rs+1−n
1334
+
1335
+ G(α1,α2,α3)
1336
+ λ
1337
+ ∩{uα2≥−λ− 7
1338
+ 8 r
1339
+ 1
1340
+ 8 }∩{uα3≤−λ− 3
1341
+ 4 r
1342
+ 1
1343
+ 4 }∩Br(p)
1344
+ V s
1345
+ λ,−
1346
+ � 1
1347
+ s
1348
+ ≤ C (λr)
1349
+ 1
1350
+ 8 − 7
1351
+ 8s
1352
+ for all pairwise distinct elements α1, α2, α3 ∈ A.
1353
+ Proof. We distinguish two cases:
1354
+ Case 1: Suppose that Ω ∩ {uα1 = 0} ∩ {uα2 = 0} = ∅. By continuity, we
1355
+ can find a real number δ such that Ω ∩ {uα1 ≥ −δ} ∩ {uα2 ≥ −δ} = ∅. If λr
1356
+ is sufficiently large, then λ− 7
1357
+ 8r
1358
+ 1
1359
+ 8 ≤ δ. This implies
1360
+ G(α1,α2,α3)
1361
+ λ
1362
+ ∩ {uα2 ≥ −λ− 7
1363
+ 8 r
1364
+ 1
1365
+ 8 }
1366
+ ⊂ Σλ ∩ {uα1 ≥ −δ} ∩ {uα2 ≥ −δ} = ∅.
1367
+ Hence, the assertion is trivially true in this case.
1368
+
1369
+ 16
1370
+ SIMON BRENDLE
1371
+ Case 2: Suppose that Ω ∩ {uα1 = 0} ∩ {uα2 = 0} ̸= ∅. It follows from
1372
+ Assumption 3.1 that the hypersurfaces {uα1 = 0} and {uα2 = 0} intersect
1373
+ transversally.
1374
+ Let us consider an arbitrary point x ∈ G(α1,α2,α3)
1375
+ λ
1376
+ with uα2(x) ≥ −λ− 7
1377
+ 8r
1378
+ 1
1379
+ 8
1380
+ and uα3(x) ≤ −λ− 3
1381
+ 4r
1382
+ 1
1383
+ 4. Clearly, uα1(x) ≥ −λ− 7
1384
+ 8r
1385
+ 1
1386
+ 8 by definition of G(α1,α2,α3)
1387
+ λ
1388
+ .
1389
+ Moreover, uα(x) ≤ −λ− 3
1390
+ 4r
1391
+ 1
1392
+ 4 for all α ∈ A \ {α1, α2}. Consequently, we can
1393
+ find a point y ∈ Ω such that uα1(y) = uα2(y) = 0 and d(x, y) ≤ Cλ− 7
1394
+ 8r
1395
+ 1
1396
+ 8.
1397
+ The Matching Angle Hypothesis implies ⟨να1, να2⟩ = ⟨Nα1, Nα2⟩ at the point
1398
+ y. Consequently, |⟨να1, να2⟩ − ⟨Nα1, Nα2⟩| ≤ C d(x, y) at the point x. From
1399
+ this, we deduce that
1400
+ |π(να1)|
1401
+ �� �
1402
+ α∈A eλuα |∇uα| να
1403
+ �� −
1404
+ |P(Nα1)|
1405
+ �� �
1406
+ α∈A eλuα |∇uα| Nα
1407
+ �� ≥ −C d(x, y) − C e−(λr)
1408
+ 1
1409
+ 4
1410
+ and
1411
+ |π(να2)|
1412
+ �� �
1413
+ α∈A eλuα |∇uα| να
1414
+ �� −
1415
+ |P(Nα2)|
1416
+ �� �
1417
+ α∈A eλuα |∇uα| Nα
1418
+ �� ≥ −C d(x, y) − C e−(λr)
1419
+ 1
1420
+ 4
1421
+ at the point x. Thus, we conclude that
1422
+ Vλ(x) ≥ −Cλ
1423
+ 1
1424
+ 8 r− 7
1425
+ 8
1426
+ for each point x ∈ G(α1,α2,α3)
1427
+ λ
1428
+ ∩ {uα2 ≥ −λ− 7
1429
+ 8 r
1430
+ 1
1431
+ 8 } ∩ {uα3 ≤ −λ− 3
1432
+ 4r
1433
+ 1
1434
+ 4}. By
1435
+ transversality, the set {0 ≥ uα1 ≥ −λ− 7
1436
+ 8r
1437
+ 1
1438
+ 8} ∩ {0 ≥ uα2 ≥ −λ− 7
1439
+ 8 r
1440
+ 1
1441
+ 8} ∩ Br(p)
1442
+ can be covered by C (λr)
1443
+ 7(n−2)
1444
+ 8
1445
+ Euclidean balls of radius λ− 7
1446
+ 8 r
1447
+ 1
1448
+ 8 .
1449
+ More-
1450
+ over, the intersection of Σλ with each ball of radius λ− 7
1451
+ 8r
1452
+ 1
1453
+ 8 has area at
1454
+ most C (λr)− 7(n−1)
1455
+ 8
1456
+ rn−1. This shows that Σλ ∩ {uα1 ≥ −λ− 7
1457
+ 8 r
1458
+ 1
1459
+ 8 } ∩ {uα2 ≥
1460
+ −λ− 7
1461
+ 8 r
1462
+ 1
1463
+ 8 } ∩ Br(p) has area at most C (λr)− 7
1464
+ 8 rn−1. Since
1465
+ G(α1,α2,α3)
1466
+ λ
1467
+ ∩ {uα2 ≥ −λ− 7
1468
+ 8r
1469
+ 1
1470
+ 8} ∩ Br(p)
1471
+ ⊂ Σλ ∩ {uα1 ≥ −λ− 7
1472
+ 8 r
1473
+ 1
1474
+ 8 } ∩ {uα2 ≥ −λ− 7
1475
+ 8r
1476
+ 1
1477
+ 8} ∩ Br(p),
1478
+ it follows that
1479
+
1480
+ rs+1−n
1481
+
1482
+ G(α1,α2,α3)
1483
+ λ
1484
+ ∩{uα2≥−λ− 7
1485
+ 8 r
1486
+ 1
1487
+ 8 }∩{uα3≤−λ− 3
1488
+ 4 r
1489
+ 1
1490
+ 4 }∩Br(p)
1491
+ V s
1492
+ λ,−
1493
+ � 1
1494
+ s
1495
+ ≤ C (λr)
1496
+ 1
1497
+ 8 − 7
1498
+ 8s .
1499
+ This completes the proof of Lemma 3.10.
1500
+ Lemma 3.11. Let us fix an exponent s ∈ [1, 3
1501
+ 2), and let Br(p) denote a
1502
+ Euclidean ball of radius r ≤ 1. If λr is sufficiently large, then
1503
+
1504
+ rs+1−n
1505
+
1506
+ G(α1,α2,α3)
1507
+ λ
1508
+ ∩{uα3≥−λ− 3
1509
+ 4 r
1510
+ 1
1511
+ 4 }∩Br(p)
1512
+ V s
1513
+ λ,−
1514
+ � 1
1515
+ s
1516
+ ≤ C (λr)1− 3
1517
+ 2s
1518
+ for all pairwise distinct elements α1, α2, α3 ∈ A.
1519
+
1520
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
1521
+ 17
1522
+ Proof. We distinguish two cases:
1523
+ Case 1: Suppose that Ω ∩ {uα1 = 0} ∩ {uα2 = 0} ∩ {uα3 = 0} = ∅. By
1524
+ continuity, we can find a real number δ such that Ω ∩ {uα1 ≥ −δ} ∩ {uα2 ≥
1525
+ −δ} ∩ {uα3 ≥ −δ} = ∅. If λr is sufficiently large, then λ− 3
1526
+ 4r
1527
+ 1
1528
+ 4 ≤ δ. This
1529
+ implies
1530
+ G(α1,α2,α3)
1531
+ λ
1532
+ ∩ {uα3 ≥ −λ− 3
1533
+ 4 r
1534
+ 1
1535
+ 4 }
1536
+ ⊂ Σλ ∩ {uα1 ≥ −δ} ∩ {uα2 ≥ −δ} ∩ {uα3 ≥ −δ} = ∅.
1537
+ Hence, the assertion is trivially true in this case.
1538
+ Case 2: Suppose that Ω ∩ {uα1 = 0} ∩ {uα2 = 0} ∩ {uα3 = 0} ̸= ∅. It
1539
+ follows from Assumption 3.1 that the hypersurfaces {uα1 = 0}, {uα2 = 0},
1540
+ and {uα3 = 0} intersect transversally.
1541
+ Let us consider an arbitrary point x ∈ G(α1,α2,α3)
1542
+ λ
1543
+ with uα3(x) ≥ −λ− 3
1544
+ 4r
1545
+ 1
1546
+ 4.
1547
+ Clearly,
1548
+ Vλ(x) ≥ −Cλ
1549
+ for all points x ∈ G(α1,α2,α3)
1550
+ λ
1551
+ ∩ {uα3 ≤ −λ− 3
1552
+ 4 r
1553
+ 1
1554
+ 4 }. By transversality, the set
1555
+ {0 ≥ uα1 ≥ −λ− 3
1556
+ 4r
1557
+ 1
1558
+ 4}∩{0 ≥ uα2 ≥ −λ− 3
1559
+ 4 r
1560
+ 1
1561
+ 4}∩{0 ≥ uα3 ≥ −λ− 3
1562
+ 4 r
1563
+ 1
1564
+ 4 }∩Br(p)
1565
+ can be covered by C (λr)
1566
+ 3(n−3)
1567
+ 4
1568
+ Euclidean balls of radius λ− 3
1569
+ 4 r
1570
+ 1
1571
+ 4 .
1572
+ More-
1573
+ over, the intersection of Σλ with each ball of radius λ− 3
1574
+ 4r
1575
+ 1
1576
+ 4 has area at
1577
+ most C (λr)− 3(n−1)
1578
+ 4
1579
+ rn−1. This shows that Σλ ∩ {uα1 ≥ −λ− 3
1580
+ 4 r
1581
+ 1
1582
+ 4 } ∩ {uα2 ≥
1583
+ −λ− 3
1584
+ 4 r
1585
+ 1
1586
+ 4 }∩{uα3 ≥ −λ− 3
1587
+ 4 r
1588
+ 1
1589
+ 4 }∩Br(p) has area at most C (λr)− 3
1590
+ 2 rn−1. Since
1591
+ G(α1,α2,α3)
1592
+ λ
1593
+ ∩ {uα3 ≥ −λ− 3
1594
+ 4 r
1595
+ 1
1596
+ 4 } ∩ Br(p)
1597
+ ⊂ Σλ ∩ {uα1 ≥ −λ− 3
1598
+ 4 r
1599
+ 1
1600
+ 4 } ∩ {uα2 ≥ −λ− 3
1601
+ 4 r
1602
+ 1
1603
+ 4 } ∩ {uα3 ≥ −λ− 3
1604
+ 4r
1605
+ 1
1606
+ 4} ∩ Br(p),
1607
+ it follows that
1608
+
1609
+ rs+1−n
1610
+
1611
+ G(α1,α2,α3)
1612
+ λ
1613
+ ∩{uα3≥−λ− 3
1614
+ 4 r
1615
+ 1
1616
+ 4 }∩Br(p)
1617
+ V s
1618
+ λ,−
1619
+ � 1
1620
+ s
1621
+ ≤ C (λr)1− 3
1622
+ 2s .
1623
+ This completes the proof of Lemma 3.11.
1624
+ Proposition 3.12. Assume that the mean curvature of the hypersurface
1625
+ {uα = 0} with respect to g is nonnegative at each point in Ω ∩ {uα = 0}
1626
+ and that the Matching Angle Hypothesis is satisfied. Let us fix an exponent
1627
+ s ∈ [1, 3
1628
+ 2), and let Br(p) denote a Euclidean ball of radius r ≤ 1. If λr is
1629
+ sufficiently large, then
1630
+
1631
+ rs+1−n
1632
+
1633
+ Σλ∩Br(p)
1634
+ V s
1635
+ λ,−
1636
+ � 1
1637
+ s
1638
+ ≤ C (λr)−1 + C (λr)
1639
+ 1
1640
+ 8 − 7
1641
+ 8s + C (λr)1− 3
1642
+ 2s .
1643
+
1644
+ 18
1645
+ SIMON BRENDLE
1646
+ Proof. Combining Lemma 3.9, Lemma 3.10, and Lemma 3.11, we con-
1647
+ clude that
1648
+
1649
+ rs+1−n
1650
+
1651
+ G(α1,α2,α3)
1652
+ λ
1653
+ ∩Br(p)
1654
+ V s
1655
+ λ,−
1656
+ � 1
1657
+ s
1658
+ ≤ C (λr)−1 + C (λr)
1659
+ 1
1660
+ 8− 7
1661
+ 8s + C (λr)1− 3
1662
+ 2s
1663
+ for all pairwise distinct elements α1, α2, α3 ∈ A. On the other hand, Σλ =
1664
+
1665
+ α1,α2,α3 G(α1,α2,α3)
1666
+ λ
1667
+ , where the union is taken over all pairwise distinct el-
1668
+ ements α1, α2, α3 ∈ A.
1669
+ Hence, the assertion follows by summation over
1670
+ α1, α2, α3. This completes the proof of Proposition 3.12.
1671
+ Corollary 3.13. Assume that the mean curvature of the hypersurface {uα =
1672
+ 0} with respect to g is nonnegative at each point in Ω ∩ {uα = 0} and that
1673
+ the Matching Angle Hypothesis is satisfied. Let us fix an exponent s ∈ [1, 3
1674
+ 2).
1675
+ Then
1676
+ sup
1677
+ p∈Rn sup
1678
+ r≤1
1679
+
1680
+ rs+1−n
1681
+
1682
+ Σλ∩Br(p)
1683
+ V s
1684
+ λ,−
1685
+ � 1
1686
+ s
1687
+ → 0
1688
+ as λ → ∞.
1689
+ Proof. Let us consider an arbitrary sequence λl → ∞. By Proposition
1690
+ 3.6, we can find a sequence of positive real numbers δl → 0 such that
1691
+ sup
1692
+ p∈Rn
1693
+ sup
1694
+ r≤(δlλl)−1
1695
+
1696
+ rs+1−n
1697
+
1698
+ Σλl∩Br(p)
1699
+ V s
1700
+ λl,−
1701
+ � 1
1702
+ s
1703
+ → 0
1704
+ as l → ∞. On the other hand, Proposition 3.12 implies that
1705
+ sup
1706
+ p∈Rn
1707
+ sup
1708
+ (δlλl)−1≤r≤1
1709
+
1710
+ rs+1−n
1711
+
1712
+ Σλl∩Br(p)
1713
+ V s
1714
+ λl,−
1715
+ � 1
1716
+ s
1717
+ → 0
1718
+ as l → ∞. Putting these facts together, the assertion follows.
1719
+ 4. Proof of the Theorem 1.1
1720
+ Throughout this section, we assume that n ≥ 3 is an odd integer, and Ω is
1721
+ a compact polytope in Rn with non-empty interior. Let g be a Riemannian
1722
+ metric which is defined on an open set containing Ω and has nonnegative
1723
+ scalar curvature at each point in Ω. We assume that the mean curvature of
1724
+ the hypersurface {uα = 0} with respect to g is nonnegative at each point in
1725
+ Ω ∩ {uα = 0} and that the Matching Angle Hypothesis is satisfied.
1726
+ Let U denote a Euclidean ball such that the closure of U is contained in
1727
+ the interior of Ω. Consider a sequence λl → ∞. Note that U ⊂ Ωλl if l is
1728
+ sufficiently large. By Proposition 2.14 we can find an m-tuple of harmonic
1729
+ spinors s(l) = (s(l)
1730
+ 1 , . . . , s(l)
1731
+ m ) such that s(l) is defined on Ωλl; s(l) does not
1732
+ vanish identically; Ds(l) = 0 in Ωλl; and χs(l) = s(l) on Σλl.
1733
+ Standard
1734
+
1735
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
1736
+ 19
1737
+ unique continuation arguments imply that
1738
+
1739
+ U
1740
+ �m
1741
+ α=1 |s(l)
1742
+ α |2 dvolg > 0 if l is
1743
+ sufficiently large. By scaling, we can arrange that
1744
+
1745
+ U
1746
+ m
1747
+
1748
+ α=1
1749
+ |s(l)
1750
+ α |2 dvolg = 1
1751
+ for each l. Using Proposition 2.10, we obtain
1752
+
1753
+ Ωλl
1754
+ m
1755
+
1756
+ l=1
1757
+ |∇s(l)
1758
+ α |2 dvolg + 1
1759
+ 4
1760
+
1761
+ Ωλl
1762
+ m
1763
+
1764
+ α=1
1765
+ R |s(l)
1766
+ α |2 dvolg
1767
+ ≤ −1
1768
+ 2
1769
+
1770
+ Σλl
1771
+ (H − ∥dN∥tr)
1772
+ � m
1773
+
1774
+ α=1
1775
+ |s(l)
1776
+ α |2
1777
+
1778
+ dσg.
1779
+ Proposition 3.5 implies that H − ∥dN∥tr ≥ Vλl at each point on Σλl. Con-
1780
+ sequently,
1781
+
1782
+ Ωλl
1783
+ m
1784
+
1785
+ l=1
1786
+ |∇s(l)
1787
+ α |2 dvolg + 1
1788
+ 4
1789
+
1790
+ Ωλl
1791
+ m
1792
+
1793
+ α=1
1794
+ R |s(l)
1795
+ α |2 dvolg
1796
+ ≤ 1
1797
+ 2
1798
+
1799
+ Σλl
1800
+ Vλl,−
1801
+ � m
1802
+
1803
+ α=1
1804
+ |s(l)
1805
+ α |2
1806
+
1807
+ dσg.
1808
+ Note that the hypersurface Σλl = ∂Ωλl can be written as a radial graph
1809
+ with bounded slope. From this, it is easy to see that Ωλl is bi-Lipschitz
1810
+ equivalent to the Euclidean unit ball, with constants that are independent
1811
+ of λl. Using Theorem A.7 and Proposition 3.13, we obtain
1812
+
1813
+ Σλl
1814
+ Vλl,− F 2 dσg ≤ εl
1815
+
1816
+ Ωλl
1817
+ |∇F|2 dvolg + εl
1818
+ � �
1819
+ Σλl
1820
+ F dσg
1821
+ �2
1822
+ for every smooth function F on Ωλl, where εl → 0 as l → ∞. Moreover, the
1823
+ Sobolev trace theorem implies
1824
+ � �
1825
+ Σλl
1826
+ F dσg
1827
+ �2
1828
+ ≤ C
1829
+
1830
+ Ωl
1831
+ |∇F|2 dvolg + C
1832
+
1833
+ U
1834
+ F 2 dvolg
1835
+ for every smooth function F on Ωλl, where C is a uniform constant inde-
1836
+ pendent of l. Putting these facts together, we conclude that
1837
+
1838
+ Σλl
1839
+ Vλl,− F 2 dσg ≤ Cεl
1840
+
1841
+ Ωλl
1842
+ |∇F|2 dvolg + Cεl
1843
+
1844
+ U
1845
+ F 2 dvolg
1846
+
1847
+ 20
1848
+ SIMON BRENDLE
1849
+ for every smooth function F on Ωλl. In the next step, we apply this inequal-
1850
+ ity with F =
1851
+
1852
+ δ2 + �m
1853
+ α=1 |s(l)
1854
+ α |2� 1
1855
+ 2 , and send δ → 0. This gives
1856
+
1857
+ Ωλl
1858
+ m
1859
+
1860
+ l=1
1861
+ |∇s(l)
1862
+ α |2 dvolg + 1
1863
+ 4
1864
+
1865
+ Ωλl
1866
+ m
1867
+
1868
+ α=1
1869
+ R |s(l)
1870
+ α |2 dvolg
1871
+ ≤ 1
1872
+ 2
1873
+
1874
+ Σλl
1875
+ Vλl,−
1876
+ � m
1877
+
1878
+ α=1
1879
+ |s(l)
1880
+ α |2
1881
+
1882
+ dσg
1883
+ ≤ Cεl
1884
+
1885
+ Ωλl
1886
+ m
1887
+
1888
+ α=1
1889
+ |∇s(l)
1890
+ α |2 dvolg + Cεl
1891
+
1892
+ U
1893
+ m
1894
+
1895
+ α=1
1896
+ |s(l)
1897
+ α |2 dvolg
1898
+ for each l. Since the scalar curvature is nonnegative, it follows that
1899
+
1900
+ Ωλl
1901
+ m
1902
+
1903
+ α=1
1904
+ |∇s(l)
1905
+ α |2 dvolg ≤ Cεl
1906
+
1907
+ U
1908
+ m
1909
+
1910
+ α=1
1911
+ |s(l)
1912
+ α |2 dvolg
1913
+ if l is sufficiently large. Passing to the limit as l → ∞, we obtain a non-
1914
+ vanishing m-tuple of parallel spinors defined on the interior of Ω. Conse-
1915
+ quently, the Ricci tensor of g vanishes at each point in Ω. This completes
1916
+ the proof of Theorem 1.1.
1917
+ Appendix A. A variant of a theorem of Fefferman and Phong
1918
+ In this section, we describe a variant of an estimate due to Fefferman and
1919
+ Phong [4], which plays a central role in our argument. We denote by Q the
1920
+ collection of all (n − 1)-dimensional cubes of the form
1921
+ [2mj1, 2m(j1 + 1)] × . . . × [2mjn−1, 2m(jn−1 + 1)] × {0},
1922
+ where m ∈ Z and j1, . . . , jn−1 ∈ Z. For each Q ∈ Q, we denote by |Q| the
1923
+ (n − 1)-dimensional volume of Q.
1924
+ Theorem A.1. Fix an exponent s ∈ (1, n − 1). Let V be a nonnegative
1925
+ continuous function on the hyperplane Rn−1 × {0} ⊂ Rn with the property
1926
+ that
1927
+ diam(Q)s+1−n
1928
+
1929
+ Q
1930
+ V s ≤ 1
1931
+ for each (n − 1)-dimensional cube Q ∈ Q.
1932
+ Suppose that F is a smooth
1933
+ function on the half-space Rn
1934
+ + = {x ∈ Rn : xn ≥ 0}, and let f denote the
1935
+ restriction of F to the boundary ∂Rn
1936
+ + = Rn−1 × {0}. Then
1937
+
1938
+ Q
1939
+ V f 2 ≤ C
1940
+
1941
+ Q×[0,diam(Q)]
1942
+ |∇F|2 + C diam(Q)−1 |Q|−1
1943
+ � �
1944
+ Q
1945
+ |f|
1946
+ �2
1947
+ .
1948
+ for each (n − 1)-dimensional cube Q ∈ Q.
1949
+
1950
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
1951
+ 21
1952
+ The proof of Theorem A.1 involves a straightforward adaptation of the
1953
+ arguments of Fefferman and Phong [4]. Let us fix an exponent t such that
1954
+ s > t > 1. We define a nonnegative function W : Rn−1 × {0} → R by
1955
+ W(x) =
1956
+ sup
1957
+ Q∈Q,x∈Q
1958
+
1959
+ |Q|−1
1960
+
1961
+ Q
1962
+ V s
1963
+ � 1
1964
+ s
1965
+ for each point x ∈ Rn−1 × {0}. In other words, W s is the maximal function
1966
+ associated with the function V s. Clearly, V ≤ W.
1967
+ We assume that F is a smooth function on the half-space Rn
1968
+ + = {x ∈
1969
+ Rn : xn ≥ 0}, and let f denote the restriction of F to the boundary ∂Rn
1970
+ + =
1971
+ Rn−1 × {0}.
1972
+ For each (n − 1)-dimensional cube Q ∈ Q, we denote by
1973
+ fQ = |Q|−1 �
1974
+ Q f the mean value of f over the cube Q.
1975
+ Lemma A.2. For each (n − 1)-dimensional cube Q0 ∈ Q, we have
1976
+
1977
+ |Q0|−1
1978
+
1979
+ Q0
1980
+ W t
1981
+ � 1
1982
+ t
1983
+ ≤ C
1984
+ sup
1985
+ Q∈Q,Q0⊂Q
1986
+
1987
+ |Q|−1
1988
+
1989
+ Q
1990
+ V s
1991
+ � 1
1992
+ s
1993
+ .
1994
+ Proof. For abbreviation, let
1995
+ Λ =
1996
+ sup
1997
+ Q∈Q,Q0⊂Q
1998
+
1999
+ |Q|−1
2000
+
2001
+ Q
2002
+ V s
2003
+ � 1
2004
+ s
2005
+ .
2006
+ We define a nonnegative function W0 : Q0 → R by
2007
+ W0(x) =
2008
+ sup
2009
+ Q∈Q,x∈Q⊂Q0
2010
+
2011
+ |Q|−1
2012
+
2013
+ Q
2014
+ V s
2015
+ � 1
2016
+ s
2017
+ for each point x ∈ Q0. Clearly,
2018
+ W(x) = max{Λ, W0(x)}
2019
+ for each point x ∈ Q0. The Hardy-Littlewood maximal inequality implies
2020
+ |Q0|−1 |{x ∈ Q0 : W0(x)s > α}| ≤ Cα−1 |Q0|−1
2021
+
2022
+ Q0
2023
+ V s ≤ Cα−1 Λs
2024
+ for all α > 0.
2025
+ We multiply both sides by α
2026
+ t
2027
+ s−1 and integrate over α ∈
2028
+ [Λs, ∞). This gives
2029
+ |Q0|−1
2030
+
2031
+ Q0
2032
+ W t
2033
+ 0 ≤ C Λt,
2034
+ hence
2035
+ |Q0|−1
2036
+
2037
+ Q0
2038
+ W t ≤ C Λt.
2039
+ This completes the proof of Lemma A.2.
2040
+
2041
+ 22
2042
+ SIMON BRENDLE
2043
+ Lemma A.3. Given a real number ε > 0, we can find a real number δ > 0
2044
+ with the following property. If Q0 is an (n − 1)-dimensional cube in Q and
2045
+ A ⊂ Q0 is a Borel set with |A| ≤ δ |Q0|, then
2046
+
2047
+ A
2048
+ W ≤ ε
2049
+
2050
+ Q0
2051
+ W.
2052
+ Proof. Using Lemma A.2, we obtain
2053
+
2054
+ |Q0|−1
2055
+
2056
+ Q0
2057
+ W t
2058
+ � 1
2059
+ t
2060
+ ≤ C
2061
+ sup
2062
+ Q∈Q,Q0⊂Q
2063
+
2064
+ |Q|−1
2065
+
2066
+ Q
2067
+ V s
2068
+ � 1
2069
+ s
2070
+ .
2071
+ Moreover,
2072
+ sup
2073
+ Q∈Q,Q0⊂Q
2074
+
2075
+ |Q|−1
2076
+
2077
+ Q
2078
+ V s
2079
+ � 1
2080
+ s
2081
+ ≤ inf
2082
+ Q0 W ≤ |Q0|−1
2083
+
2084
+ Q0
2085
+ W.
2086
+ Therefore,
2087
+
2088
+ |Q0|−1
2089
+
2090
+ Q0
2091
+ W t
2092
+ � 1
2093
+ t
2094
+ ≤ C |Q0|−1
2095
+
2096
+ Q0
2097
+ W.
2098
+ Hence, if A ⊂ Q0 is a Borel set with |A| ≤ δ Q0, then
2099
+
2100
+ A
2101
+ W ≤ |A|
2102
+ t−1
2103
+ t
2104
+ � �
2105
+ Q0
2106
+ W t
2107
+ � 1
2108
+ t
2109
+ ≤ δ
2110
+ t−1
2111
+ t |Q0|
2112
+ t−1
2113
+ t
2114
+ � �
2115
+ Q0
2116
+ W t
2117
+ � 1
2118
+ t
2119
+ ≤ Cδ
2120
+ t−1
2121
+ t
2122
+
2123
+ Q0
2124
+ W.
2125
+ This completes the proof of Lemma A.3.
2126
+ Lemma A.4. For each (n − 1)-dimensional cube Q0 ∈ Q, we have
2127
+ |Q0|−1
2128
+
2129
+ Q0
2130
+ W ≤ C diam(Q0)−1.
2131
+ Proof. Using Lemma A.2, we obtain
2132
+
2133
+ |Q0|−1
2134
+
2135
+ Q0
2136
+ W t
2137
+ � 1
2138
+ t
2139
+ ≤ C
2140
+ sup
2141
+ Q∈Q,Q0⊂Q
2142
+
2143
+ |Q|−1
2144
+
2145
+ Q
2146
+ V s
2147
+ � 1
2148
+ s
2149
+ .
2150
+ Moreover, our assumption implies that
2151
+
2152
+ |Q|−1
2153
+
2154
+ Q
2155
+ V s
2156
+ � 1
2157
+ s
2158
+ ≤ C diam(Q)−1
2159
+ for each (n − 1)-dimensional cube Q ∈ Q. Putting these facts together, the
2160
+ assertion follows.
2161
+ Lemma A.5. For each (n − 1)-dimensional cube Q0 ∈ Q, we have
2162
+
2163
+ Q0
2164
+ V |f − fQ0|2 ≤ C
2165
+
2166
+ Q0
2167
+ Wg2,
2168
+
2169
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
2170
+ 23
2171
+ where the function g : Q0 → R is defined by
2172
+ g(x) =
2173
+ sup
2174
+ Q∈Q,x∈Q⊂Q0
2175
+ |Q|−1
2176
+
2177
+ Q
2178
+ |f − fQ|
2179
+ for x ∈ Q0.
2180
+ Proof. Fix an (n − 1)-dimensional cube Q0 ∈ Q. We define a function
2181
+ h : Q0 → R by
2182
+ h(x) =
2183
+ sup
2184
+ Q∈Q,x∈Q⊂Q0
2185
+ |Q|−1
2186
+
2187
+ Q
2188
+ |f − fQ0|
2189
+ for x ∈ Q0. Note that V ≤ W and |f −fQ0| ≤ h at each point in Q0. Hence,
2190
+ it suffices to prove that
2191
+
2192
+ Q0
2193
+ Wh2 ≤ C
2194
+
2195
+ Q0
2196
+ Wg2.
2197
+ To prove this inequality, let α0 = |Q0|−1 �
2198
+ Q0 |f − fQ0|. For each α > α0,
2199
+ we denote by Qα the set of all (n − 1)-dimensional cubes Q ∈ Q with the
2200
+ following properties:
2201
+ • Q ⊂ Q0.
2202
+ • |Q|−1 �
2203
+ Q |f − fQ0| > α.
2204
+ • If ˜Q is an (n − 1)-dimensional cube in Q with Q ⊊ ˜Q and ˜Q ⊂ Q0,
2205
+ then | ˜Q|−1 �
2206
+ ˜Q |f − fQ0| ≤ α.
2207
+ It is easy to see that
2208
+ |Q|−1
2209
+
2210
+ Q
2211
+ |f − fQ0| ≤ 2n−1α
2212
+ for all Q ∈ Qα. In particular, |fQ − fQ0| ≤ 2n−1α for all Q ∈ Qα. Moreover,
2213
+
2214
+ Q∈Qα
2215
+ Q = {x ∈ Q0 : h(x) > α}.
2216
+ Finally, no point can be contained in the interior of more than one cube in
2217
+ Qα.
2218
+ Let K > 1 and δ ∈ (0, 1) be two real numbers that will be chosen later.
2219
+ For each (n−1)-dimensional cube Q ∈ Qα satisfying |Q|−1 �
2220
+ Q |f −fQ| ≤ δα,
2221
+
2222
+ 24
2223
+ SIMON BRENDLE
2224
+ we have
2225
+ (Kα − |fQ − fQ0|)
2226
+
2227
+ ˜Q∈QKα, ˜Q⊂Q
2228
+ | ˜Q|
2229
+
2230
+
2231
+ ˜Q∈QKα, ˜Q⊂Q
2232
+ � �
2233
+ ˜Q
2234
+ |f − fQ0| −
2235
+
2236
+ ˜Q
2237
+ |fQ − fQ0|
2238
+
2239
+
2240
+
2241
+ ˜Q∈QKα, ˜Q⊂Q
2242
+
2243
+ ˜Q
2244
+ |f − fQ|
2245
+
2246
+
2247
+ Q
2248
+ |f − fQ|
2249
+ ≤ δα |Q|.
2250
+ Recall that |fQ − fQ0| ≤ 2n−1α for all Q ∈ Qα. Hence, if we choose K > 2n,
2251
+ then we obtain
2252
+
2253
+ ˜Q∈QKα, ˜Q⊂Q
2254
+ | ˜Q| ≤ 21−n δ |Q|
2255
+ for each (n − 1)-dimensional cube Q ∈ Qα satisfying |Q|−1 �
2256
+ Q |f − fQ| ≤ δα.
2257
+ We next apply Lemma A.3 with ε = 1
2258
+ 2 K−2. Hence, we can choose δ ∈
2259
+ (0, 1) sufficiently small (depending on K) so that
2260
+
2261
+ ˜Q∈QKα, ˜Q⊂Q
2262
+
2263
+ ˜Q
2264
+ W ≤ 1
2265
+ 2 K−2
2266
+
2267
+ Q
2268
+ W
2269
+ for each (n − 1)-dimensional cube Q ∈ Qα satisfying |Q|−1 �
2270
+ Q |f − fQ| ≤ δα.
2271
+ For each (n−1)-dimensional cube Q ∈ Qα, the set Q∩{h > Kα} is contained
2272
+ in the union �
2273
+ ˜Q∈QKα, ˜Q⊂Q ˜Q. This implies
2274
+
2275
+ Q∩{h>Kα}
2276
+ W ≤ 1
2277
+ 2 K−2
2278
+
2279
+ Q
2280
+ W
2281
+ for each (n − 1)-dimensional cube Q ∈ Qα satisfying |Q|−1 �
2282
+ Q |f − fQ| ≤ δα.
2283
+ On the other hand, if Q is an (n − 1)-dimensional cube in Qα satisfying
2284
+ |Q|−1 �
2285
+ Q |f − fQ| > δα, then g > δα at each point in Q. Therefore,
2286
+
2287
+ Q∩{h>Kα}
2288
+ W ≤
2289
+
2290
+ Q∩{g>δα}
2291
+ W
2292
+ for each (n − 1)-dimensional cube Q ∈ Qα satisfying |Q|−1 �
2293
+ Q |f − fQ| > δα.
2294
+ Putting these facts together, we conclude that
2295
+
2296
+ Q∩{h>Kα}
2297
+ W ≤ 1
2298
+ 2 K−2
2299
+
2300
+ Q
2301
+ W +
2302
+
2303
+ Q∩{g>δα}
2304
+ W
2305
+
2306
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
2307
+ 25
2308
+ for each (n−1)-dimensional cube Q ∈ Qα. Summation over all cubes Q ∈ Qα
2309
+ gives
2310
+
2311
+ {h>Kα}
2312
+ W ≤ 1
2313
+ 2 K−2
2314
+
2315
+ {h>α}
2316
+ W +
2317
+
2318
+ {g>δα}
2319
+ W.
2320
+ This inequality holds for each α > α0. Moreover, since g ≥ α0 at each point
2321
+ in Q0, the inequality is trivially true for α ≤ α0. Finally, we multiply the
2322
+ inequality by α
2323
+ 2 and integrate over α ∈ (0, ∞). This gives
2324
+ K−2
2325
+
2326
+ Q0
2327
+ Wh2 ≤ 1
2328
+ 2 K−2
2329
+
2330
+ Q0
2331
+ Wh2 + δ−2
2332
+
2333
+ Q0
2334
+ Wg2.
2335
+ This completes the proof of Lemma A.5.
2336
+ Lemma A.6. For each (n − 1)-dimensional cube Q0 ∈ Q, we have
2337
+
2338
+ Q0
2339
+ Wg2 ≤ C
2340
+
2341
+ Q0×[0,diam(Q)]
2342
+ |∇F|2,
2343
+ where the function g : Q0 → R is defined by
2344
+ g(x) =
2345
+ sup
2346
+ Q∈Q,x∈Q⊂Q0
2347
+ |Q|−1
2348
+
2349
+ Q
2350
+ |f − fQ|
2351
+ for x ∈ Q0.
2352
+ Proof. Fix an (n−1)-dimensional cube Q0 ∈ Q, and let α0 = |Q0|−1 �
2353
+ Q0 |f−
2354
+ fQ0|. For each α > α0, we denote by Qα the set of all (n − 1)-dimensional
2355
+ cubes Q ∈ Q with the following properties:
2356
+ • Q ⊂ Q0.
2357
+ • |Q|−1 �
2358
+ Q |f − fQ| > α.
2359
+ • If ˜Q is an (n − 1)-dimensional cube in Q with Q ⊊ ˜Q and ˜Q ⊂ Q0,
2360
+ then | ˜Q|−1 �
2361
+ ˜Q |f − f ˜Q| ≤ α.
2362
+ It is easy to see that
2363
+ |Q|−1
2364
+
2365
+ Q
2366
+ |f − fQ| ≤ 2nα
2367
+ for all Q ∈ Qα. Moreover,
2368
+
2369
+ Q∈Qα
2370
+ Q = {x ∈ Q0 : g(x) > α}.
2371
+ Finally, no point can be contained in the interior of more than one cube in
2372
+ Qα.
2373
+
2374
+ 26
2375
+ SIMON BRENDLE
2376
+ Let K > 1 be a real number that will be chosen later. For each (n − 1)-
2377
+ dimensional cube Q ∈ Qα, we have
2378
+
2379
+
2380
+ ˜Q∈QKα, ˜Q⊂Q
2381
+ | ˜Q| ≤
2382
+
2383
+ ˜Q∈QKα, ˜Q⊂Q
2384
+
2385
+ ˜Q
2386
+ |f − f ˜Q|
2387
+ ≤ 2
2388
+
2389
+ ˜Q∈QKα, ˜Q⊂Q
2390
+
2391
+ ˜Q
2392
+ |f − fQ|
2393
+ ≤ 2
2394
+
2395
+ Q
2396
+ |f − fQ|
2397
+ ≤ 2n+1α |Q|.
2398
+ Hence, if we choose K > 2n+2, then
2399
+
2400
+ ˜Q∈QKα, ˜Q⊂Q
2401
+ | ˜Q| ≤ 1
2402
+ 2 |Q|
2403
+ for each cube Q ∈ Qα. For each (n − 1)-dimensional cube Q ∈ Qα, the set
2404
+ Q ∩ {g > Kα} is contained in the union �
2405
+ ˜Q∈QKα, ˜Q⊂Q ˜Q. This implies
2406
+ |Q ∩ {g > Kα}| ≤
2407
+
2408
+ ˜Q∈QKα, ˜Q⊂Q
2409
+ | ˜Q| ≤ 1
2410
+ 2 |Q|,
2411
+ hence
2412
+ |Q ∩ {g ≤ Kα}| ≥ 1
2413
+ 2 |Q|
2414
+ for each (n−1)-dimensional cube Q ∈ Qα. We define a nonnegative function
2415
+ ϕ : Rn−1 × {0} → R by
2416
+ ϕ(x1, . . . , xn−1, 0) =
2417
+ � � diam(Q0)
2418
+ 0
2419
+ |∇F(x1, . . . , xn−1, xn)|2 dxn
2420
+ � 1
2421
+ 2
2422
+ .
2423
+ Moreover, we define a nonnegative function ψ : Q0 → R by
2424
+ ψ(x) =
2425
+ sup
2426
+ Q∈Q,x∈Q⊂Q0
2427
+ |Q|−1
2428
+
2429
+ Q
2430
+ ϕ
2431
+ for each point x ∈ Q0. In other words, ψ is the maximal function associated
2432
+ with ϕ. Using the Sobolev trace theorem, we obtain
2433
+ α ≤ |Q|−1
2434
+
2435
+ Q
2436
+ |f − fQ|
2437
+ ≤ 2 |Q|−1 inf
2438
+ a∈R
2439
+
2440
+ Q
2441
+ |f − a|
2442
+ ≤ C |Q|−1 inf
2443
+ a∈R
2444
+ � �
2445
+ Q×[0,diam(Q)]
2446
+ |∇(F − a)|
2447
+ + diam(Q)−1
2448
+
2449
+ Q×[0,diam(Q)]
2450
+ |F − a|
2451
+
2452
+
2453
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
2454
+ 27
2455
+ for each (n − 1)-dimensional cube Q ∈ Qα. Using the Poincar´e inequality,
2456
+ we conclude that
2457
+ α ≤ C |Q|−1
2458
+
2459
+ Q×[0,diam(Q)]
2460
+ |∇F|
2461
+ ≤ C diam(Q)
2462
+ 1
2463
+ 2 |Q|−1
2464
+
2465
+ Q
2466
+ ϕ
2467
+ ≤ C diam(Q)
2468
+ 1
2469
+ 2 inf
2470
+ Q ψ
2471
+ for each (n − 1)-dimensional cube Q ∈ Qα. This implies
2472
+ α2 diam(Q)−1 |Q| ≤ C
2473
+
2474
+ Q∩{g≤Kα}
2475
+ ψ2
2476
+ for each (n − 1)-dimensional cube Q ∈ Qα. Combining this estimate with
2477
+ Lemma A.4, we obtain
2478
+ α2
2479
+
2480
+ Q
2481
+ W ≤ C
2482
+
2483
+ Q∩{g≤Kα}
2484
+ ψ2
2485
+ for each (n−1)-dimensional cube Q ∈ Qα. Summation over all cubes Q ∈ Qα
2486
+ gives
2487
+ α2
2488
+
2489
+ {g>α}
2490
+ W ≤
2491
+
2492
+ {α<g≤Kα}
2493
+ ψ2.
2494
+ This estimate holds for each α > α0. We now multiply both sides by α−1
2495
+ and integrate over α ∈ (2α0, ∞). This gives
2496
+
2497
+ {g>4α0}
2498
+ Wg2 ≤ C
2499
+
2500
+ Q0
2501
+ ψ2.
2502
+ In the next step, we bound the contribution from the set {g ≤ 4α0}. Using
2503
+ the Sobolev trace theorem, we obtain
2504
+ α0 = |Q0|−1
2505
+
2506
+ Q0
2507
+ |f − fQ0|
2508
+ ≤ 2 |Q0|−1 inf
2509
+ a∈R
2510
+
2511
+ Q0
2512
+ |f − a|
2513
+ ≤ C |Q0|−1 inf
2514
+ a∈R
2515
+ � �
2516
+ Q0×[0,diam(Q0)]
2517
+ |∇(F − a)|
2518
+ + diam(Q0)−1
2519
+
2520
+ Q0×[0,diam(Q)]
2521
+ |F − a|
2522
+
2523
+ .
2524
+
2525
+ 28
2526
+ SIMON BRENDLE
2527
+ Using the Poincar´e inequality, we conclude that
2528
+ α0 ≤ C |Q0|−1
2529
+
2530
+ Q0×[0,diam(Q0)]
2531
+ |∇F|
2532
+ ≤ C diam(Q0)
2533
+ 1
2534
+ 2 |Q0|−1
2535
+
2536
+ Q0
2537
+ ϕ
2538
+ ≤ C diam(Q0)
2539
+ 1
2540
+ 2 inf
2541
+ Q0 ψ.
2542
+ This implies
2543
+ α2
2544
+ 0 diam(Q0)−1 |Q0| ≤ C
2545
+
2546
+ Q0
2547
+ ψ2.
2548
+ Combining this estimate with Lemma A.4, we obtain
2549
+ α2
2550
+ 0
2551
+
2552
+ Q0
2553
+ W ≤ C
2554
+
2555
+ Q0
2556
+ ψ2,
2557
+ hence
2558
+
2559
+ {g≤4α0}
2560
+ Wg2 ≤ C
2561
+
2562
+ Q0
2563
+ ψ2.
2564
+ Putting these facts together, we conclude that
2565
+
2566
+ Q0
2567
+ Wg2 ≤ C
2568
+
2569
+ Q0
2570
+ ψ2.
2571
+ On the other hand, the Hardy-Littlewood maximal inequality implies
2572
+
2573
+ Q0
2574
+ ψ2 ≤ C
2575
+
2576
+ Q0
2577
+ ϕ2 = C
2578
+
2579
+ Q0×[0,diam(Q0)]
2580
+ |∇F|2.
2581
+ This completes the proof of Lemma A.6.
2582
+ After these preparations, we now complete the proof of Theorem A.1.
2583
+ Combining Lemma A.5 and Lemma A.6, we conclude that
2584
+
2585
+ Q0
2586
+ V |f − fQ0|2 ≤ C
2587
+
2588
+ Q0×[0,diam(Q0)]
2589
+ |∇F|2
2590
+ for each (n − 1)-dimensional cube Q0 ∈ Q. This implies
2591
+
2592
+ Q0
2593
+ V f 2 ≤ C
2594
+
2595
+ Q0×[0,diam(Q0)]
2596
+ |∇F|2 + C |Q0|−2
2597
+ � �
2598
+ Q0
2599
+ V
2600
+ � � �
2601
+ Q0
2602
+ |f|
2603
+ �2
2604
+ for each (n − 1)-dimensional cube Q0 ∈ Q. Using the estimate
2605
+ |Q0|−1
2606
+
2607
+ Q0
2608
+ V ≤
2609
+
2610
+ |Q0|−1
2611
+
2612
+ Q0
2613
+ V s
2614
+ � 1
2615
+ s
2616
+ ≤ C diam(Q0)−1,
2617
+ we conclude that
2618
+
2619
+ Q0
2620
+ V f 2 ≤ C
2621
+
2622
+ Q0×[0,diam(Q0)]
2623
+ |∇F|2 + C diam(Q0)−1 |Q0|−1
2624
+ � �
2625
+ Q0
2626
+ |f|
2627
+ �2
2628
+
2629
+ SCALAR CURVATURE RIGIDITY OF CONVEX POLYTOPES
2630
+ 29
2631
+ for each (n − 1)-dimensional cube Q0 ∈ Q.
2632
+ Corollary A.7. Fix an exponent s ∈ (1, n − 1). Let V be a nonnegative
2633
+ continuous function on the unit sphere Sn−1 ⊂ Rn with the property that
2634
+ rs+1−n
2635
+
2636
+ Sn−1∩Br(p)
2637
+ V s ≤ 1
2638
+ for all points p ∈ Rn and all r ≤ 1. Suppose that F is a smooth function on
2639
+ the unit ball Bn = {x ∈ Rn : |x| ≤ 1}, and let f denote the restriction of F
2640
+ to the boundary ∂Bn = Sn−1. Then
2641
+
2642
+ Sn−1 V f 2 ≤ C
2643
+
2644
+ Bn |∇F|2 + C
2645
+ � �
2646
+ Sn−1 |f|
2647
+ �2
2648
+ .
2649
+ References
2650
+ [1] C. B¨ar and W. Ballmann, Boundary value problems for elliptic differential opera-
2651
+ tors of first order, Surveys in Differential Geometry vol. 17, pp. 1–78, Intern. Press,
2652
+ Somerville, 2012
2653
+ [2] C. B¨ar and W. Ballmann, Guide to boundary value problems for Dirac-type operators,
2654
+ arXiv:1307.3021
2655
+ [3] C. B¨ar, B. Hanke, and T. Schick, Remarks on the paper ”On Gromov’s dihedral
2656
+ extremality and rigidity conjectures” by Jinmin Wang, Zhizhang Xie, and Guoliang
2657
+ Yu, arXiv:2202.05180
2658
+ [4] C. Fefferman and D. Phong, Lower bounds for Schr¨odinger equations, Conference on
2659
+ Partial Differential Equations (Saint Jean de Monts, 1982), Conf. No. 7, pp. 1–7, Soc.
2660
+ Math. France, Paris, 1982
2661
+ [5] W. Fulton and J. Harris, Representation Theory, Springer-Verlag, 1991
2662
+ [6] M. Gromov, Dirac and Plateau billiards in domains with corners, Central European
2663
+ Journal of Mathematics 12, 1109–1156 (2014)
2664
+ [7] M. Gromov, Four Lectures on Scalar Curvature, arXiv:1908.10612
2665
+ [8] M. Gromov, Convex Polytopes, dihedral angles, mean curvature, and scalar curvature,
2666
+ arXiv:2207.13346
2667
+ [9] C. Li, A polyhedron comparison theorem for 3-manifolds with positive scalar curvature,
2668
+ Invent. Math. 219, 1–37 (2020)
2669
+ [10] J. Wang, Z. Xie, and G. Yu, On Gromov’s dihedral extremality and rigidity conjec-
2670
+ tures, arXiv:2112.01510
2671
+ Columbia University, 2990 Broadway, New York NY 10027, USA
2672
+
79E4T4oBgHgl3EQfdAwp/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
7NAyT4oBgHgl3EQfcvfE/content/tmp_files/2301.00290v1.pdf.txt ADDED
@@ -0,0 +1,1045 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ BARVINN: Arbitrary Precision DNN Accelerator Controlled by a
2
+ RISC-V CPU
3
+ Mohammadhossein Askarihemmat1, Sean Wagner2, Olexa Bilaniuk3,
4
+ Yassine Hariri4, Yvon Savaria1, Jean-Pierre David1
5
+ 1Ecole Polytechnique Montreal, Canada, 2 IBM, Toronto, Canada, 3 Mila, Montreal, Canada,
6
+ 4 CMC Microsystems, Kingston, Canada
7
+ {mohammad.hossein.askari-hemmat, yvon.savaria, jpdavid}@polymtl.ca, wagnerse@ca.ibm.com,
8
+ olexa.bilaniuk@mila.quebec, hariri@cmc.ca
9
+ ABSTRACT
10
+ We present a DNN accelerator that allows inference at arbitrary
11
+ precision with dedicated processing elements that are configurable
12
+ at the bit level. Our DNN accelerator has 8 Processing Elements
13
+ controlled by a RISC-V controller with a combined 8.2 TMACs of
14
+ computational power when implemented with the recent Alveo
15
+ U250 FPGA platform. We develop a code generator tool that ingests
16
+ CNN models in ONNX format and generates an executable com-
17
+ mand stream for the RISC-V controller. We demonstrate the scalable
18
+ throughput of our accelerator by running different DNN kernels
19
+ and models when different quantization levels are selected. Com-
20
+ pared to other low precision accelerators, our accelerator provides
21
+ run time programmability without hardware reconfiguration and
22
+ can accelerate DNNs with multiple quantization levels, regardless
23
+ of the target FPGA size. BARVINN is an open source project and it
24
+ is available at https://github.com/hossein1387/BARVINN.
25
+ KEYWORDS
26
+ neural networks, hardware acceleration, FPGA, low-precision
27
+ ACM Reference Format:
28
+ Mohammadhossein Askarihemmat1, Sean Wagner2, Olexa Bilaniuk3,, Yas-
29
+ sine Hariri4, Yvon Savaria1, Jean-Pierre David1. 2023. BARVINN: Arbi-
30
+ trary Precision DNN Accelerator Controlled by a RISC-V CPU. In 28th
31
+ Asia and South Pacific Design Automation Conference (ASPDAC ’23), Janu-
32
+ ary 16–19, 2023, Tokyo, Japan. ACM, New York, NY, USA, 7 pages. https:
33
+ //doi.org/10.1145/3566097.3567872
34
+ 1
35
+ INTRODUCTION
36
+ Deep neural networks (DNNs) traditionally rely on floating point
37
+ computations. These operations are slow and costly in terms of
38
+ power consumption and required silicon area compared to fixed-
39
+ point/integer operations. One way to accelerate computation in
40
+ a DNN is to use less precision for computation via quantization
41
+ [12]. This also reduces memory consumption as well as energy
42
+ consumption. For instance, in a 45 nm process, 8-bit integer multi-
43
+ plication and addition take 0.2 pJ and 0.03 pJ, respectively, while the
44
+ Permission to make digital or hard copies of all or part of this work for personal or
45
+ classroom use is granted without fee provided that copies are not made or distributed
46
+ for profit or commercial advantage and that copies bear this notice and the full citation
47
+ on the first page. Copyrights for components of this work owned by others than ACM
48
+ must be honored. Abstracting with credit is permitted. To copy otherwise, or republish,
49
+ to post on servers or to redistribute to lists, requires prior specific permission and/or a
50
+ fee. Request permissions from permissions@acm.org.
51
+ ASPDAC ’23, January 16–19, 2023, Tokyo, Japan
52
+ © 2023 Association for Computing Machinery.
53
+ ACM ISBN 978-1-4503-9783-4/23/01...$15.00
54
+ https://doi.org/10.1145/3566097.3567872
55
+ Table 1: Effects of Quantization on Accuracy and Model Size.
56
+ Task
57
+ Dataset
58
+ Model
59
+ Precision
60
+ A/W
61
+ Acc/
62
+ MAP
63
+ Size
64
+ (MB)
65
+ Classification
66
+ CIFAR
67
+ 100
68
+ ResNet18
69
+ LSQ(2/2)
70
+ 76.81
71
+ 2.889
72
+ LSQ(4/4)
73
+ 76.92
74
+ 5.559
75
+ LSQ(8/8)
76
+ 78.45
77
+ 10.87
78
+ FP32
79
+ 76.82
80
+ 42.8
81
+ Object
82
+ Detection
83
+ VOC-
84
+ 2007
85
+ SSD300-
86
+ ResNet18
87
+ LSQ(2/2)
88
+ 0.61
89
+ 10.34
90
+ LSQ(4/4)
91
+ 0.60
92
+ 11.81
93
+ LSQ(8/8)
94
+ 0.68
95
+ 14.77
96
+ FP32
97
+ 0.59
98
+ 32.49
99
+ same operations with 32-bit floating-point values requires 3.7 pJ
100
+ for multiplication and 0.9 pJ for addition [11]. On an Intel Core i7
101
+ 4770 running at 3.4GHz, multiplication is more than 3 times faster
102
+ for fixed-point compared to floating-point [15]. With recent quanti-
103
+ zation techniques, these benefits are available with little to no loss
104
+ in model performance and accuracy. In [9, 13], their quantization
105
+ schemes showed accuracy losses of 1-3% at 2-bit precision on most
106
+ classification and object detection models. Table 1 illustrates the
107
+ result of applying Learned Scale Quantization (LSQ) [9] with differ-
108
+ ent bit precisions on different models and tasks. Quantized models
109
+ offer accuracy similar to full precision models, while having smaller
110
+ size.
111
+ Mixed-precision quantization [7, 16, 21, 23, 24] further provides
112
+ finer control to reach an optimal solution by learning different
113
+ precisions for each layer of a network. In [23], the authors illustrate
114
+ that using their mixed-precision framework, they reduced model
115
+ latency and energy consumption by a factor of almost 2× with little
116
+ drop in accuracy compared with an 8-bit quantized model.
117
+ Fully benefiting from low-precision in a DNN requires hardware
118
+ that natively supports low-precision computations. Commodity
119
+ hardware can perform arbitrary precision arithmetic by transform-
120
+ ing data-layout and computing with bit-wise instructions [8]. How-
121
+ ever, this approach is extremely costly for general processors, be-
122
+ cause of the overhead for shifting, masking and packing bits to
123
+ the correct format. At the time of writing this paper, there are no
124
+ commercially available general processors (CPU or GPU) that can
125
+ efficiently process data in arbitrary precision.
126
+ In this paper, we propose an arbitrary low-precision DNN hard-
127
+ ware accelerator called BARVINN. Our accelerator is software pro-
128
+ grammable and can be integrated in the RISC-V standard devel-
129
+ opment flow. It is designed as a highly optimized computational
130
+ pipeline for DNNs that introduces low hardware overhead and of-
131
+ fers low-power operation. The contributions of our paper are as
132
+ follows:
133
+ arXiv:2301.00290v1 [cs.AR] 31 Dec 2022
134
+
135
+ ASPDAC ’23, January 16–19, 2023, Tokyo, Japan
136
+ AskariHemmat et al
137
+ • Implementation of a DNN hardware accelerator with arbi-
138
+ trary fixed-point low-precision for matrix-vector multiply
139
+ operations at high-throughput and low power.
140
+ • Implementation of a custom embedded RISC-V CPU to con-
141
+ trol an array of DNN accelerators by software.
142
+ • Data structures for efficiently storing and processing weights
143
+ and activations for high-throughput serial computation.
144
+ • Development of a software code generator for transforming
145
+ DNNs into RISC-V code that executes on our accelerator.
146
+ In section 2, we review relevant DNN accelerators from the liter-
147
+ ature. Section 3 presents the architecture of BARVINN. In section
148
+ 4, a detailed performance analysis of BARVINN is provided and
149
+ compared with other DNN accelerators.
150
+ 2
151
+ RELATED WORKS
152
+ Several DNN hardware accelerators supporting quantization and
153
+ low-precision have been presented in recent years for both FPGA
154
+ and ASIC targets. Here, we discuss the accelerator architectures
155
+ most relevant to our work.
156
+ Recent FPGA-based accelerators include FINN [6, 22], DNNBuilder
157
+ [25], and FILM-QNN [20]. In FINN and DNNBuilder, a software
158
+ toolchain is used to map a trained DNN to generated logic modules
159
+ that are integrated together. An overall processing pipeline is gen-
160
+ erated and then synthesized for the target device. The advantage
161
+ of this approach is that the logic efficiently implements a specific
162
+ DNN with minimal overhead on a device that can be reconfigured
163
+ to different DNNs at different times. However, this approach re-
164
+ quires that all DNN layers be implemented in the logic all at once,
165
+ which limits the size of the DNN to the amount of logic resources
166
+ available on a given FPGA. While FINN supports low-precision
167
+ down to binary and DNNBuilder down to 4-bit, neither supports
168
+ arbitrary and mixed-precision at different DNN layers. In contrast,
169
+ FILM-QNN does support DNN models of arbitrary sizes and quan-
170
+ tized DNNs with mixed precisions. However, it is limited to only 4-
171
+ or 8-bit weights and 5-bit activations due to a bit-packing scheme
172
+ used with the DSP blocks in the FPGA.
173
+ Several ASIC accelerator designs support arbitrary precision. Bit
174
+ Fusion [19] uses a large array of 2-bit processing elements that
175
+ can be fused together to perform up to 8-bit operations. Loom
176
+ [18], and BitBlade [17] employ bit serial computation schemes for
177
+ added flexibility. While bit-serial computation of any single math
178
+ computation (e.g. multiplication) inherently requires additional
179
+ clock cycles and latency over bit-parallel circuits, these designs
180
+ exploit the large number of computations in a DNN that can be
181
+ done in parallel. This is done by implementing a large number
182
+ of bit-serial computational units operating simultaneously, which
183
+ compensates high latency with high throughput. For example, the
184
+ Loom engine consists of 128 × 16 = 2048 Serial Inner-Product units
185
+ (SIPs), each of which performs 16 1 × 1-bit products per cycle.
186
+ 3
187
+ ARCHITECTURE
188
+ BARVINN is designed to provide high-throughput and software
189
+ programmability, while supporting DNNs of arbitrary size and type.
190
+ The high-level architecture of BARVINN is illustrated in Figure 1.
191
+ It consists of the following main components: 1) an array of Matrix
192
+ Vector Units (MVU) [5], and 2) a RISC-V CPU called Pito [4] as a
193
+ controller for the MVU array. The MVUs accelerate common DNN
194
+ computations such as GEMV, GEMM, and convolutions along with
195
+ other operations such as batch normalization, ReLU activation, and
196
+ quantization. Pito coordinates the computations in the MVU array
197
+ while also handling data transfers to and from the host system.
198
+ As it is not possible to foresee all possible neural networks that
199
+ may crop up in the literature in the future, high-level sequencing of
200
+ tensor operations for BARVINN is done in software. To control the
201
+ array of processing elements, unlike the aforementioned accelera-
202
+ tors, BARVINN uses the standard RISC-V RV32I ISA. This allows
203
+ us to leverage the pre-existing software ecosystem. Furthermore,
204
+ by using a CPU that supports a well known ISA, BARVINN is more
205
+ flexible and it can be adapted to support new DNN architectures.
206
+ 3.1
207
+ Matrix Vector Units
208
+ The base configuration of BARVINN is implemented with an array
209
+ of 8 MVUs. Figure 1 shows each MVU is a 64-element vector pipeline
210
+ with several modules: a) a Matrix Vector Product unit (MVP), b)
211
+ RAMs for activations/weights/scalers/biases, c) a scaler unit, d)
212
+ a pooling/activation unit, and e) a quantizer. MVUs compute 64
213
+ output vector elements per clock cycle using a 64 element input data
214
+ vector from the activation RAM and a 64×64 element matrix from
215
+ the weight RAM. Activation and weight RAMs store data in low-
216
+ precision. MVP units operate in low-precision, while subsequent
217
+ units in the pipeline operate in high-precision fixed-point.
218
+ To justify our design choice of operating on 64 element vectors,
219
+ we analysed over 50 models available at the ONNX Model Zoo
220
+ [2] to check the input channel size of convolution layers. Figure 2
221
+ illustrates a distribution of input channel size of all layers among
222
+ those models. We found that 79% of these models use convolution
223
+ with input channel sizes that are multiples of 64.
224
+ 3.1.1
225
+ Matrix-Vector Product Units. Matrix operations are carried
226
+ out by the MVP units. They compute on fixed-point arbitrary pre-
227
+ cision operands from 1- to 16-bit. Each MVP has 64 vector-vector
228
+ product (VVP) pipelines. Each VVP has 64 input lanes with 1-bit
229
+ multipliers, followed by an addition tree with 8-bit output, as shown
230
+ in Figure 4. On every cycle, 64 bits from the activation RAM are
231
+ broadcasted to each of the 64 VVPs, while a 64×64 matrix tile from
232
+ the weight RAM is read with each row of the tile sent to sepa-
233
+ rate VVPs. The VVPs compute a 64-element dot product on 1-bit
234
+ operands in each pipeline. With 64 VVPs per MVP, the overall
235
+ output is a 64-element vector.
236
+ MVPs compute arbitrary bit precision dot-products using the bit-
237
+ serial scheme of [5]. Weights and activations can be unsigned or 2’s-
238
+ complement signed fixed-point. Bit-depth is set independently for
239
+ both, thus allowing for mixed precision. The bit-serial dot-product,
240
+ shown in Algorithm 1, is a multi-cycle sequence starting with the
241
+ most significant bits (MSB) from 64 elements of the activation and
242
+ weight tensors. Bits are multiplied in each lane and results are
243
+ added together across lanes in an addition tree producing an 8-bit
244
+ dot product. This is added to an accumulator/shifter (see Figure 4).
245
+ The MSB×MSB result represents the highest order-of-magnitude
246
+ partial sum of the overall dot product. The MVP then computes the
247
+ next lower order-of-magnitude partial sum by drawing the needed
248
+ bit combinations of the operands. When a change in the order-of-
249
+ magnitude is made, the accumulator is shifted left by 1-bit to align
250
+
251
+ BARVINN: Arbitrary Precision DNN Accelerator Controlled by a RISC-V CPU
252
+ ASPDAC ’23, January 16–19, 2023, Tokyo, Japan
253
+ Figure 1: BARVINN hardware architecture with MVU array and Pito RISC-V controller. Right side is MVU detail.
254
+ to the order-of-magnitude prior to adding the addition tree output.
255
+ MVPs are fully pipelined, allowing them to work on different bit
256
+ combinations at different stages without stalling. The operation
257
+ completes when the dot products of the least significant bits (LSB)
258
+ of the operands are computed and accumulated. For 𝑏𝑤-bit weights
259
+ and 𝑏𝑎-bit activations, the overall operation takes 𝑏𝑤𝑏𝑎 cycles to
260
+ compute one tile of the output vector. The precision of the operands
261
+ is configured separately for each MVU, thus each MVU can process
262
+ different layers with different bit precisions.
263
+ Algorithm 1 Bit-serial dot-product
264
+ 1: 𝑏𝑎, 𝑏𝑤: activation and weight bit precisions
265
+ 2: 𝑥,𝑤: activation and weight vectors of size 𝑛
266
+ 3: 𝑗,𝑘: bit position for activations and weights
267
+ 4: 𝑎𝑐𝑐𝑢𝑚𝑢𝑙𝑎𝑡𝑜𝑟 ← 0
268
+ 5: for 𝑖 ← 𝑏𝑤 + 𝑏𝑎 to 1 do
269
+ 6:
270
+ for all (𝑗,𝑘) where 𝑗 + 𝑘 == 𝑖 do
271
+ 7:
272
+ for 𝑙 ← 0 to 𝑛 − 1 do
273
+ 8:
274
+ 𝑜𝑛𝑒𝑏𝑖𝑡𝑝𝑟𝑜𝑑 = 𝑥𝑗 [𝑙] × 𝑤𝑘 [𝑙]
275
+ 9:
276
+ 𝑎𝑐𝑐𝑢𝑚𝑢𝑙𝑎𝑡𝑜𝑟 ← 𝑎𝑐𝑐𝑢𝑚𝑢𝑙𝑎𝑡𝑜𝑟 + 𝑜𝑛𝑒𝑏𝑖𝑡𝑝𝑟𝑜𝑑
277
+ 10:
278
+ end for
279
+ 11:
280
+ end for
281
+ 11:
282
+ shift 𝑎𝑐𝑐𝑢𝑚𝑢𝑙𝑎𝑡𝑜𝑟 left 1-bit
283
+ 12: end for
284
+ 13: 𝑜𝑢𝑡𝑝𝑢𝑡 ← 𝑎𝑐𝑐𝑢𝑚𝑢𝑙𝑎𝑡𝑜𝑟
285
+ Our bit-serial dot product scheme differs from other architec-
286
+ tures. The computation scheme in BitFusion is based on computing
287
+ the individual products of the overall dot-product, that are then
288
+ summed. This requires a large number of shift-registers to align
289
+ and sum partial products. BARVINN and BitBlade instead inter-
290
+ change the ordering of the computation such that partial products
291
+ of the same magnitude from all individual products are computed
292
+ first and then summed. This reduces the number shifters needed.
293
+ BARVINN additionally serializes the computation of partial prod-
294
+ ucts of different magnitude, requiring only a single fixed shifter and
295
+ a single adder tree, whereas BitBlade requires 16 variable shifters
296
+ and 17 adder trees. BARVINN maintains throughput despite this
297
+ serialized scheme by parallelizing across a wider number of input
298
+ operands and producing a larger number of output products per
299
+ clock cycle. BitFusion and BitBlade are further limited to operand
300
+ sizes 2, 4, and 8-bit, whereas MVUs in BARVINN and SIPs in Loom
301
+ support operands of any bit-depth down to 1-bit. However, Loom’s
302
+ data loading scheme restricts the efficiency for general matrix mul-
303
+ tiply operations when the weight bit depth is below 16, whereas
304
+ BARVINNs is able to maintain full throughput down to 1-bit.
305
+ 3.1.2
306
+ Memories and Data Layout. Activation and weight RAMs
307
+ store data in a bit-transposed format shown in Figure 3 to exploit
308
+ bit-serial computation. When precision is greater than 1 bit, tensor
309
+ elements are organized in blocks where bits of the same order-of-
310
+ magnitude are stored in the same memory word starting with the
311
+ MSBs in the lowest address. A block of 𝑛 elements with precision 𝑏
312
+
313
+ PITO RISC-V Core
314
+ Matrix Vector Unit (MVU)
315
+ Fetch
316
+ Decode
317
+ Execute
318
+ Mem
319
+ Commit
320
+ Write Interconnect
321
+ t write Controller
322
+ Read Controller Read Interco
323
+ onnect
324
+ Word
325
+ Word
326
+ Word
327
+ Word
328
+ imm :
329
+ Instruction
330
+ CSR WRITE
331
+ ALU
332
+ PRF1
333
+ Memory
334
+ D$
335
+ 8KB
336
+ Decoder
337
+
338
+ CSR1
339
+ Scaler
340
+ PC+
341
+ Activation Ram
342
+ Data
343
+ Ram
344
+ select
345
+ Memory
346
+ Weight
347
+ 8KB
348
+ Controller)
349
+ Interface
350
+ Ram
351
+ pank3
352
+ Bias
353
+ APB][
354
+ IRQ
355
+ 7
356
+ Ram
357
+ APB Bus
358
+ 1
359
+ 1
360
+ Memory I
361
+ 64 bits
362
+ 64x64 bits
363
+ MVU1
364
+ MVU8
365
+ MVP
366
+ Weight
367
+ Input
368
+ Bias
369
+ Scaler
370
+ Weight
371
+ Bias
372
+ Scaler
373
+ Input
374
+ 32'bit 32'bit 32'bit
375
+ 32'bit
376
+ Ram
377
+ Ram
378
+ Ram
379
+ Ram
380
+ Ram
381
+ Ram
382
+ Ram
383
+ Ram
384
+ 16bit
385
+ 4
386
+
387
+
388
+ Scaler o
389
+ Scaler 1
390
+ Scaler 2
391
+ Scaler 63
392
+ 32 bit
393
+ 32'bit 32'bit 32'bit
394
+ 32'bit
395
+ Crossbar
396
+ 4
397
+ Pol/ReLUPool/ReLu|Pool/ReLu
398
+ Pool/ReLu
399
+ Legend
400
+ 32'bit 32'bit 32'bit
401
+ 32'bit
402
+ 4
403
+ [ Quantser
404
+ PooIReLu
405
+ > MVU to MVU Interface
406
+ Quantser Quantser
407
+ <> Wire Interface
408
+ 1 bit
409
+ 1 bit
410
+ 1 bit
411
+ 1 bit
412
+ > APB Bus CSR Config Interface
413
+ AXlMemInterface
414
+ L64 bit-ASPDAC ’23, January 16–19, 2023, Tokyo, Japan
415
+ AskariHemmat et al
416
+ Figure 2: Channel sizes in models from ONNX Model Zoo.
417
+ requires 𝑏 memory words of width 𝑛. Activation vector elements
418
+ are in blocks of 64 while weights matrix elements are in blocks of
419
+ 4096 bits in order to load a 64×64 matrix tile. A transposer module
420
+ transforms input data from the host into the needed bit-transposed
421
+ format. Transposition is only needed on the first layer of a DNN
422
+ since MVUs write back to activation RAM in the bit-transposed
423
+ format. Weights are pre-processed by a toolchain on the host and
424
+ loaded into weights RAMs in the expected bit-transposed format.
425
+ Figure 3: Bit-transposed data format for arbitrary precision.
426
+ The layout of the tensors in the RAMs depends on the operation
427
+ to be performed. For GEMV, activations are organized as vectors
428
+ with blocks of 64 elements, while weight matrices are organized
429
+ as a set of 64×64 tiles. For 2D convolutions, layout of activations
430
+ is 𝑁𝐻𝑊𝐶, where the channel dimension 𝐶 is the innermost di-
431
+ mension, followed by width 𝑊 , height 𝐻, and batch size 𝑁. The 𝐶
432
+ dimension is the innermost since several common DNNs such as
433
+ ResNet typically have hidden layer channel depths that are pow-
434
+ ers of 2, and hence align to the 64 input lanes of the VVPs. When
435
+ there are more than 64 channels, the first 64 channels are stored in
436
+ the first block, the second 64 channels are stored into the second
437
+ block and so on. As an example, an input tensor of [N=1, H=8, W=8,
438
+ C=256] with 2-bit precision, will have 4 channel blocks, each block
439
+ will have 64 rows of 2 by 64-bit elements.
440
+ Our weight tensor memory layout for 2D convolutions is de-
441
+ signed to support efficient execution by interleaving the input chan-
442
+ nel dimension 𝐶𝑖 and output channel dimension 𝐶𝑜. Each weight
443
+ memory word contains 64 subsets from the𝐶𝑜 dimension, with each
444
subset containing 64 elements from the 𝐶𝑖 dimension. A contiguous block of 𝑏𝑤 words that stores a complete set of bits for the needed weight precision is referred to as a channel block 𝐶𝑏. The layout for 2D convolution weights is 𝐶𝑜,𝑠 𝐹𝐻 𝐹𝑊 𝐶𝑏, where 𝐶𝑜,𝑠 = 𝐶𝑜/64 are output channel sets, and the kernel size is (𝐹𝑊 , 𝐹𝐻 ).
Figure 4: VVP unit with a shifter-accumulator. Bit 𝑗 from 64 elements of the activation tensor 𝑥 and bit 𝑘 from 64 elements of the weight tensor 𝑤 are input in a bit-serial fashion. Note that some input bits and layers of the 5-deep adder tree are not shown.
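As a purely functional software model of the VVP in Figure 4 (not the RTL; unsigned operands are assumed and the 5-deep adder tree is collapsed into a popcount), one bit-serial dot product can be sketched as:

import numpy as np

def bit_serial_dot(x, w, bits_x, bits_w):
    # sum over bit planes: 2^(j+k) * popcount(x_bit_j AND w_bit_k),
    # which equals the integer dot product for unsigned operands.
    acc = 0
    for j in range(bits_x):                   # activation bit position
        xj = (x >> j) & 1                     # 64-lane bit slice of x
        for k in range(bits_w):               # weight bit position
            wk = (w >> k) & 1                 # 64-lane bit slice of w
            popcount = int(np.sum(xj & wk))   # adder tree over 64 AND gates
            acc += popcount << (j + k)        # shift-accumulate
    return acc

x = np.random.randint(0, 4, 64)   # 2-bit activations
w = np.random.randint(0, 4, 64)   # 2-bit weights
assert bit_serial_dot(x, w, 2, 2) == int(np.dot(x, w))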
454
+ 3.1.3
455
+ Job Configuration and Execution. MVUs are programmed to
456
+ perform jobs such as GEMV or Conv2D operation. A controller sets
457
+ configuration registers that orchestrate the sequence of calculations
458
+ and memory reads to complete an operation in the MVUs. Once the
459
+ job is finished, the MVU will generate an interrupt to the controller,
460
+ indicating that the job is finished and results are ready to be sent
461
+ back to the host or to trigger subsequent operations on the same
462
+ MVU or other MVUs. While a MVU is busy, it can be programmed
463
+ to prepare the next job to minimize idle time.
464
+ Each MVU contains address generation units (AGU) that drive
465
+ the memory access pattern across the activation and weight RAMs.
466
+ The access pattern is managed by a set of up to five nested loops
467
+ with parameters setting the number of iterations and the forward
468
+ or backward address jumps to make on each iteration. The address
469
+ jump scheme reduces the logic to a set of small accumulators to
470
+ control the loops and small adders to compute addresses. Innermost
471
+ loops are usually set to stride over the bit depth of the activations
472
+ and weights. Outer loops are used to iterate over the bit combina-
473
+ tions for the serial dot-product procedure and over the dimensions
474
+ of the tensors. For GEMV, two nested loops are required for both
475
+ activations and weights. Conv2D operations are programmed to
476
+ compute one row of the output activation map per job, requiring
477
+ four nested loops.
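One plausible software model of such an address generator is sketched below; the exact jump semantics of the hardware AGUs may differ, and the names are illustrative.

def agu_addresses(base, lengths, jumps):
    # Nested-loop address generator: `lengths` gives iteration counts from
    # innermost to outermost loop, `jumps` the signed address step applied
    # when the corresponding loop advances (backward strides allowed).
    addr = base
    counters = [0] * len(lengths)
    while True:
        yield addr
        for level, length in enumerate(lengths):
            counters[level] += 1
            if counters[level] < length:
                addr += jumps[level]          # advance this loop
                break
            counters[level] = 0               # carry into the next loop
        else:
            return                            # all loops exhausted

# e.g. stride over 2 activation bits, then over 4 blocks of words
addrs = list(agu_addresses(0, [2, 4], [1, 1]))   # 8 addresses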
478
+ 3.1.4
479
+ Pipeline Modules. Each MVU has modules downstream from
480
+ the MVP to implement other DNN operations including a multi-
481
+ plier/adder unit, a pooling/ReLU unit, and a quantizer/serializer
482
+ unit. These modules operate at high-precision. Fixed-point mul-
483
+ tiplier/adder units (Scaler in Figure 1), compute DNN operations
484
+ such as batch normalization and quantization scaling as in LSQ [9].
485
+ Scalers multiply the MVP output by a 16-bit operand sourced from
486
+ the scaler RAM. In an FPGA, the multiplier is 27 × 16, which aligns
487
+ with the port widths of on-chip fixed DSP units. An adder that
488
follows adds 32-bit fixed-point bias terms from bias RAM. Scaler and bias RAMs have independent AGUs.
[Figure 2 plot residue removed: histogram of the number of layers versus input channel size (1 to 1024), split by whether the channel count is a multiple of 64.]
[Figure 3 diagram residue removed: per-block bit-transposed layout, with bit positions MSB through LSB of k elements stored at consecutive addresses.]
[Figure 4 diagram residue removed: bit-serial inputs x[0..63] and w[0..63] feeding an adder tree and a 32-bit shifter/accumulator.]
The module that follows
541
+ combines max pooling and ReLU (Pool/ReLU in Figure 1), imple-
542
+ mented as a comparator with an internal register. For ReLU, the
543
+ incoming value is checked against the register initially set to 0. The
544
+ combined MaxPool/ReLU is implemented by programming MVUs
545
+ to produce data in the sequence needed for a MaxPool window.
546
+ The pipeline ends at the quantization/serialization unit (QuantSer
547
+ in Figure 1). It takes 32-bit fixed-point data from each of the 64 data
548
+ paths and serializes them into 64 1-bit outputs. It is programmed to
549
+ set the output bit-depth and the MSB position from the input word.
550
+ Combined with scaler units, this is used to implement quantization
551
+ schemes such as LSQ [9]. Serialized outputs of each datapath are
552
+ grouped into a single 64-bit word that is sent either to the activation
553
+ RAM of the same MVU, or to a different MVU via an interconnect.
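A loose software reading of the quantizer/serializer step (illustration only: msb and out_bits mirror the programmable MSB position and output bit-depth; rounding and saturation are ignored):

def quant_serialize(lanes32, msb, out_bits):
    # Keep `out_bits` bits starting at bit position `msb` of each 32-bit lane,
    # and emit them MSB-first as 64-bit words, one bit per lane per word.
    words = []
    for b in range(out_bits):
        bit_pos = msb - b
        word = 0
        for lane, value in enumerate(lanes32):   # 64 lanes
            word |= ((value >> bit_pos) & 1) << lane
        words.append(word)
    return words

# e.g. keep a 2-bit output whose MSB sits at bit 9 of the 32-bit result
out_words = quant_serialize([0x3FF] * 64, msb=9, out_bits=2)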
554
+ 3.1.5
555
+ Interconnect. MVUs can send data to each other via an inter-
556
+ connect implemented as an 8-way crossbar switch with broadcast
557
+ capability. A source MVU is programmed to send its output results
558
+ in a serialized fashion to a given address in the activation memory
559
of one or more destination MVUs. At a destination MVU, access to the write port of its activation RAM is arbitrated with a fixed-priority scheme: the interconnect has the highest priority, followed by the controller, and lastly the MVU itself. The same fixed-priority rule also decides which source MVU wins when several MVUs attempt to write to the same destination at once.
565
+ 3.1.6
566
+ DNN Mapping. Each MVU can be assigned to different lay-
567
+ ers of a DNN, such as convolutions and fully-connected layers.
568
+ Alternatively, a single layer can be split between multiple MVUs
569
with each MVU processing a subset of the input activations and/or
570
+ weights. Partial results are forwarded from one MVU to another
571
+ via the interconnect to process subsequent layers of the network,
572
+ thus creating an overall processing pipeline through the array. By
573
+ sending partial results from one MVU to another, subsequent MVUs
574
+ can begin processing as soon as sufficient data has been received
575
+ from previous layers. For instance, a MVU processing a 3× 3 convo-
576
+ lution requires only 3 rows of activations from the previous layer to
577
+ produce one output row of the layer it is processing. This avoids the
578
+ need to wait until all outputs from a layer are generated, which re-
579
+ duces latency and idle time. Furthermore, the ability to immediately
580
+ process partial layer outputs by subsequent MVUs keeps on-chip
581
+ storage requirements low, since only the partial set of activations
582
+ required to produce the next layer partial output needs to be stored.
583
+ Depending on the performance goal, BARVINN can execute a
584
+ DNN in either Pipelined mode or Distributed mode. In Pipelined
585
+ mode (Figure 5.a), the MVU array can process up to 8 convolutions
586
+ and fully-connected layers all at once. Each MVU can be configured
587
+ to use different precisions. In cases where a DNN model contains
588
+ more than 8 layers, the MVU array can be programmed to process
589
+ the entire model by dividing it into subsets of up to 8 layers each.
590
+ Each MVU can be loaded with weights from layers in each subset,
591
+ either all from the start of processing if there is sufficient weight
592
+ memory available in each MVU or on-the-fly from external memory
593
+ if not. Output activations from the last MVU in the chain can also
594
+ be stored temporarily in off-chip memory and fetched later in the
595
+ case where the first MVU is still processing data from the current
596
+ lap. In the Distributed mode, to minimize latency, the objective is
597
+ to process single batch inputs as fast as possible. As can be seen
598
+ in Figure 5.b, in this mode, the computation of a single layer is
599
+ broken into 8 independent computation regions. All MVUs will be
600
+ programmed to share the same set of weights. To make sure an MVU
601
+ computation is independent of those performed on other MVUs, the
602
+ user might need to copy the input regions that are shared between
603
+ computation units. The programmability of BARVINN allows the
604
+ user to mix and match these execution modes for different layers
605
+ and models to achieve highest performance.
606
+ a. Pipelined mode
607
+ b. Distributed mode
608
+ Figure 5: Execution flow of a DNN on the MVU array in
609
+ Pipelined (a) and Distributed (b) modes. In Pipelined mode
610
+ each MVU processes one layer at a time. In distributed mode,
611
+ the computation of a single layer is distributed among mul-
612
+ tiple MVUs.
613
+ 3.2
614
+ Pito: RISC-V-based Controller
615
+ To make use of MVUs for neural networks, a control unit is required.
616
+ The controller is a barrel RISC-V processor designed to control
617
+ the 8 MVUs using separate but communicating hardware threads
618
+ (harts) that each manage their respective MVUs. DNN layers are ex-
619
+ ecuted either in distributed or in a pipelined fashion, depending on
620
+ whether the DNN is compiled to maximize throughput or minimize
621
+ latency. This design allows MVUs to complete tensor operations
622
+ independently of each other. The drawback is that it requires 8
623
+ microprocessors to execute the 8 programs. We instead amortized
624
+ the fixed costs of the processor by adopting barrel processing. With
625
+ a 8-way threaded processor, we may assign one thread to control
626
+ each of the MVUs. Because every thread comes up for execution
627
+ only every 8 clock cycles, the five pipeline stages (fetch, decode,
628
+ execute, data read & writes and commit) can be completely hidden.
629
+ Branch prediction units are unnecessary. Since tensor operations
630
+ can require hundreds of cycles to execute on a MVU, the barrel
631
+ processor can fully turn over dozens of times in the interim, allow-
632
+ ing each thread to issue the next command to its MVU in a few
633
+ instructions.
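A toy model of the issue pattern (not the pipeline itself) shows why the stages are hidden:

def issue_schedule(num_harts=8, cycles=16):
    # Round-robin (barrel) issue: hart h owns every cycle where
    # cycle % num_harts == h, so consecutive pipeline stages always hold
    # instructions from different harts and no branch prediction is needed.
    return [(cycle, cycle % num_harts) for cycle in range(cycles)]

# hart 0 issues on cycles 0 and 8; hart 1 on cycles 1 and 9; and so on
assert issue_schedule()[8] == (8, 0)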
634
+ We adopted a Harvard architecture and divided the instruction
635
and data RAM, 8KB each, shared between all harts. This gives a 1K word space to store data and instructions to control each MVU.
[Diagram residue removed: Pito RISC-V with UART, 8KB I$/D$ and per-MVU CSRs (pe_start, pe_irq, pe_command, pe_status) over the APB bus to the MVU array and crossbar, plus the Figure 5 example assigning Conv0-Conv7 across MVUs with input [1x63x32x32] and weights [64x64x3x3].]
676
+ The processor executes instructions following compilation order
677
+ and without any further scheduling. A hart scheduler provides
678
+ access to the required resources for the hart at each stage. In the
679
+ fetch stage, each hart loads instructions from the instruction RAM.
680
+ The program counter (PC) and register file for each hart is different
681
+ and the hart scheduler indicates which register should be accessed
682
+ at a given time. The Decode stage decodes instruction and loads
683
+ source registers or an immediate operand. Our RISC-V controller is
684
+ compatible with RV32I RISC-V ISA with minimal support for privi-
685
+ lege specification to make Control and Status Registers (CSRs) and
686
+ Interrupts available to interface with the MVU array. In addition
687
+ to the base CSRs, we have added 74 MVU-specific CSRs to allow
688
+ software to control the processing element array. These CSRs con-
689
+ trol different settings within an MVU such as weight and activation
690
+ precision, AGU’s jump settings, input, weight and output memory
691
+ address and pipeline module selection as described in 3.1.4.
692
+ 3.3
693
+ Code Generator
694
+ BARVINN performs GEMM/GEMV, Convolutions, Maxpooling and
695
+ activation (ReLU). However, it is up to the user to sequence the
696
+ operations within a DNN with software. To facilitate this, we de-
697
+ veloped a code generator that takes a DNN described in ONNX
698
+ [3] and configuration settings (weight/input/output precision), and
699
+ generates RISC-V code for each operation. The code generator ex-
700
+ ports weights to the bit-transposed format described in section 3.1.2.
701
+ Since each MVU works on 64-bit words, the code generator tiles
702
+ each weight tensor in blocks of 64×64. When this cannot be done
703
+ (either tensor input channel or output channel is not a multiple of
704
+ 64), we pad the corresponding tile. Currently, our code generator
705
+ does not apply graph optimization techniques. Also, for now, our
706
+ code generator supports Pipelined mode execution. In the follow-
707
+ ing section, we used our code generator to map PyTorch models to
708
+ micro kernel codes which can then be directly used by BARVINN.
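A sketch of the 64×64 tiling and padding step (our own illustration; the real code generator additionally emits the bit-transposed layout, which is omitted here):

import numpy as np

def tile_weights(w, tile=64):
    # Pad a (C_out, C_in) weight matrix up to multiples of `tile`, then
    # split it into tile x tile blocks, mirroring the 64x64 MVU tiles.
    co, ci = w.shape
    w = np.pad(w, ((0, -co % tile), (0, -ci % tile)))   # zero padding
    return [w[r:r + tile, c:c + tile]
            for r in range(0, w.shape[0], tile)
            for c in range(0, w.shape[1], tile)]

tiles = tile_weights(np.ones((130, 70)))   # 3 x 2 = 6 tiles of 64 x 64
assert len(tiles) == 6 and tiles[0].shape == (64, 64)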
709
+ 4
710
+ PERFORMANCE ANALYSIS AND RESULTS
711
+ 4.1
712
+ Experimental Setup
713
+ To illustrate the performance of BARVINN, we chose the ResNet9
714
+ image classifier model for the CIFAR10 dataset. We trained and
715
+ quantized a ResNet9 model on CIFAR10 using LSQ [9] and used the
716
+ residual distillation [14] technique to remove shortcut connections
717
+ (Plain CNN models). In many image classification DNN models such
718
+ as ResNet [10], the input to the first layer typically consists of less
719
+ than 64 channels. Furthermore, due to sensitivity of the first and
720
+ last layer to information loss, most state-of-the-art compression
721
+ and quantization methods do not apply optimization on input and
722
+ output layers [9], hence keeping these layers untouched and in full
723
+ precision. We have adopted the same technique to compute first
724
+ and last layers on the host or on the RISC-V controller.
725
+ Table 2 shows the performance of ResNet9 on CIFAR10 in the
726
+ PyTorch framework. Once we were satisfied with the performance
727
+ of our quantized model, we exported the trained model to ONNX
728
+ and then used our code generator. Table 3 illustrates the per layer
729
+ computation cost of running ResNet9 on BARVINN with 2-bit ac-
730
+ tivations and weights. All convolutions use a padding of 1. As
731
+ discussed before, we skipped running the first and last layer on
732
+ Table 2: ResNet9 with different bit precision on CIFAR10
733
ResNet9 Model | Precision | Accuracy | Size (Bytes)
Original | Fp32 | 90.8% | 19605141
Plain-CNN | Fp32 | 91.1% | 18912487
Quantized Plain-CNN | Int2 | 89.2% | 1181360
749
+ Table 3: ResNet9 layers for CIFAR10 dataset and computa-
750
+ tion cost. All layers are quantized to 2-bit for activation and
751
+ weights, except for the first and last layers.
752
Layer | Input | Kernel | Output | Cycles
conv0 | [3, 32, 32] | [64, 3, 3, 3] | [64, 32, 32] | N/A
conv1 | [64, 32, 32] | [64, 64, 3, 3] | [64, 32, 32] | 34560
conv2 | [64, 32, 32] | [64, 64, 3, 3] | [64, 32, 32] | 34560
conv3 | [64, 32, 32] | [128, 64, 3, 3] | [128, 16, 16] | 17280
conv4 | [128, 16, 16] | [128, 128, 3, 3] | [128, 8, 8] | 32256
conv5 | [128, 8, 8] | [256, 128, 3, 3] | [128, 8, 8] | 16128
conv6 | [128, 8, 8] | [256, 256, 3, 3] | [256, 4, 4] | 27648
conv7 | [256, 4, 4] | [512, 256, 3, 3] | [256, 4, 4] | 13824
conv8 | [256, 4, 4] | [512, 512, 3, 3] | [512, 4, 4] | 18432
fc | [512, 4, 4] | [10, 512] | [10] | N/A
Total | | | | 194688
809
+ BARVINN and we kept them in their original format. The overall
810
+ computation takes 194,688 cycles to complete.
811
+ Our design was written in Verilog and synthesized using Xilinx
812
+ Vivado 2021.1 for the Xilinx Alveo U250 accelerator card. Synthesis
813
+ results for the RISC-V controller, the processing array, and the accel-
814
+ erator are presented in Table 4. Power consumption was estimated
815
+ using the software tools in Vivado.
816
+ 4.2
817
+ Discussion
818
+ We compared BARVINN with FINN [22], which is a templated Vi-
819
+ vado HLS C++ library of common DNN layers. Like BARVINN,
820
+ FINN can generate hardware for arbitrary precision, but is not
821
+ software programmable. Hence, once the FINN hardware is gen-
822
+ erated, the user cannot change the computation data stream. We
823
+ attempted to compare the performance of BARVINN with FINN
824
+ using the ResNet9 model we used earlier. However, at the time of
825
+ writing, FINN supports simple linear topologies and we were not
826
+ able to get performance metrics for our model. Instead, we used the
827
+ available CIFAR10-CNV model from the FINN repository that was
828
+ tuned for the FINN dataflow for our comparison. Table 5 shows the
829
+ performance of BARVINN and FINN. For this experiment, we used
830
+ different precisions for weights and activation. For both tools, we
831
+ used the performance estimation numbers for frames per second
832
+ (FPS). For FINN, we used the default folding configurations publicly
833
+ available in FINN-example repository [1]. As illustrated in Table
834
+ 5, we provide 7-15 times better throughput albeit with higher LUT
835
+ usage. On the other hand, for higher bit precisions, FINN provides
836
+ a better FPS/LUT, suggesting a scalable solution for bigger models.
837
+ We also compared the performance on a ResNet-50 model. Table
838
+ 6 shows our estimated FPS for BARVINN executing in Pipelined
839
+ mode along with reported performance for FINN [1] synthesized for
840
+ the Xilinx U250 and for FILM-QNN [20] synthesized for the Xilinx
841
+ ZCU102 FPGA. While FINN has the highest FPS, BARVINN shows
842
+
843
845
+ Table 4: Post-synthesis resource utilization of BARVINN.
846
Resource | Pito RISC-V | MVU Array | Overall
LUT | 10454 | 190625 | 201079
BRAM | 15 | 1312 | 1327
DSP | 0 | 512 | 512
Dynamic Power | 0.410 W | 21.066 W | 21.504 W
Frequency | 250 MHz | 250 MHz | 250 MHz
870
+ Table 5: Estimated performance of running CNV model on
871
+ CIFAR10 on Alveo U250 when different bit precision is used.
872
 | Bits (W/A) | kLUT | BRAM | DSP | FPS | FPS/kLUT
Ours | 1/1 | 201.1 (15.0%) | 1327 | 512 | 61035 | 303.5
Ours | 1/2 | 201.1 (15.0%) | 1327 | 512 | 30517 | 151.7
Ours | 2/2 | 201.1 (15.0%) | 1327 | 512 | 15258 | 75.8
FINN | 1/1 | 28.2 (2.1%) | 150 | 0 | 7716 | 273.6
FINN | 1/2 | 19.8 (1.47%) | 103 | 0 | 2170 | 109.6
FINN | 2/2 | 24.3 (1.81%) | 202 | 0 | 2170 | 89.3
918
+ Table 6: Performance for ResNet-50 model on ImageNet.
919
Accelerator | Bits (W/A) | Clock Freq. | FPS | FPS/Watt
Ours | 1/2 | 250 MHz | 2296 | 106.8
FINN-R [1][6] | 1/2 | 178 MHz | 2873 | 41.0
FILM-QNN [20] | 4(8)/5 | 150 MHz | 109 | 8.4
938
+ the best performance per Watt. According to the FINN-example
939
+ repository [1], a fine-tuned ResNet50 model, requires more than
940
+ 87% of Alveo U250 accelerator’s resources. This shows the limits
941
+ of FINN dealing with bigger models. BARVINN requires the same
942
+ LUT usage regardless of the model size and bit-width.
943
+ 5
944
+ CONCLUSION
945
+ In this paper, we presented an FPGA-based DNN accelerator that
946
+ supports arbitrary bit precision computations. We tested the perfor-
947
+ mance of BARVINN over different DNN kernels and models with
948
+ different bit precision. For model deployment, we developed a code
949
+ generator tool that takes in a model in ONNX format and generates
950
+ RISC-V assembly code for the controller. Compared to other low
951
+ precision accelerators, we provide a programmable solution which
952
+ makes BARVINN more flexible. With the programmable MVUs, the
953
+ user can run different models regardless of their size. BARVINN
954
+ allows trading off throughput and latency by running DNN lay-
955
+ ers either in distributed or in pipeline modes. Unlike other low
956
precision accelerators, our proposed solution implements these trade-offs in software, and the end user can control them for each individual layer, without FPGA reconfiguration, at
959
+ run time. Compared to programmable accelerators, BARVINN was
960
+ shown to provide a better throughput per Watt performance.
961
+ ACKNOWLEDGMENTS
962
+ The authors acknowledge support for this project from the IBM
963
+ AI Horizons Network, CMC Microsystems, Fonds de Recherche du
964
+ Quebec–Nature et Technologies (FRQNT), MITACS and from the
965
+ NSERC COHESA Strategic Research Network.
966
+ REFERENCES
967
+ [1] 2021. FINN Dataflow Accelerator Examples. (2021). https://github.com/Xilinx/
968
+ finn-examples
969
+ [2] 2021. ONNX Model Zoo. (2021). https://github.com/onnx/models
970
+ [3] 2021. Open Neural Network Exchange. (2021). https://onnx.ai/
971
+ [4] MohammadHossein AskariHemmat, Olexa Bilaniuk, Sean Wagner, Yvon Savaria,
972
+ and Jean-Pierre David. 2021. RISC-V Barrel Processor for Deep Neural Network
973
+ Acceleration. In 2021 IEEE International Symposium on Circuits and Systems
974
+ (ISCAS). 1–5. https://doi.org/10.1109/ISCAS51556.2021.9401617
975
+ [5] Olexa Bilaniuk, Sean Wagner, Yvon Savaria, and Jean-Pierre David. 2019. Bit-
976
+ Slicing FPGA Accelerator for Quantized Neural Networks. In 2019 IEEE Interna-
977
+ tional Symposium on Circuits and Systems (ISCAS). 1–5.
978
+ [6] Michaela Blott, Thomas B. Preußer, Nicholas J. Fraser, Giulio Gambardella, Ken-
979
+ neth O’brien, Yaman Umuroglu, Miriam Leeser, and Kees Vissers. 2018. FINN-R:
980
+ An End-to-End Deep-Learning Framework for Fast Exploration of Quantized
981
+ Neural Networks. ACM Trans. Reconfigurable Technol. Syst. 11, 3, Article 16 (dec
982
+ 2018), 23 pages. https://doi.org/10.1145/3242897
983
+ [7] Adrian Bulat and Georgios Tzimiropoulos. 2021. Bit-Mixer: Mixed-Precision
984
+ Networks With Runtime Bit-Width Selection. In Proceedings of the IEEE/CVF
985
+ International Conference on Computer Vision (ICCV). 5188–5197.
986
+ [8] Meghan Cowan, Thierry Moreau, Tianqi Chen, James Bornholt, and Luis Ceze.
987
+ 2020. Automatic Generation of High-Performance Quantized Machine Learning
988
+ Kernels. Association for Computing Machinery, New York, NY, USA, 305–316.
989
+ https://doi.org/10.1145/3368826.3377912
990
+ [9] Steven K. Esser, Jeffrey L. McKinstry, Deepika Bablani, Rathinakumar Ap-
991
+ puswamy, and Dharmendra S. Modha. 2020. Learned Step Size Quantization. In
992
+ International Conference on Learning Representations.
993
+ [10] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep Residual
994
+ Learning for Image Recognition. In Proceedings of the IEEE Conference on Computer
995
+ Vision and Pattern Recognition (CVPR).
996
+ [11] Mark Horowitz. 2014. Computing’s Energy Problem (and what we can do about
997
+ it). Interational Solid State Circuits Conference (2014).
998
+ [12] Itay Hubara et al. 2018. Quantized Neural Networks: Training Neural Networks
999
+ with Low Precision Weights and Activations. JMLR 18, 187 (2018), 1–30.
1000
+ [13] B. Ham J. Lee, D. Kim. 2021. Network Quantization with Element-wise Gradient
1001
+ Scaling. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern
1002
+ Recognition.
1003
+ [14] Guilin Li et al. 2020. Residual Distillation: Towards Portable Deep Neural Net-
1004
+ works without Shortcuts. In Advances in Neural Information Processing Systems,
1005
+ Vol. 33. 8935–8946.
1006
+ [15] Nicolas Limare. 2014. Floating-Point Math Speed vs Precision. (2014).
1007
+ http:
1008
+ //nicolas.limare.net/pro/notes/2014/12/16_math_speed/
1009
+ [16] Paulius Micikevicius et al. 2017.
1010
+ Mixed precision training.
1011
+ arXiv preprint
1012
+ arXiv:1710.03740 (2017).
1013
+ [17] Sungju Ryu, Hyungjun Kim, Wooseok Yi, and Jae-Joon Kim. 2019. Bitblade: Area
1014
+ and energy-efficient precision-scalable neural network accelerator with bitwise
1015
+ summation. In Proceedings of the 56th Annual Design Automation Conference 2019.
1016
+ 1–6.
1017
+ [18] Sayeh Sharify, Alberto Delmas Lascorz, Kevin Siu, Patrick Judd, and Andreas
1018
+ Moshovos. 2018. Loom: Exploiting Weight and Activation Precisions to Accelerate
1019
+ Convolutional Neural Networks. In 2018 55th ACM/ESDA/IEEE Design Automation
1020
+ Conference (DAC). 1–6. https://doi.org/10.1109/DAC.2018.8465915
1021
+ [19] Hardik Sharma et al. 2018. Bit fusion: Bit-level dynamically composable archi-
1022
+ tecture for accelerating deep neural network. In 2018 ACM/IEEE 45th Annual
1023
+ International Symposium on Computer Architecture (ISCA). IEEE, 764–775.
1024
+ [20] Mengshu Sun et al. 2022. FILM-QNN: Efficient FPGA Acceleration of Deep Neural
1025
+ Networks with Intra-Layer, Mixed-Precision Quantization. In Proceedings of the
1026
+ 2022 ACM/SIGDA International Symposium on Field-Programmable Gate Arrays.
1027
+ 134–145.
1028
+ [21] Stefan Uhlich et al. 2020.
1029
+ Mixed Precision DNNs: All you need is a good
1030
+ parametrization. In 8th International Conference on Learning Representations,
1031
+ ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020.
1032
+ [22] Yaman Umuroglu et al. 2017. FINN: A framework for fast, scalable binarized
1033
+ neural network inference. In Proceedings of the 2017 ACM/SIGDA International
1034
+ Symposium on Field-Programmable Gate Arrays. ACM, 65–74.
1035
+ [23] Kuan Wang, Zhijian Liu, Yujun Lin, Ji Lin, and Song Han. 2019. HAQ: Hardware-
1036
+ Aware Automated Quantization With Mixed Precision. In 2019 IEEE/CVF Con-
1037
+ ference on Computer Vision and Pattern Recognition (CVPR). 8604–8612. https:
1038
+ //doi.org/10.1109/CVPR.2019.00881
1039
+ [24] Haichao Yu, Haoxiang Li, Humphrey Shi, Thomas S. Huang, and Gang Hua.
1040
+ 2021. Any-Precision Deep Neural Networks. In Thirty-Fifth AAAI Conference on
1041
+ Artificial Intelligence. AAAI Press, 10763–10771. https://ojs.aaai.org/index.php/
1042
+ AAAI/article/view/17286
1043
+ [25] Xiaofan Zhang et al. 2018. DNNBuilder: an Automated Tool for Building High-
1044
+ Performance DNN Hardware Accelerators for FPGAs. In (ICCAD).
1045
+
7NAyT4oBgHgl3EQfcvfE/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
7NE1T4oBgHgl3EQf7QUc/content/tmp_files/2301.03531v1.pdf.txt ADDED
@@ -0,0 +1,1181 @@
1
+
2
+
3
+ 1
4
+ Abstract— Objectives: Identifying suicidality including suicidal
5
+ ideation, attempts, and risk factors in electronic health record data
6
+ in clinical notes is difficult. A major difficulty is the lack of training
7
+ samples given the small number of true positive instances among
8
+ the increasingly large number of patients being screened. This
9
+ paper describes a novel methodology that identifies suicidality in
10
+ clinical notes by addressing this data sparsity issue through zero-
11
+ shot learning. Materials and Methods: U.S. Veterans Affairs
12
+ clinical notes served as data. The training dataset label was
13
+ determined using diagnostic codes of suicide attempt and self-
14
+ harm. A base string associated with the target label of suicidality
15
+ was used to provide auxiliary information by narrowing the
16
+ positive training cases to those containing the base string. A deep
17
+ neural network was trained by mapping the training documents’
18
+ contents to a semantic space. For comparison, we trained another
19
+ deep neural network using the identical training dataset labels and
20
+ bag-of-words features. Results: The zero shot learning model
21
+ outperformed the baseline model in terms of AUC, sensitivity,
22
+ specificity, and positive predictive value at multiple probability
23
+ thresholds. In applying a 0.90 probability threshold, the
24
+ methodology identified notes not associated with a relevant ICD-
25
+ 10-CM code that documented suicidality, with 94% accuracy.
26
+ Conclusion: This new method can effectively identify suicidality
27
+ without requiring manual annotation.
28
+
29
+ Keywords— Suicide, Clinical Notes, NLP, Zero-Shot Learning
30
+ I. INTRODUCTION
31
Suicide is a significant problem in the United States,
32
+ increasing 35.2% from 1999 to 2018, and from 10.5 to 14.2
33
+ suicides per every 100,000 individuals in that same time period
34
+ [1] In 2020, 45,979 people died from suicide, and
35
+ approximately 1.2 million attempted suicide in the United
36
+ States [2] Its estimated cost is over $70 billion annually in lost
37
+ productivity and medical care [3]; this calculation does not
38
+ include residual costs from the estimated 4-17 people closely
39
+ tied to the suicide decedent who are left bereaved [4]. Suicide,
40
+ however, is a complicated problem that includes a dynamic web
41
+ of individual-level risk factors (e.g., depression, substance use
42
+ behaviors, personality traits), interpersonal risk factors (e.g.,
43
+ violence, victimization), and community-level factors (e.g.,
44
+ unemployment, stigmatization of mental illness) [5, 6].
45
+
46
+ 1Biomedical Informatics Center; The George Washington University;
47
+ Washington DC, USA;
48
+ 2VA Medical Center, Washington, DC, USA;
49
+ 3Department of Emergency Medicine, Yale School of Medicine, New Haven,
50
+ CT, USA; 4PRIME Center, VA Connecticut Healthcare System, West Haven,
51
+ CT, USA; 5Research, VA Connecticut Healthcare System, West Haven, CT,
52
+ Veterans are especially affected by suicide, with an age- and
53
+ sex-adjusted rate that is 1.5 times higher than nonveterans [7].
54
+ The Department of Veterans Affairs (VA) operates the single
55
+ largest integrated health care system in the U.S., and has
56
+ devoted resources to suicide prevention, including the Suicide
57
+ Prevention Applications Network (SPAN), embedding suicide
58
+ prevention coordinators and special reporting measures in
59
+ facilities [8], increased mental health staffing, partnerships with
60
+ community care organizations, and enhanced surveillance and
61
+ monitoring through its electronic health record (EHR) system
62
+ [9, 10]. Additionally, the VA has continual efforts to develop
63
+ predictive analytics to identify patients at the highest risk of
64
+ suicide [8, 11] The data elements for these predictive analytic
65
+ algorithms rely on structured data (e.g., International
66
+ Classification of Disease [ICD] diagnosis codes, prescription
67
+ data, socio-demographic data, care utilization metrics) [12]
68
+ which often provide an incomplete record [13, 14]. Less is
69
+ known about how unstructured data, such as contained in
70
+ clinical notes, can contribute to suicidality (i.e., suicidal
71
+ ideation or attempt) identification and prevention. Given that a
72
+ suicide attempt is one of the greatest risk factors for subsequent
73
+ suicide death, a more thorough means of detecting such events
74
+ is warranted [15].
75
+ A. Background and Significance
76
+ Natural language processing (NLP) combined with machine
77
+ learning may add value to suicide documentation research.
78
+ Supervised machine learning methods use “supervised”, or pre-
79
+ classified data. However, naïve attempts at note retrieval using
80
+ keyword search alone quickly demonstrate the difficulty of this
81
+ problem, as words such as “suicide” occur in standard
82
+ questionnaires which are included in many notes, with few
83
+ actually documenting suicidality. For instance, in a prior
84
+ experiment we carried out, we randomly collected 1,000 VA
85
+ notes containing the term “suicidal” or “suicide” from 1,000
86
+ individual patients and performed manual chart review for
87
+ affirmed suicidality. Only 1.57% of these notes documented
88
+ actual suicidality. Patient reluctance to disclose suicidal
89
+ ideation provides a further complicating factor [16, 17]. As a
90
+ result, a patient’s negative response to a suicide ideation inquiry
91
+ may not reflect their real feelings or intentions. Additionally,
92
+ USA; 6VA Connecticut Healthcare System, West Haven, CT, USA; 7Suzanne
93
+ Dworak-Peck School of Social Work, University of Southern California, Los
94
+ Angeles, CA, USA; 8Department of Internal Medicine, Yale School of
95
+ Medicine, West Haven, CT;
96
+
97
+ Leveraging Contextual Relatedness to Identify Suicide
98
+ Documentation in Clinical Notes through Zero Shot
99
+ Learning
100
+ T. Elizabeth Workman, Ph.D.1,2, Joseph L. Goulet, Ph.D.3,6, Cynthia A. Brandt, M.D.3,6, Allison R.
101
+ Warren, Ph.D.4, Jacob Eleazer, Ph.D.4, Melissa Skanderson, M.S.W.5, Luke Lindemann, Ph.D.6, John
102
+ R. Blosnich, Ph.D.7, John O’Leary, M.Ed.6,8, Qing Zeng-Treitler, Ph.D.1,2
103
104
+
105
+
106
+
107
+ 2
108
+ relying on structured data alone will result in incomplete
109
+ identification of patients who have or are experiencing
110
+ suicidality, because relevant coding is prone to underuse [8].
111
+ However, not all clinical notes associated with relevant
112
+ structured data document suicidality. For example, a note
113
+ documenting a secondary service such as group therapy, or a
114
+ note documenting fluid intake may not directly document
115
+ suicidality.
116
+ Prior attempts to apply NLP and machine learning are often
117
+ limited to mental health-oriented notes and may suffer if using
118
+ imbalanced data. Levis et al.[18] applied sentiment analysis and
119
+ various machine learning algorithms to classify suicide, using
120
+ VA psychotherapy notes, yielding area under the curve (AUC)
121
+ ratings comparable to chance. Fernandes et al.[19] obtained
122
+ excellent NLP performance in their study of clinical notes from
123
+ the Clinical Record Initiative Search (CRIS), but performance
124
+ was computed after removing neutral (non-suicide) results from
125
+ their machine learning output. Carson et al. enriched notes
126
+ associated with suicide attempt that were then used to train a
127
+ random forest model achieving 83% sensitivity, but only 22%
128
+ specificity [20]. Cook et al. [21] applied a bag-of-words
129
+ approach with machine learning to identify suicide ideation and
130
+ psychiatric symptoms using notes for patients identified as
131
+ having performed self-harm, achieving 61% PPV (positive
132
+ predictive value), 59% sensitivity, and 60% specificity, with
133
+ results varying depending on the task. Zhang et al. sought to
134
+ identify psychological stressors using a pre-annotated dataset of
135
+ psychiatric evaluation records from the CEGS N-GRID 2016
136
+ challenge [22] as a gold standard, for a conditional random
137
+ fields machine learning model, [23] yielding final F scores of
138
+ 73.91% and 89.01%, respectively, on exact and inexact stressor
139
+ matching, and 97.73% and 100% respectively, for exact and
140
+ inexact suicide recognition on instances of the positive
141
+ keywords with the stressors; however, their evaluation methods
142
+ for this are not detailed.
143
+ Zhong et al. applied structured data and NLP to identify
144
+ suicidal behavior in pregnant women, achieving PPV of 76%
145
+ and 30%, for women identified through relevant diagnostic
146
+ codes and through NLP for women not receiving a relevant
147
+ diagnostic code, respectively [24]. Obeid et al.[25] trained a
148
+ convolutional neural network that achieved an AUC of 0.882
149
+ and an F1 score of 0.769 in predicting relevant suicide ICD
150
+ codes in subsequent years. Using notes from psychiatric
151
+ encounters, Cusick et al. [26] developed a rule-based NLP tool
152
+ to identify positive instances of suicide-oriented keywords that
153
+ leveraged NegEx. [27] They also developed different weakly-
154
+ supervised machine learning models. A convolutional neural
155
+ network receiving Word2Vec [28] word embeddings as input
156
+ achieved precision, recall, F1 score, and AUC values of 0.81,
157
+ 0.83, 0.82, and 0.946. In a subsequent evaluation the
158
+ convolutional neural network correctly classified 87% of the 23
159
+ notes (of 5000 clinical notes) receiving a positive classification,
160
+ from notes for patients diagnosed with depression or prescribed
161
+ an antidepressant. In a related task Tsui et al. [29] used prior
162
+ structured and unstructured data (clinical notes from history,
163
+ physical examination, progress notes and discharge summaries)
164
+ of inpatient and emergency room patients with a coded suicide
165
+ attempt, to identify first-time suicide attempts in a case-control
166
+ study. An ensemble of extreme gradient boosting (EXGB)
167
+ yielded best performance, with an AUC ranging from 91.9% to
168
+ 93.2%, according to time window between prior data and
169
+ suicide attempt diagnosis. Recently, Rozova et al. obtained
170
+ promising results (87% AUC) using a gradient boosting model,
171
+ although the study was limited to emergency room triage notes
172
+ [30].
173
+ Seeking suicidality in all types of clinical notes, among all
174
+ types of patients, or when hampered by imbalanced data, is
175
+ indeed a complex task. Some of the methods in the papers cited
176
+ above tend to suffer from low precision, specificity, and
177
+ possibly also low sensitivity (recall). Identifying probability
178
+ thresholds addresses these problems, providing flexibility for a
179
+ given task. For example, a high probability threshold (e.g., the
180
+ top ten percent) can serve as a means for identifying
181
+ documentation indicating suicidality and its risk with high
182
+ precision. When the prevalence is very low, which is often the
183
+ case of true positive suicidality documentation, the optimal
184
+ threshold needs to balance metrics such as the true positive rate
185
+ (sensitivity, also known as recall), specificity, and the positive
186
+ predictive value (precision). A strategic implementation of a
187
+ technique like Zero-Shot Learning may also provide accurate
188
+ identification of suicidality in clinical notes.
189
+ B. Zero-Shot Learning
190
+ Zero-Shot Learning (ZSL) enables predictions on unseen
191
+ data using a model trained on data that has labels that are
192
+ different than those of the unseen data [31, 32]. It largely
193
+ operates by mapping select properties of the data (i.e., the
194
+ “feature space”) to a semantic representation (i.e., the “semantic
195
+ space”) that enables prediction of unseen classes [33]. In other
196
+ words, auxiliary information must be provided on the labels of
197
+ the unseen classes to make it possible for a trained model to
198
+ recognize them in the testing data.
199
+ ZSL has been applied in several computer vision tasks [34,
200
+ 35], as well as NLP tasks [36]. Accordingly, a feature space
201
+ can consist of data derived from images [37] or text [36]. The
202
+ semantic representation can be based on several different
203
+ approaches, including data attributes, semantic word vectors as
204
+ those provided by skip-gram or continuous-bag-of-word
205
+ architectures [33] or BERT output [38], or knowledge graphs
206
+ [33]. Examples in NLP applications include semantic utterance
207
+ classification [39] multilingual translation [40] and emotion
208
+ detection [41]. However, other than Sivarajkumar and Wang’s
209
+ work [38] there is little ZSL research in unstructured clinical
210
+ text data.
211
+ Naturally, different semantic representations affect the
212
+ accuracy of ZSL [42]. In this study, we leveraged word
213
+ embedding and usage context.
214
+ C. Objectives
215
+ We investigated a ZSL methodology applied to a binary
216
+ suicidality classification task. The training dataset was
217
+ constructed using diagnostic codes (ICD-10-CM codes) related
218
+ to suicide. Our target label is the broader concept of suicidality.
219
+ To enable ZSL, a base string representing suicidality was
220
+
221
+
222
+
223
+ 3
224
+ selected. We then built the semantic space by identifying key
225
+ features associated with suicidality in the training dataset. A
226
+ DNN model was developed using the training data and tested
227
+ on two different sets of unseen data with the unseen label of
228
+ suicidality. Specifically, we sought to answer:
229
+  Will ZSL effectively identify suicidality documentation
230
+ from among all types of clinical notes, using review by
231
+ clinicians as the reference standard?
232
+  Will ZSL effectively identify suicidality or suicide risk
233
+ documentation from among clinical notes not associated with
234
+ a relevant ICD-10-CM code, by probability threshold, in terms
235
+ of precision, using the same reference standard?
236
+ We are unaware of previous descriptions of this methodology
237
+ and to our knowledge it has not been used prior to this study.
238
+ II. METHODS
239
+ A. Training Data
240
+ A training dataset was created using two corpora. The first
241
+ corpus consisted of 50,000 randomly selected VA clinical notes
242
+ from outpatient encounters recorded between 2016 and 2019
243
+ which contained the base string “suicid” (e.g. “suicide”,
244
+ “suicidal” ) and were associated with at least one ICD-CM-10
245
+ code identified by the National Health Statistics Report from
246
+ the Centers for Disease Control and Prevention (CDC)
247
+ indicating suicide attempt or intentional self-harm.[43] This
248
+ corpus is referred to as stringAndDx (9170 unique patients).
249
+ The second corpus consisted of 50,000 randomly selected VA
250
+ clinical notes from outpatient encounters recorded between
251
+ 2016 and 2019 that were associated with other ICD-CM-10
252
+ codes that were irrelevant to suicidality or self-harm. These
253
+ notes were extracted from patients matching the stringAndDx
254
+ patients in age (at the time of document retrieval), race, and
255
+ ethnicity. This second corpus is referred to as noDx (8638
256
+ unique patients). Each corpus was preprocessed by
257
+ transforming all letters to lower case, removing basic
258
+ formatting markup and punctuation, separating character
259
+ strings into tokens (words), separating relevant concatenated
260
+ tokens (e.g., “suicidalhomicidal” to “suicidal” ”homicidal”),
261
+ and removing all tokens that did not entirely consist of letters.
262
+ B. Semantic Space Feature Extraction and Mapping
263
The task of building the semantic space and mapping documents to it was carried out in four
264
+ steps: First, we identified a list of features that are potentially
265
+ relevant for the positive training label. Second, we created word
266
+ embeddings using a skip-gram architecture. Third, we
267
+ identified context words of the selected features using the word
268
+ embeddings. In a fourth step, a contextual weight is assigned
269
+ to each feature for each document in mapping the semantic
270
+ space to the feature space.
271
In the first step, term frequency-inverse document frequency (TF-IDF)
272
+ analysis was used to identify the n most important terms in each
273
+ corpus. For this investigation, n = 1000. TF-IDF evaluates
274
+ term frequency using the count of documents containing a given
275
+ term. In each document, the relative frequency of each term is
276
+ weighted by the log of the number of documents in the corpus
277
+ divided by the number of documents containing the term, as
278
+ shown in (1)
279
𝑡𝑖,𝑗 = 𝑡𝑓𝑖,𝑗 ∗ 𝑙𝑜𝑔(𝑛 / 𝑑𝑓𝑖)    (1)
282
+
283
+ where ti,j is term i in document j, tfi,j is the relative frequency of
284
+ term i in document j, n is the total number of documents, and
285
+ dfi is the number of documents containing term i. Because TF-
286
+ IDF is a document-based measurement, we used the mean TF-
287
+ IDF value for each term in its respective corpus. The words
288
+ with the top TF-IDF scores that are unique to the stringAndDx
289
+ corpus were treated as features. Figure 1 illustrates this process.
290
+ Each circle represents terms from one of the corpora. Sets a and
291
+ b are the words with the top n TFIDF scores for stringAndDx
292
+ and noDx, respectively. Set c is the overlap between a and b.
293
+ The feature set F contains words that are in set a, but not in the
294
overlap set c or in set b (f ∈ a and f ∉ c and f ∉ b).
295
+
296
+
297
+ Figure 1. Feature identification. Words that are deemed as features are in set a,
298
+ excluding words in c and b.
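A compact sketch of the first step and the feature selection of Figure 1 (our plain-Python reading; tokenization details and tie-breaking are not specified in the paper):

import math
from collections import Counter

def mean_tfidf(docs):
    # Mean of eq. (1) over documents, for a corpus of tokenized documents.
    n = len(docs)
    df = Counter(term for doc in docs for term in set(doc))
    totals = Counter()
    for doc in docs:
        for term, count in Counter(doc).items():
            tf = count / len(doc)                        # relative frequency
            totals[term] += tf * math.log(n / df[term])  # eq. (1)
    return {term: value / n for term, value in totals.items()}

def select_features(string_and_dx_docs, no_dx_docs, top_n=1000):
    # Top-n terms of each corpus by mean TF-IDF; keep those unique to
    # stringAndDx (set a minus sets b and c in Figure 1).
    top = lambda docs: {t for t, _ in sorted(mean_tfidf(docs).items(),
                                             key=lambda kv: -kv[1])[:top_n]}
    a, b = top(string_and_dx_docs), top(no_dx_docs)
    return a - b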
299
+
300
+ In the second step, we created a Word2Vec model using the
301
+ stringAndDx corpus. In this study, the model was a shallow
302
+ neural network with the hidden layer containing 300 nodes,
303
+ applying the skip-gram architecture, with an analytic window
304
+ size of 5, trained through 10 iterations.
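For illustration, the second and third steps could be reproduced with gensim; the library choice and the toy corpus below are assumptions, since the paper does not name its toolkit.

from gensim.models import Word2Vec

# Toy stand-in corpus; the real input is the tokenized stringAndDx notes.
docs = [["patient", "took", "pills", "overdose"],
        ["warning", "flag", "overdose", "notified"]] * 100

# Skip-gram embeddings as described: 300-dimensional, window 5, 10 epochs.
model = Word2Vec(sentences=docs, vector_size=300, window=5, sg=1,
                 epochs=10, min_count=1)

# Top-m context words of a feature word by cosine similarity (m = 50 in the paper).
context = model.wv.most_similar("overdose", topn=50)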
305
+ In the third step, we identified the top m context words for
306
+ each feature word using the word embeddings from the
307
+ Word2Vec model. The m words most similar to each feature
308
+ word, according to cosine similarity values, served as its
309
+ context words. In this investigation, m = 50.
310
+ In the fourth step, we map the feature space, i.e. a document’s
311
+ preprocessed content, to the semantic space. A weight v is
312
+ assigned to each feature word for each document, based on its
313
+ occurrence with its context words in a window in the
314
+ document’s text. This weight is the summed total of the cosine
315
+ similarity between the feature and a co-occurring context word
316
+ multiplied by the mean TF-IDF value of the feature word. The
317
+ formula is shown in (2)
318
+
319
𝑣 = Σ_{𝑥∈𝐹, 𝑦∈𝐷} 𝑐𝑜𝑠𝑆𝑖𝑚(𝑥, 𝑦) ∗ 𝑡𝑓𝑖𝑑𝑓(𝑥)    (2)
323
+
324
+
325
+ where x is a feature in F, the set of features in the semantic
326
+ space, and y is a context word of set D, the context words for x
327
+ in the semantic space, which occurs in a five-word window
328
+ around x in the document’s text. This process is illustrated in
329
+ Figure 2, where “pattern” (highlighted in light gray) is a feature
330
+ word, and “internalizing” and “fitful” (highlighted in dark gray)
331
+ are among its set of context words and appear in a five-word
332
+ window.
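A plain-Python reading of equation (2), checked against the Figure 2 example (the five-word window handling is our interpretation):

def feature_weight(feature, doc_tokens, context_words, cosine, mean_tfidf,
                   window=5):
    # Eq. (2) for one feature word and one document: sum cosine(feature, y)
    # over context words y found in a window around each occurrence of the
    # feature, times the feature's mean TF-IDF value.
    v = 0.0
    for i, token in enumerate(doc_tokens):
        if token != feature:
            continue
        nearby = doc_tokens[max(0, i - window): i + window + 1]
        for y in set(nearby) & context_words:
            v += cosine[(feature, y)] * mean_tfidf[feature]
    return v

# the worked example from Figure 2
cos = {("pattern", "internalizing"): 0.4673, ("pattern", "fitful"): 0.3824}
doc = ("the patient has a pattern of internalizing criticism from his family "
       "this pattern sometimes results in fitful outbursts").split()
w = feature_weight("pattern", doc, {"internalizing", "fitful"}, cos,
                   {"pattern": 0.0062})
# w is approximately 0.0053, matching the Figure 2 example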
333
335
+
336
[Figure 1 diagram residue removed: Venn diagram of the top-TF-IDF term sets a (stringAndDx) and b (noDx) with overlap c.]
343
+
344
+
345
+ Figure 2. Example of deriving a feature weight using (2)
346
+
347
+ If a feature word is not in the text, its value is zero for the
348
+ given document.
349
+ C. Model Development
350
+ 20,000 documents were randomly selected from each corpus
351
+ (stringAndDx and noDx). We trained a DNN model (here
352
+ referred to as the ZSL DNN) consisting of five fully-connected
353
+ hidden layers of alternating sizes of 30 or 70 nodes, with each
354
+ layer implementing a dropout rate of 0.5. We implemented the
355
+ Adam optimizer [44], with a learning rate of 0.0012, beta 1
356
+ value of 0.92, beta 2 value of 0.9992, and an epsilon value of
357
+ 1e-08, with binary cross entropy as the loss function, and the
358
+ sigmoid function in the output layer, since it was a binary
359
+ classification task. The architecture and hyperparameters were
360
+ chosen on empirical grounds, after experimentation. Each
361
+ document from the stringAndDx corpus was classified as “1” (a
362
+ generic positive instance), and each document from the noDx
363
+ corpus was classified as “0” (a generic negative instance).
364
+ These labels do not indicate whether or not the given document
365
+ directly pertains, or not pertains, to suicidality or its risks, but
366
+ an association with a structured data element, and for those
367
+ labeled “1”, also containing a base string. Balancing the
368
+ positive and negative approximated training datasets in this
369
+ manner (i.e., providing balanced training examples) addressed
370
+ the problematic issue of otherwise training a model with few
371
+ positive and many negative instances. We implemented a 60%
372
+ training, 20% validation, and 20% testing split in developing
373
+ the ZSL DNN. Figure 3 illustrates the method.
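For illustration, the reported architecture and optimizer settings map onto a Keras model roughly as follows; Keras itself, the ReLU hidden activation, and the exact 30/70 layer ordering are assumptions, since the paper specifies only the sizes and hyperparameters listed above.

import tensorflow as tf

def build_zsl_dnn(num_features):
    # Five fully connected layers alternating 30/70 units, dropout 0.5,
    # sigmoid output, Adam(0.0012, beta_1=0.92, beta_2=0.9992, eps=1e-08),
    # binary cross entropy, per the description above.
    layers = []
    for width in (30, 70, 30, 70, 30):        # assumed ordering
        layers += [tf.keras.layers.Dense(width, activation="relu"),
                   tf.keras.layers.Dropout(0.5)]
    model = tf.keras.Sequential(
        [tf.keras.Input(shape=(num_features,))] + layers +
        [tf.keras.layers.Dense(1, activation="sigmoid")])
    model.compile(optimizer=tf.keras.optimizers.Adam(
                      learning_rate=0.0012, beta_1=0.92,
                      beta_2=0.9992, epsilon=1e-08),
                  loss="binary_crossentropy",
                  metrics=[tf.keras.metrics.AUC()])
    return model

model = build_zsl_dnn(num_features=163)   # one weight per feature word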
374
+
375
+
376
+ Figure 3. Method. The corpora stringAndDx (2016-19), noDx (2016-19),
377
+ testSet1 (2020), and testSet2 (2020) are unique and extracted from all clinical
378
+ notes based on associated ICD-10-CM codes, and in the case of
379
+ stringAndDx, where a base string is also present; corpora content is
380
+ preprocessed. The stringAndDx and noDx corpora are used in the TF-IDF
381
+ analysis to identify feature words that are unique to stringAndDx (step 1).
382
+ stringAndDx is applied to a skip-gram model to produce word embeddings
383
+ (step 2). Feature words and their significant context words (determined
384
+ through the word embeddings) form the semantic space (step 3). The
385
+ contents of stringAndDx and noDx are mapped to the semantic space, using
386
+ a function to determine feature word weights (step 4). The mapped contents
387
+ of stringAndDx and noDx documents are used to train the ZSL DNN, using
388
+ generic labels 1 and 0, respectively. The mapped contents of unseen
389
+ testSet1 and testSet2 notes were classified by the trained ZSL DNN, for the
390
+ classes (a) containing suicidality documentation, or (b) not containing
391
+ suicidality documentation. Human annotation independently classified
392
+ random documents from testSet1 and testSet2 for the same classes (a)
393
+ containing suicidality documentation, or (b) not containing suicidality
394
+ documentation; human annotation also assessed documents from testSet2
395
+ containing the base string that received a probability of 0.90 or greater, for
396
+ these classes and suicidality risk factors.
397
+
398
+ D. Evaluation
399
+ The authors randomly retrieved 5,000 different clinical notes
400
+ recorded in 2020 that were associated with at least one of the
401
relevant ICD-10-CM codes. This corpus is subsequently
402
+ labeled as testSet1. The authors also randomly retrieved 5,000
403
+ different clinical notes recorded in 2020 that were associated
404
with other ICD-10-CM codes irrelevant to suicidality or self-
405
+ harm. This corpus is subsequently labeled testSet2.
406
+ The contents of each of the notes in testSet1 and testSet2 were
407
+ mapped to the semantic space, i.e., deriving a weight for each
408
+ feature word as described earlier in the fourth step. Then, the
409
+ trained ZSL DNN was used to classify the notes in testSet1 and
410
+ testSet2 as (a) containing suicidality documentation, or (b) not
411
+ containing suicidality documentation.
412
+ In joint sessions, two clinical psychologists familiar with VA
413
+ clinical note documentation together identified suicidality (i.e.,
414
+ current or past suicide ideation or attempt) in 200 notes
415
+ randomly selected from testSet1 and testSet2 (100 from each
416
+ test set), after being instructed to look for documentation for
417
+ these specific events. They addressed differences of opinion
418
+ through discussion and mutual consensus during the joint
419
+ sessions. In a second evaluation, to explore how the
420
+ application’s output may serve to identify patients who had
421
+ experienced or were at risk for suicidality, but never formally
422
+ diagnosed as such, the clinicians examined the testSet2 notes
423
+ containing the base string “suicid" that received a probability
424
+ value of 0.90 or greater from the trained ZSL DNN, for
425
+ documentation of suicidality and/or its risk factors, according
426
+ to NIH guidelines.[45] This threshold was chosen in order to
427
+ explore how high-probability documents (i.e. the top 10% in
428
+ terms of probability) would be representative in identifying
429
+ documented suicidality or its risk factors with high precision,
430
+ thus addressing our second question.
431
+ 1) Baseline Comparison
432
+ For comparative purposes, the 163 most frequent bigrams
433
+ unique to the stringAndDx corpus were identified and used in a
434
+ bag-of-words baseline model. We trained a DNN (here referred
435
+ to as the Baseline DNN) using these 163 bigrams as features for
436
+ the 20,000 stringAndDx documents and the 20,000 noDx
437
documents. This baseline DNN was also used to classify the notes in testSet1 and testSet2, for (a) containing suicidality documentation, or (b) not containing suicidality documentation, using the 163 most frequent bigrams as features.
[Figure 2 box, displaced here by PDF extraction - document text: "The patient has a pattern of internalizing criticism from his family. This pattern sometimes results in fitful outbursts." TF-IDF value of feature word "pattern": 0.0062; cosine similarity of "pattern" and "internalizing": 0.4673; cosine similarity of "pattern" and "fitful": 0.3824; feature weight for "pattern": (0.0062 * 0.4673) + (0.0062 * 0.3824) = 0.0053.]
[Figure 3 diagram residue removed; the flow is described in full in the Figure 3 caption in Methods.]
478
+ III. RESULTS
479
+ The first step of the new method (described in Methods)
480
+ identified 163 feature words associated with suicidality
481
+ diagnosis. The top thirty feature words are listed in Table I. No
482
+ form of the base string “suicid” was found among the 163 final
483
+ feature words. Both “suicide” and “suicidal” were prominent
484
+ terms in both the noDx and stringAndDx corpora, along with
485
+ terms like “psychiatrist” and “psychosocial”; this is likely due
486
+ to the proliferation of objects like questionnaires, and mental
487
+ health care documentation in notes that are unrelated to
488
+ suicidality.
489
TABLE I
TOP 30 FEATURE WORDS
flag, overdose, coordinator, took, spc, observation, called, warning, pills, prf,
unknown, interrupted, gun, placement, lcsw, lethal, outcome, reportedly, notified, sdv,
occurred, police, protocol, od, supports, seeking, category, preparatory, cut, determined
521
+
522
+ A. ZSL DNN and Baseline DNN Performance
523
+ The classifications by the clinicians and the probabilities
524
+ assigned by the ZSL DNN and the Baseline DNN were first
525
+ assessed by AUC score. The results are in Table II and Figure
526
+ 4.
527
+
528
+ TABLE II
+ AUC PERFORMANCE
+ ZSL DNN: 0.946 | Baseline DNN: 0.47
534
+
535
+
536
+ Figure 4. ZSL DNN AUC results (left), Baseline DNN AUC results (right)
537
+
538
+ In terms of AUC, the ZSL DNN trained through mapping the
539
+ semantic space to the feature space outperformed the Baseline
540
+ DNN trained with the bigram bag-of-words features.
541
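For readers who want to reproduce this kind of comparison on their own data, a minimal sketch of an AUC computation from predicted probabilities is shown below; the labels and probabilities are made-up placeholders, not values from this study.

```python
# Sketch: AUC from model probabilities and clinician labels (hypothetical values).
from sklearn.metrics import roc_auc_score

y_true = [1, 1, 0, 0, 1, 0]                    # clinician judgments of suicidality documentation
p_model = [0.97, 0.88, 0.12, 0.30, 0.76, 0.05] # probabilities assigned by one model
print("AUC:", roc_auc_score(y_true, p_model))
```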
+ The sensitivity, specificity, and PPV results at 0.15, 0.5, and
542
+ 0.85 probability thresholds for each DNN are in Tables III-V.
543
+ Probability refers to the probability the DNN assigned to each
544
+ note for positive suicidality documentation. We applied the
545
+ median probability (0.1499, rounded) assigned by the ZSL
546
+ DNN to the testSet2 documents (the test set containing random
547
+ notes associated with irrelevant ICD-10-CM codes) in forming
548
+ minimum and maximum thresholds; 0.5 is a standard midpoint
549
+ probability threshold. The combined scores in these tables were
550
+ computed with all true positives, true negatives, false positives,
551
+ and false negatives for both test sets, for the indicated metrics.
552
+ Values of NaN (not a number) occurred where there were no
553
+ true positives or false positives.
554
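A minimal sketch of how sensitivity, specificity, and PPV can be computed at a chosen probability threshold is given below; the data are hypothetical, and the NaN case mirrors the situation described above where a threshold yields no positive calls.

```python
# Sketch: sensitivity, specificity, and PPV at a probability threshold (hypothetical data).
def confusion_metrics(y_true, probs, threshold):
    tp = sum(1 for y, p in zip(y_true, probs) if y == 1 and p >= threshold)
    fn = sum(1 for y, p in zip(y_true, probs) if y == 1 and p < threshold)
    tn = sum(1 for y, p in zip(y_true, probs) if y == 0 and p < threshold)
    fp = sum(1 for y, p in zip(y_true, probs) if y == 0 and p >= threshold)
    sensitivity = tp / (tp + fn) if (tp + fn) else float("nan")
    specificity = tn / (tn + fp) if (tn + fp) else float("nan")
    ppv = tp / (tp + fp) if (tp + fp) else float("nan")   # NaN when there are no positive calls
    return sensitivity, specificity, ppv

y_true = [1, 1, 0, 0, 1, 0]
probs = [0.97, 0.88, 0.12, 0.30, 0.76, 0.05]
for t in (0.15, 0.5, 0.85):
    print(t, confusion_metrics(y_true, probs, t))
```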
+
555
+ TABLE III
+ EVALUATION RESULTS AT 0.15 PROBABILITY THRESHOLD
+ ZSL DNN      | Sensitivity/Recall | Specificity | Precision/PPV
+ testSet1     | 97%                | 100%        | 91%
+ testSet2     | 100%               | 64%         | 5%
+ Combined     | 97%                | 59%         | 67%
+ Baseline DNN | Sensitivity/Recall | Specificity | Precision/PPV
+ testSet1     | 99%                | 0%          | 90%
+ testSet2     | 50%                | 9%          | 1%
+ Combined     | 98%                | 8%          | 48%
+
+ TABLE IV
+ EVALUATION RESULTS AT 0.5 PROBABILITY THRESHOLD
+ ZSL DNN      | Sensitivity/Recall | Specificity | Precision/PPV
+ testSet1     | 92%                | 40%         | 93%
+ testSet2     | 50%                | 97%         | 25%
+ Combined     | 91%                | 92%         | 90%
+ Baseline DNN | Sensitivity/Recall | Specificity | Precision/PPV
+ testSet1     | 92%                | 0%          | 89%
+ testSet2     | 50%                | 10%         | 1%
+ Combined     | 91%                | 9%          | 46%
+
+ TABLE V
+ EVALUATION RESULTS AT 0.85 PROBABILITY THRESHOLD
+ ZSL DNN      | Sensitivity/Recall | Specificity | Precision/PPV
+ testSet1     | 77%                | 70%         | 96%
+ testSet2     | 50%                | 100%        | 100%
+ Combined     | 76%                | 97%         | 96%
+ Baseline DNN | Sensitivity/Recall | Specificity | Precision/PPV
+ testSet1     | 0%                 | 100%        | NaN/div by 0
+ testSet2     | 0%                 | 100%        | NaN/div by 0
+ Combined     | 0%                 | 100%        | NaN/div by 0
659
+
660
+ The ZSL DNN outperformed the Baseline DNN in most
661
+ metrics at all probability thresholds.
662
+ B. Second Evaluation
663
+ To explore how this new methodology can identify clinical
664
+ notes documenting suicidality that are not associated with a
665
+ relevant ICD-10-CM code with high precision, the clinicians
666
+ also reviewed the 16 notes from testSet2 containing the base
667
+ string “suicid” that received a probability at or above 0.90 from
668
+ the trained ZSL DNN. The clinicians noted suicide ideation or
669
+ attempt, and the presence of the following suicide risk factors,
670
+ based on National Institute of Mental Health guidelines [45]:
671
+  Depression and other mental health disorders
672
+  Substance abuse disorder
673
+  Family history of a mental health or substance abuse
674
+ disorder
675
+  Family history of suicide
676
+  Family violence, including physical or sexual abuse
677
+  Having guns or other firearms in the home
678
+  Being in prison or jail
679
+  Being exposed to others’ suicidal behavior
680
+ Of these 16 clinical notes (associated with 16 different
681
+ patients), 7 documented current or past suicide ideation or
682
+ attempt. Eight of the remaining notes included one or more
683
+ risk factors for suicide (nearly all included multiple risk
684
+ factors). In all, 15 of the 16 notes contained documentation of
685
+ current or past suicide ideation or attempt, and/or suicide risk
686
+ factors, for patients who had never received a suicidality ICD-
710
+ 10-CM code diagnosis during the study period, achieving a
711
+ PPV of 93.8%.
712
+ IV. DISCUSSION
713
+ Regarding the study’s original questions, our ZSL approach
714
+ effectively identified suicidality in all types of clinical notes,
715
+ surpassing the performance of the bag-of-words baseline in
716
+ conjunction with deep learning. It also effectively identified
717
+ suicidality or suicide risk documentation from among clinical
718
+ notes not associated with a relevant ICD-10-CM code with high
719
+ precision, given an appropriate probability threshold.
720
+ A. Semantic Space
721
+ In this work, the semantic space development is framed as
722
+ feature extraction where mapping is enhanced by attaching
723
+ weights to features found in the data, an approach also used in
724
+ computer vision ZSL [46]. The semantic space captures natural
725
+ data properties by identifying salient terms and relevant
726
+ contextual
727
+ terms
728
+ in
729
+ collective
730
+ clinical
731
+ suicidality
732
+ documentation (i.e., a corpus of notes associated with relevant
733
+ ICD codes). Table 1 lists 30 prominent feature words associated
734
+ with collective suicidality documentation after removing terms
735
+ associated with other kinds of documents. There is an intuitive
736
+ sense to these words; “flag” is found in the phrase “high risk for
737
+ suicide flag”; “overdose” and “cut” refer to suicide methods;
738
+ “pills” and “gun” refer to suicide instruments. Identifying terms
739
+ contextually similar to these provides patterns in relevant
740
+ documentation. Again, this has an intuitive logic. The most
741
+ contextually similar terms to “flag” include “reactivate” and
742
+ “deactivate” (for a high suicide risk flag) and “high” (the level
743
+ of risk). The most contextually similar terms to “pills” include
744
+ “handful”, “fistfuls”, and “bunch”, implying large quantities,
745
+ along with “overdosing” and “took”, the associated actions.
746
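To illustrate how such contextually similar terms can be retrieved from word embeddings, a hedged sketch using gensim's Word2Vec is shown below; the tiny tokenized corpus and hyperparameters are placeholders, not the study's embedding configuration.

```python
# Sketch: nearest neighbours of a feature word in an embedding space (illustrative only).
from gensim.models import Word2Vec

corpus = [
    ["high", "risk", "for", "suicide", "flag", "reactivate"],
    ["patient", "took", "handful", "of", "pills", "overdosing"],
]
model = Word2Vec(sentences=corpus, vector_size=50, window=5, min_count=1, seed=1)
print(model.wv.most_similar("flag", topn=3))
print(model.wv.most_similar("pills", topn=3))
```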
+ The feature word “spc” indicates VA’s suicide prevention
747
+ coordinators, which is a structural change that VA implemented
748
+ for suicide prevention [10]. Concordantly, “police” and “lcsw”
749
+ (i.e., licensed clinical social worker) refer to other professions
750
+ highly associated with individuals at risk for suicide. For
751
+ example, police may be activated for a rescue, and a licensed
752
+ clinical social worker may be involved in treatment planning or
753
+ referral connections for suicidal individuals. The feature words
754
+ “prf” and “sdv” refer to “patient record flag” and “self-directed
755
+ violence”, respectively. The semantic space provided an
756
+ efficient representation for effective mapping to the feature
757
+ space.
758
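The feature-weighting idea can be written out in a few lines; the sketch below simply re-computes the worked example from the figure earlier in the paper (TF-IDF value of a feature word multiplied by its cosine similarity to other words in the note, then summed), and the numbers are the example's own.

```python
# Sketch of the feature-weighting scheme, using the numbers from the worked example.
tfidf_pattern = 0.0062                 # TF-IDF of the feature word "pattern" in the note
similarities = [0.4673, 0.3824]        # cosine similarity to "internalizing" and "fitful"
weight = sum(tfidf_pattern * s for s in similarities)
print(round(weight, 4))                # 0.0053, matching the example
```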
+ B. Data Retrieval and Model Training
759
+ Using associated structured data elements like ICD-10-CM
760
+ codes, and a base string provides a means to locate equally sized
761
+ corpora for training that could be generically labeled “0” or “1”.
762
+ These labels were primarily based on a structured data
763
+ association, since their individual unstructured content was
764
+ mostly unknown. This approach solves the issue of imbalanced
765
+ training data. The predominant clinical note types (Appendix)
766
+ also illustrate this. Most of the frequent note types associated
767
+ with one of the relevant CDC ICD-10-CM codes and containing
768
+ the base string are relevant to suicidality. Addendum is a
769
+ common note type [47] associated with many domains [48].
770
+ The most frequent note types not associated with a relevant
771
+ code resemble frequencies of all note types in the VA [47].
772
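A minimal sketch of this weak-labeling step is shown below: notes are assigned to a positive corpus when they carry a relevant code and contain the base string, and to a negative corpus otherwise, then sampled to equal sizes. The code list, note records, and field names are hypothetical placeholders.

```python
# Sketch: assembling equally sized, weakly labeled corpora from codes and a base string.
import random

RELEVANT_CODES = {"T14.91", "R45.851"}   # hypothetical stand-ins for the CDC ICD-10-CM list
notes = [
    {"text": "pt admitted after suicide attempt", "icd_codes": {"T14.91"}},
    {"text": "routine diabetes follow up", "icd_codes": {"E11.9"}},
]

string_and_dx = [n for n in notes
                 if "suicid" in n["text"].lower() and n["icd_codes"] & RELEVANT_CODES]
no_dx = [n for n in notes if not (n["icd_codes"] & RELEVANT_CODES)]

random.seed(0)
k = min(len(string_and_dx), len(no_dx))  # equally sized corpora, labeled 1 and 0
train = [(n["text"], 1) for n in random.sample(string_and_dx, k)] + \
        [(n["text"], 0) for n in random.sample(no_dx, k)]
```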
+ C. Identifying Suicidality Documentation
773
+ To our knowledge, this method has not been applied in other
774
+ studies. Unlike VA surveillance methods using structured data,
775
+ it also leverages information found in EHR notes. Also, unlike
776
+ other NLP methods [18, 20, 21, 23, 24, 26, 29, 30] it can be
777
+ applied to all patients and note types. In other studies, a bag-
778
+ of-words approach has been applied to suicidality identification
779
+ and other machine learning tasks [21, 49, 50]. However, the
780
+ results of this current study suggest that the complexity of
781
+ suicidality documentation demands a more targeted approach.
782
+ This method could complement existing measures like
783
+ SPAN, alerting suicide prevention coordinators of additional
784
+ patients at risk. The results of the two clinical psychologists’
785
+ evaluations demonstrate the method’s efficiency in identifying
786
+ suicidality documentation for documents where there is no
787
+ relevant ICD-10-CM code. The performance on both test sets
788
+ demonstrates the methodology’s effectiveness in classifying
789
+ notes that are mixed in terms of ICD-10-CM coding.
790
+ Tables III - V suggest that the probability threshold can be
791
+ adjusted to suit a specific task like finding suicidality and its
792
+ risk factors with high precision among notes not associated with
793
+ a relevant ICD-10-CM code. This is especially true considering
794
+ the small prevalence of suicidality documentation in clinical
795
+ notes. The second evaluation (which yielded 93.8% PPV)
796
+ demonstrates this. By applying a high probability threshold of
797
+ 0.90 to all 5000 testSet2 documents and focusing on clinical
798
+ notes containing the base string, 16 documents (for 16
+ different patients) were identified, of which 94% contained suicidality and/or suicidality
800
+ risk factor documentation, based on clinician review. These
801
+ results exceed those of Cusick et al.’s [26] similar task, where
802
+ 87% of notes were correctly classified, among notes for patients
803
+ diagnosed with depression or prescribed an antidepressant. In
804
+ this current study’s second evaluation, none of the 16 patients
805
+ identified had ever received a suicide ICD-10-CM code during
806
+ the study’s time period. It is impossible to know if the patients
807
+ in the 8 notes simply containing documented risk factors were
808
+ suicidal or not based solely on electronic health records.
809
+ Suicidal patients sometimes deny suicide ideation or attempt
810
+ [16, 51]. For example, in one note from the chart review
811
+ associated with a relevant ICD-10-CM code, the patient
812
+ reportedly denied suicide ideation, even after checking into the
813
+ hospital hours earlier for a self-reported suicide attempt.
814
+ D. Future Work
815
+ This work is part of a larger study of patients at risk for
816
+ suicide.[52] The next step is to combine these findings with
817
+ prior work. We also plan an analysis of patients from first
818
+ suicide ideation or attempt documented in the VA system, to
819
+ understand their evolution of care.
820
+ E. Limitations
821
+ VHA data largely cover a population of older men. However,
822
+ the number of women and younger patients is increasing, thus
823
827
+ also increasing the generalizability of these findings. The
828
+ corpora retrieval method we used to train the ZSL DNN is
829
+ dependent on clinicians’ use of the relevant ICD-10-CM codes
830
+ in documenting care, which may be prone to underuse [8].
831
+ However, the results of this study indicate the method’s utility.
832
+ Due to environmental computational limitations, we randomly
833
+ selected 20,000 notes from the stringAndDx corpus, and 20,000
834
+ notes from the noDx corpus for training the ZSL DNN.
835
+ V. CONCLUSION
836
+ We developed a new methodology to identify suicidality in
837
+ clinical notes using zero-shot learning (ZSL). A trained ZSL
838
+ deep neural network (DNN) outperformed a DNN trained using
839
+ a baseline bag-of-words method in AUC scores and other
840
+ metrics assessed at various probability thresholds on unseen
841
+ data, according to expert review. This novel methodology
842
+ identifies suicidality and its risk factors with high precision,
843
+ when applying a 0.90 probability threshold, in VA clinical notes
844
+ not associated with a relevant ICD-10-CM code. This
845
+ methodology
846
+ could
847
+ complement
848
+ existing
849
+ suicidality
850
+ identification measures. These findings hold promise for future
851
+ research.
852
+ APPENDIX
853
+
854
+ Most Frequent Note Types in Training Data by Corpus
+ stringAndDx Note Type                  | Count | noDx Note Type                              | Count
+ Addendum                               | 2844  | Addendum                                    | 5683
+ Suicide Behavior and Report            | 843   | Primary Care Secure Messaging               | 291
+ Suicide Prevention Telephone Note      | 811   | Nursing Note                                | 228
+ Suicide Behavior and Overdose Report   | 613   | Administrative Note                         | 207
+ Suicide Prevention Note                | 452   | State Prescription Drug Monitoring Program  | 110
+ Suicide Prevention Safety Plan         | 448   | Care Flow Sheet                             | 88
+ Mental Health Nursing Assessment Note  | 374   | Telephone Contact                           | 75
+ Veterans Crisis Line Note              | 222   | Mental Health Diagnostic Study Note         | 71
+ Social Work Note                       | 213   | Non VA Care Consult Result Note             | 69
+ Suicide Prevention Contact             | 212   | Operation Report                            | 64
917
+
918
+ ACKNOWLEDGMENT
919
+ The views expressed are those of the authors and do not
920
+ necessarily reflect those of the Department of Veterans Affairs,
921
+ the United States Government, or the academic affiliate
922
+ institutions. This work was funded by Veterans Affairs Health
923
+ Services Research and Development Services grant IIR 18-035
924
+ Understanding Suicide Risks among LGBT Veterans in VA
925
+ Care, and NIH National Center for Advancing Translational
926
+ Sciences grant UL1TR001876.
927
+ REFERENCES
928
+
929
+ [1]
930
+ H. Hedegaard, S. C. Curtin, and M. Warner. Suicide mortality in the
931
+ United States, 1999–2019, 2021.
932
+ [2]
933
+ American Foundation for Suicide Prevention. "Suicide Statistics." American
934
+ Foundation for Suicide Prevention. https://afsp.org/suicide-statistics
935
+ [3]
936
+ G. Gonzales and C. Henning-Smith, "Disparities in health and disability
937
+ among older adults in same-sex cohabiting relationships," J Aging Health,
938
+ vol. 27, no. 3, pp. 432-53, Apr 2015, doi: 10.1177/0898264314551332.
939
+ [4]
940
+ A. L. Berman, "Estimating the population of survivors of suicide: seeking
941
+ an evidence base," Suicide Life Threat Behav, vol. 41, no. 1, pp. 110-6,
942
+ Feb 2011, doi: 10.1111/j.1943-278X.2010.00009.x.
943
+ [5]
944
+ S. Stack, "Suicide: a 15-year review of the sociological literature. Part I:
945
+ cultural and economic factors," Suicide Life Threat Behav, vol. 30, no. 2,
946
+ pp. 145-62, Summer 2000. [Online]. Available: https://www.ncbi.nlm.nih.gov/pubmed/10888055.
953
+ [6]
954
+ K. Hawton, I. C. C. Casanas, C. Haw, and K. Saunders, "Risk factors for
955
+ suicide in individuals with depression: a systematic review," J Affect
956
+ Disord, vol. 147, no. 1-3, pp. 17-28, May 2013, doi: 10.1016/j.jad.2013.01.004.
967
+ [7]
968
+ "2019 National Veteran Suicide Prevention Annual Report," U.S.
969
+ Department of Veterans Affairs, Office of Mental Health and Suicide
970
+ Prevention, September 2019. [Online]. Available: https://www.mentalhealth.va.gov/docs/data-sheets/2019/2019_National_Veteran_Suicide_Prevention_Annual_Report_508.pdf
979
+ [8]
980
+ C. Hoffmire, B. Stephens, S. Morley, C. Thompson, J. Kemp, and R. M.
981
+ Bossarte, "VA Suicide Prevention Applications Network: A National
982
+ Health Care System-Based Suicide Event Tracking System," Public
983
+ Health Rep, vol. 131, no. 6, pp. 816-821, Nov 2016, doi:
984
+ 10.1177/0033354916670133.
985
+ [9]
986
+ I. Katz, "Lessons learned from mental health enhancement and suicide
987
+ prevention activities in the Veterans Health Administration," Am J Public
988
+ Health, vol. 102 Suppl 1, pp. S14-6, Mar 2012, doi: 10.2105/AJPH.2011.300582.
999
+ [10] D. Carroll, L. K. Kearney, and M. A. Miller, "Addressing Suicide in the
1000
+ Veteran Population: Engaging a Public Health Approach," Front
1001
+ Psychiatry, vol. 11, p. 569069, 2020, doi: 10.3389/fpsyt.2020.569069.
1002
+ [11] J. F. McCarthy et al., "Predictive Modeling and Concentration of the Risk
1003
+ of Suicide: Implications for Preventive Interventions in the US
1004
+ Department of Veterans Affairs," Am J Public Health, vol. 105, no. 9, pp.
1005
+ 1935-42, Sep 2015, doi: 10.2105/AJPH.2015.302737.
1006
1010
+ [12] R. C. Kessler et al., "Developing a practical suicide risk prediction model
1011
+ for targeting high-risk patients in the Veterans health Administration," Int
1012
+ J Methods Psychiatr Res, vol. 26, no. 3, Sep 2017, doi: 10.1002/mpr.1575.
1013
+ [13] J. Moss, M. Andison, and H. Sobko, "An analysis of narrative nursing
1014
+ documentation in an otherwise structured intensive care clinical
1015
+ information system," AMIA Annu Symp Proc, pp. 543-7, Oct 11 2007.
1016
+ [Online]. Available: https://www.ncbi.nlm.nih.gov/pubmed/18693895.
1017
+ [14] H. J. Kong, "Managing Unstructured Big Data in Healthcare System,"
1018
+ Healthc Inform Res, vol. 25, no. 1, pp. 1-2, Jan 2019, doi:
1019
+ 10.4258/hir.2019.25.1.1.
1020
+ [15] J. M. Bostwick, C. Pabbati, J. R. Geske, and A. J. McKean, "Suicide
1021
+ Attempt as a Risk Factor for Completed Suicide: Even More Lethal Than
1022
+ We Knew," Am J Psychiatry, vol. 173, no. 11, pp. 1094-1100, Nov 1
1023
+ 2016, doi: 10.1176/appi.ajp.2016.15070854.
1024
+ [16] B. Harmer, S. Lee, H. Duong Tv, and A. Saadabadi, "Suicidal Ideation,"
1025
+ in StatPearls. Treasure Island (FL), 2021.
1026
+ [17] S. A. Louzon, R. Bossarte, J. F. McCarthy, and I. R. Katz, "Does Suicidal
1027
+ Ideation as Measured by the PHQ-9 Predict Suicide Among VA
1028
+ Patients?," Psychiatr Serv, vol. 67, no. 5, pp. 517-22, May 1 2016, doi:
1029
+ 10.1176/appi.ps.201500149.
1030
+ [18] M. Levis, C. Leonard Westgate, J. Gui, B. V. Watts, and B. Shiner,
1031
+ "Natural language processing of clinical mental health notes may add
1032
+ predictive value to existing suicide risk models," Psychol Med, pp. 1-10,
1033
+ Feb 17 2020, doi: 10.1017/S0033291720000173.
1034
+ [19] A. C. Fernandes, R. Dutta, S. Velupillai, J. Sanyal, R. Stewart, and D.
1035
+ Chandran, "Identifying Suicide Ideation and Suicidal Attempts in a
1036
+ Psychiatric Clinical Research Database using Natural Language
1037
+ Processing," Sci Rep, vol. 8, no. 1, p. 7426, May 9 2018, doi:
1038
+ 10.1038/s41598-018-25773-2.
1039
+ [20] N. J. Carson et al., "Identification of suicidal behavior among
1040
+ psychiatrically
1041
+ hospitalized
1042
+ adolescents
1043
+ using
1044
+ natural
1045
+ language
1046
+ processing and machine learning of electronic health records," PloS one,
1047
+ vol. 14, no. 2, p. e0211116, 2019.
1048
+ [21] B. L. Cook, A. M. Progovac, P. Chen, B. Mullin, S. Hou, and E. Baca-
1049
+ Garcia, "Novel Use of Natural Language Processing (NLP) to Predict
1050
+ Suicidal Ideation and Psychiatric Symptoms in a Text-Based Mental
1051
+ Health Intervention in Madrid," Comput Math Methods Med, vol. 2016,
1052
+ p. 8708434, 2016, doi: 10.1155/2016/8708434.
1053
+ [22] O. Uzuner, A. Stubbs, and M. Filannino, "A natural language processing
1054
+ challenge for clinical records: Research Domains Criteria (RDoC) for
1055
+ psychiatry," J Biomed Inform, vol. 75S, pp. S1-S3, Nov 2017, doi:
1056
+ 10.1016/j.jbi.2017.10.005.
1057
+ [23] Y. Zhang et al., "Psychiatric stressor recognition from clinical notes to
1058
+ reveal association with suicide," Health Informatics J, vol. 25, no. 4, pp.
1059
+ 1846-1862, Dec 2019, doi: 10.1177/1460458218796598.
1060
+ [24] Q.-Y. Zhong et al., "Screening pregnant women for suicidal behavior in
1061
+ electronic medical records: diagnostic codes vs. clinical notes processed
1062
+ by natural language processing," BMC medical informatics and decision
1063
+ making, vol. 18, no. 1, pp. 1-11, 2018.
1064
+ [25] J. S. Obeid et al., "Identifying and Predicting intentional self-harm in
1065
+ electronic health record clinical notes: Deep learning approach," JMIR
1066
+ medical informatics, vol. 8, no. 7, p. e17784, 2020.
1067
+ [26] M. Cusick et al., "Using weak supervision and deep learning to classify
1068
+ clinical notes for identification of current suicidal ideation," J Psychiatr
1069
+ Res, vol. 136, pp. 95-102, Apr 2021, doi: 10.1016/j.jpsychires.2021.01.052.
1078
+ [27] W. W. Chapman, W. Bridewell, P. Hanbury, G. F. Cooper, and B. G.
1079
+ Buchanan, "A simple algorithm for identifying negated findings and
1080
+ diseases in discharge summaries," J Biomed Inform, vol. 34, no. 5, pp.
1081
+ 301-10, Oct 2001, doi: 10.1006/jbin.2001.1029.
1082
+ [28] T. Mikolov, K. Chen, G. Corrado, and J. Dean, "Efficient estimation of
1083
+ word representations in vector space," arXiv preprint arXiv:1301.3781,
1084
+ 2013.
1085
+ [29] F. R. Tsui et al., "Natural language processing and machine learning of
1086
+ electronic health records for prediction of first-time suicide attempts,"
1087
+ JAMIA Open,
1088
+ vol. 4, no. 1, p. ooab011, Jan 2021, doi:
1089
+ 10.1093/jamiaopen/ooab011.
1090
+ [30] V. Rozova, K. Witt, J. Robinson, Y. Li, and K. Verspoor, "Detection of
1091
+ self-harm and suicidal ideation in emergency department triage notes," J
1092
+ Am Med Inform Assoc, vol. 29, no. 3, pp. 472-480, Jan 29 2022, doi:
1093
+ 10.1093/jamia/ocab261.
1094
+ [31] H. Larochelle, D. Erhan, and Y. Bengio, "Zero-data learning of new
1095
+ tasks," in AAAI, 2008, vol. 1, no. 2, p. 3.
1096
+ [32] C. H. Lampert, H. Nickisch, and S. Harmeling, "Learning to detect
1097
+ unseen object classes by between-class attribute transfer," in 2009 IEEE
1098
+ conference on computer vision and pattern recognition, 2009: IEEE, pp.
1099
+ 951-958.
1100
+ [33] X. Sun, J. Gu, and H. Sun, "Research progress of zero-shot learning,"
1101
+ Applied Intelligence, vol. 51, no. 6, pp. 3600-3614, 2021.
1102
+ [34] B. Romera-Paredes and P. Torr, "An embarrassingly simple approach to
1103
+ zero-shot learning," in International conference on machine learning,
1104
+ 2015: PMLR, pp. 2152-2161.
1105
+ [35] R. L. Hu, C. Xiong, and R. Socher, "Zero-shot image classification guided
1106
+ by natural language descriptions of classes: A meta-learning approach,"
1107
+ NeurIPS, 2018.
1108
+ [36] G. Dinu, A. Lazaridou, and M. Baroni, "Improving zero-shot learning by
1109
+ mitigating the hubness problem," arXiv preprint arXiv:1412.6568, 2014.
1110
+ [37] F. Pourpanah et al., "A review of generalized zero-shot learning
1111
+ methods," IEEE transactions on pattern analysis and machine
1112
+ intelligence, 2022.
1113
+ [38] S. Sivarajkumar and Y. Wang, "HealthPrompt: A Zero-shot Learning
1114
+ Paradigm for Clinical Natural Language Processing," arXiv preprint
1115
+ arXiv:2203.05061, 2022.
1116
+ [39] Y. N. Dauphin, G. Tur, D. Hakkani-Tur, and L. Heck, "Zero-shot learning
1117
+ for semantic utterance classification," arXiv preprint arXiv:1401.0509,
1118
+ 2013.
1119
+ [40] M. Johnson et al., "Google’s multilingual neural machine translation
1120
+ system: Enabling zero-shot translation," Transactions of the Association
1121
+ for Computational Linguistics, vol. 5, pp. 339-351, 2017.
1122
+ [41] S. G. Tesfagergish, J. Kapočiūtė-Dzikienė, and R. Damaševičius, "Zero-
1123
+ Shot Emotion Detection for Semi-Supervised Sentiment Analysis Using
1124
+ Sentence Transformers and Ensemble Learning," Applied Sciences, vol.
1125
+ 12, no. 17, p. 8662, 2022.
1126
+ [42] T. Hascoet, Y. Ariki, and T. Takiguchi, "Semantic embeddings of generic
1127
+ objects for zero-shot learning," EURASIP Journal on Image and Video
1128
+ Processing, vol. 2019, no. 1, pp. 1-14, 2019.
1129
+ [43] H. Hedegaard, M. Schoenbaum, C. Claassen, A. Crosby, K. Holland, and
1130
+ S. Proescholdbell, "Issues in Developing a Surveillance Case Definition
1131
+ for Nonfatal Suicide Attempt and Intentional Self-harm Using
1132
+ International Classification of Diseases, Tenth Revision, Clinical
1133
+ Modification (ICD-10-CM) Coded Data," Natl Health Stat Report, no.
1134
+ 108, pp. 1-19, Feb 2018. [Online]. Available: https://www.ncbi.nlm.nih.gov/pubmed/29616901.
1142
+ [44] D. P. Kingma and J. Ba, "Adam: A method for stochastic optimization,"
1143
+ arXiv preprint arXiv:1412.6980, 2014.
1144
+ [45] National Institute of Mental Health/NIH. "Suicide In America: Frequently
1145
+ Asked Questions: Who is at Risk for Suicide?" https://www.nimh.nih.gov/health/publications/suicide-faq/#pub2
1154
+ [46] C. A. Caceres et al., "Feature Selection Methods for Zero-Shot Learning
1155
+ of Neural Activity," Front Neuroinform, vol. 11, p. 41, 2017, doi:
1156
+ 10.3389/fninf.2017.00041.
1157
+ [47] Y. Shao, G. Divita, T. E. Workman, D. Redd, J. H. Garvin, and Q. Zeng-
1158
+ Treitler, "Clinical Sublanguage Trend and Usage Analysis from a Large
1159
+ Clinical Corpus," in 2020 IEEE International Conference on Big Data
1160
+ (Big Data), 2020: IEEE, pp. 3837-3845.
1161
+ [48] T. E. Workman, G. Divita, and Q. Zeng-Treitler, "Discovering
1162
+ Sublanguages in a Large Clinical Corpus through Unsupervised Machine
1163
+ Learning and Information Gain," in 2019 IEEE International Conference
1164
+ on Big Data (Big Data), 2019: IEEE, pp. 4889-4898.
1165
+ [49] M. A. Clapp, E. Kim, K. E. James, R. H. Perlis, A. J. Kaimal, and T. H.
1166
+ McCoy, Jr., "Natural language processing of admission notes to predict
1167
+ severe maternal morbidity during the delivery encounter," Am J Obstet
1168
+ Gynecol, Apr 14 2022, doi: 10.1016/j.ajog.2022.04.008.
1169
+ [50] R. L. Figueroa and C. A. Flores, "Extracting Information from Electronic
1170
+ Medical Records to Identify the Obesity Status of a Patient Based on
1171
+ Comorbidities and Bodyweight Measures," J Med Syst, vol. 40, no. 8, p.
1172
+ 191, Aug 2016, doi: 10.1007/s10916-016-0548-8.
1173
+ [51] S. Merelle, E. Foppen, R. Gilissen, J. Mokkenstorm, R. Cluitmans, and
1174
+ W. Van Ballegooijen, "Characteristics Associated with Non-Disclosure
1175
+ of Suicidal Ideation in Adults," Int J Environ Res Public Health, vol. 15,
1176
+ no. 5, May 9 2018, doi: 10.3390/ijerph15050943.
1177
+ [52] T. E. Workman et al., "A Prototype Application to Identify LGBT
1178
+ Patients in Clinical Notes," in 2020 IEEE International Conference on Big
1179
+ Data (Big Data), 2020: IEEE, pp. 4270-4275.
1180
+
1181
+
7NE1T4oBgHgl3EQf7QUc/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8NE1T4oBgHgl3EQfBwLj/content/2301.02857v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5bdfec9315f9f01d5d71284efed2012a5403c3f041d9e7f0e173ccaf003bd04c
3
+ size 1148106
8NE1T4oBgHgl3EQfBwLj/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:927ecd9747e98327afa116eb36d3996dfdf3e6dc9d414d401d1220b83bddc46d
3
+ size 96640
8tAyT4oBgHgl3EQfQ_YL/content/tmp_files/2301.00055v1.pdf.txt ADDED
@@ -0,0 +1,2822 @@
1
+ A Bayesian latent position approach for community detection in
2
+ single- and multi-layer networks with continuous attributes
3
+ Zhumengmeng Jina, Juan Sosab and Brenda Betancourtc
4
+ a University of Florida
5
+ b Universidad Nacional de Colombia
6
+ c NORC at the University of Chicago
7
+ December 2022
8
+ Abstract
9
+ The increasing prevalence of multiplex networks has spurred a critical need to take into account po-
10
+ tential dependencies across different layers, especially when the goal is community detection, which is a
11
+ fundamental learning task in network analysis. We propose a full Bayesian mixture model for community
12
+ detection in both single-layer and multi-layer networks. A key feature of our model is the joint modeling
13
+ of the nodal attributes that often come with the network data as a spatial process over the latent space.
14
+ In addition, our model for multi-layer networks allows layers to have different strengths of dependency
15
+ in the unique latent position structure and assumes that the probability of a relation between two actors
16
+ (in a layer) depends on the distances between their latent positions (multiplied by a layer-specific factor)
17
+ and the difference between their nodal attributes. Under our prior specifications, the actors’ positions
18
+ in the latent space arise from a finite mixture of Gaussian distributions, each corresponding to a cluster.
19
+ Simulated examples show that our model performs favorably compared to the existing ones. The model
20
+ is also applied to a real three-layer network of employees in a law firm.
21
+ 1
22
+ Introduction
23
+ Network data conveniently describes the relationships between actors in complex systems and is ubiquitous
24
+ in many statistical applications, including finance, social science, criminology, biology, epidemiology, and
25
+ computer science, among others. Understanding the relationships between actors can aid domain experts.
26
+ Key words and phrases. multiplex network, community detection, latent position model, mixture model, spatial process, visu-
27
+ alization
28
+ 1
29
+ arXiv:2301.00055v1 [stat.AP] 30 Dec 2022
30
+
31
+ For instance, in epidemiology, people in a certain area can be portrayed in a contact network that can be
32
+ studied to detect infectious disease outbreaks. In criminology, communications between terrorists form a
33
+ terrorist network, helping intelligence agencies to better counter terrorism.
34
+ Many models have been developed for the inference of networks over the past decades (e.g., Erdös and
35
+ Rényi, 1959, Frank and Strauss, 1986), among which the broad class of latent space models is one of the
36
+ most widely used (see, e.g., Sosa, 2021 for an exhaustive review). Suppose the network under study has
37
+ N actors, then under latent space models, there are N independent and identically distributed (i.i.d.) latent
38
+ variables z1, . . . , zN, one for each actor. Under a mild exchangeability assumption in Hoff [2007], results
39
+ in Aldous [1985] and Hoover [1982] show that edge variables yi,j depend on latent variables through a
40
+ symmetric function γ(zi, zj) that is meant to capture any pattern in the network beyond any known covariate
41
+ information.
42
+ Many well-known models fall into the category of latent space models, which can be distinguished between
43
+ two cases depending on whether latent variables are discrete or continuous [Matias and Robin, 2014]. For in-
44
+ stance, stochastic block models [Nowicki and Snijders, 2001, Wang and Wong, 1987] – hereafter SBM – are
45
+ special cases of latent space models with discrete latent variables zi ∈ {1, 2, . . . , K}. When latent variables
46
+ are assumed to be continuous, another approach using latent variables is the class of latent position models
47
+ (LPM) proposed by Hoff et al. [2002] which our model in the paper is built upon. In its basic formulation,
48
+ LPMs model the edge variables yi,j as conditionally independent given the distance between latent variables
49
+ γ(zi, zj) = −∥zi − zj∥, which naturally accounts for transitivity effects through the latent space (typically
50
+ a Euclidean K-dimensional space for a predetermined K) where zi lives. Later on, Handcock et al. [2007]
51
+ proposed an extension on Hoff et al.’s LPM, namely the latent position cluster model (LPCM), by imposing
52
+ a Gaussian mixture prior on the latent positions to perform clustering tasks. Krivitsky et al. [2009] further
53
+ extended Handcock et al.’s model by adding the random sender and receiver effects proposed by Hoff [2005].
54
+ Other formulations of γ(·, ·) can be found in Schweinberger and Snijders [2003], Hoff [2005, 2009], Athreya
55
+ et al. [2017], Minhas et al. [2019], among others.
56
+ Besides edge information of a network, extra information like node and edge attributes and different types
57
+ of edges are often available, and should ideally be leveraged for inference. Typical ways to incorporate
58
+ attributes in a network model include: (1) modeling the network as a function of the attributes (see, e.g.,
59
+ Hoff et al., 2002, Hoff, 2005); (2) modeling the attributes as a function of the network [Guha and Rodriguez,
60
+ 2021]; (3) jointly modeling the network and attributes (Linkletter, 2007, Kim and Leskovec, 2012, Fosdick
61
+ and Hoff, 2015, Ciminelli et al., 2019). The first approach is arguably the most common approach to incor-
62
+ porate covariates into the model, but we consider an approach of joint modeling proposed by Ciminelli et al.
63
+ 2
64
+
65
+ [2019], namely the social network spatial model (SNSM), where the authors modeled edges yi,j as condi-
66
+ tionally independent given ∥zi − zj∥ and the distance of the continuous node attributes ∥xi − xj∥, and node
67
+ attributes are further modeled as a spatial process over the latent space. Note that joint modeling does not
68
+ require the network or the attributes to be fully observed as the first two approaches, hence one could predict
69
+ missing network and attribute data (if there is any). In addition, it improves model fitting by capturing the
70
+ dependence structure between latent variables and the attributes (when such dependency exists), as we will
71
+ see in Section 3.
72
+ We propose a full hierarchical Bayesian model that builds on Ciminelli et al.’s SNSM. Instead of using a
73
+ Gaussian distribution as the prior for latent positions as in Ciminelli et al. [2019], we impose a Gaussian
74
+ mixture prior as in Handcock et al. [2007], so that our model could also capture the group structure in the
75
+ network. Detecting communities or clusters among actors in the network is an important task in network
76
+ analysis and has spurred the development of many models and algorithms, among which the SBM has
77
+ motivated an active line of research that deals with community detection (see, e.g., Lee and Wilkinson
78
+ [2019] for a review). However, SBM may not fit well when many actors fall between clusters [Hoff et al.,
79
+ 2002]. We will compare our model with an SBM that incorporates covariates as fixed effects (i.e., model
80
+ the edge variables as a function of latent classes and covariates [Leger, 2016]), and we call this model a
81
+ covariate-assisted stochastic block model (CSBM). We will show that our model presents improved model
82
+ fitting while producing similar clustering results as CSBM.
83
+ We also propose an extension of our model to multi-layer network settings. Multi-layer networks can gen-
84
+ erally be categorized into two cases: cross-sectional networks that have different types of connections (e.g.,
85
+ social networks of friendship, coworker-ship, etc.) and time-varying networks where the same type of con-
86
+ nections are measured over time (e.g., a trade network that changes over time). We consider a type of
87
+ cross-sectional multi-layer network where each layer has a common set of actors. Substantial work has been
88
+ done on latent space models for cross-sectional multi-layer networks that take a Bayesian approach (see, e.g.,
89
+ Gollini and Murphy, 2016, Salter-Townshend and McCormick, 2017, D’Angelo et al., 2019, Sosa and Betan-
90
+ court, 2022, Durante and Dunson, 2018, Wang et al., 2019, MacDonald et al., 2020). In extending our model
91
+ to the multiple networks setting, we adopt the approach in Sosa and Betancourt [2022] in a parsimonious
92
+ way, where latent positions are assumed to be the same for all layers, but the strength of borrowing such
93
+ latent structure information is allowed to be different across different layers. Note that, the original model
94
+ in Sosa and Betancourt [2022] assumed different latent positions for different layers and had an additional
95
+ hierarchy on the hyperparameters. The specification of our model is given in the next section.
96
+ The remainder of the paper is organized as follows. Section 2 contains general background on the spatial
97
+ 3
98
+
99
+ process and introduces the proposed model (for single- and multi-layer network settings) which we call
100
+ the latent position joint mixture model (LPJMM) in the rest of the paper. In addition, prior specification,
101
+ identifiability problem, and inference will also be discussed in this section. Several simulation studies are
102
+ conducted in section 3, where LPJMM is compared with Handcock et al.’s LPCM, Ciminelli et al.’s SNSM
103
+ and CSBM in single-layer settings and the model is also evaluated in multi-layer settings. In section 4, we
104
+ apply LPJMM to a real-world multi-layer network data set. Finally, we conclude with some discussion in
105
+ section 5.
106
+ 2
107
+ Models
108
+ We first review the LPM introduced in Hoff et al. [2002], and then build upon it with a spatial process to allow
109
+ for joint modeling of the network and the nodal attributes, and with a finite Gaussian mixture distribution for
110
+ latent positions to allow for clustering.
111
+ Consider a binary single-layer network with N actors. Denote its adjacency matrix as Y = (yi,j) ∈
112
+ {0, 1}N×N, where yi,j = 1 if actors i and j are connected, and yi,j = 0 if they are not connected. Suppose
113
+ the network data comes with a one-dimensional nodal attribute xi for each actor, and denote the covariate as
114
+ x = (xi) ∈ RN. The LPM assumes that each actor i has an unobserved latent position zi in a K-dimensional
+ Euclidean space, the so-called latent space, for some K ∈ N. Let z = (zi) ∈ RN×K, then LPM
116
+ models edge yi,j as conditionally independent given distances between nodal attributes as well as distances
117
+ between latent positions via logistic regression. But instead of the logistic link, we use the probit link in our
118
+ model. The analysis of probit regression models can often be facilitated by a Gibbs sampler constructed using
119
+ the data augmentation approach that introduces latent variables with truncated normal distributions [Albert
120
+ and Chib, 1993]. (See also Sosa and Betancourt (2022) for a discussion on the choice of link functions.)
121
+ Specifically, for i, j ∈ {1, . . . , N} and i ̸= j,
122
+ y_{i,j} \mid \mathbf{z}, \mathbf{x}, a, b, \theta \;\overset{\text{ind}}{\sim}\; \mathrm{Ber}\!\left( \Phi(a + b\,|x_i - x_j| - \theta\,\|z_i - z_j\|) \right), \qquad (1)
129
+ where a, b ∈ R and θ ∈ R+, Ber(p) is a Bernoulli distribution that takes value 1 with some probability p,
130
+ ∥ · ∥ is the Euclidean norm on RK and Φ(·) is the cumulative distribution function of the standard normal
131
+ distribution. Note that we impose a factor θ for the distance between latent positions, which is different from
132
+ Hoff et al. [2002] and Krivitsky et al. [2009]. Although θ is unidentifiable in single-layer networks, it plays
133
+ a non-trivial role in multi-layer network settings (introduced in Section 2.1). We defer a detailed discussion
134
+ of θ to Section 2.4.
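As a hedged numerical illustration of the edge probability in Eq. (1) (not part of the paper), the sketch below evaluates Phi(a + b|x_i - x_j| - theta * ||z_i - z_j||) for one pair of actors with made-up parameter values.

```python
# Sketch: the probit edge probability of Eq. (1) for one pair of actors (made-up values).
import numpy as np
from scipy.stats import norm

a, b, theta = 5.0, -2.0, 1.0
x_i, x_j = 0.3, 1.1                                        # nodal attributes
z_i, z_j = np.array([0.0, 0.5]), np.array([1.0, -0.2])     # latent positions (K = 2)

eta = a + b * abs(x_i - x_j) - theta * np.linalg.norm(z_i - z_j)
p_edge = norm.cdf(eta)                                     # probit link
print(p_edge)
```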
135
137
+ To allow for joint modeling of the network and nodal attributes, we model the nodal attributes as a spatial
138
+ process over the latent space RK. Hence, nodal attributes are treated as random variables indexed by their
139
+ latent positions, and the distance between these random variables is found by the distance between their
140
+ corresponding positions. As in Ciminelli et al. [2019], we specify the spatial process as a Gaussian process
141
+ that is stationary with mean β and isotropic (see Banerjee et al., 2015 for definitions). In this case, the
142
+ process is completely defined by its covariance function Cov(d), where d is the distance between two random
143
+ variables in the Gaussian process. In particular, we specify Cov(d) with an exponential kernel, that is,
144
+ \mathrm{Cov}(d) = \begin{cases} \tau^2 + \sigma^2, & \text{if } d = 0; \\ \sigma^2 \exp(-\varphi d), & \text{if } d > 0, \end{cases}
154
+ where τ ≥ 0, σ > 0 and φ > 0. It is well-known that such a covariance structure is valid, i.e., the covariance
155
+ matrix for any finite collection of random variables in the process is positive definite [Banerjee et al., 2015].
156
+ Let Mz = (mij) ∈ RN×N where mij = exp(−φ∥zi − zj∥) and denote IN as the N-dimensional identity
157
+ matrix, then the Gaussian process of the nodal attributes is constructed as follows,
158
+ \mathbf{x} \mid \mathbf{z}, \beta, \sigma, \tau, \varphi \;\sim\; \mathrm{N}_N\!\left( \beta \mathbf{1}_N,\; \sigma^2 M(\mathbf{z}, \varphi) + \tau^2 I_N \right), \qquad (2)
160
+ where Nd is a d-dimensional multivariate normal distribution for some dimension d ∈ {2, 3, . . . }, and 111N is
161
+ an N-dimensional vector with all 1s.
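A short sketch of this construction is given below: pairwise latent-space distances are turned into the exponential covariance with a nugget, and the attributes are drawn from the resulting multivariate normal. All numerical settings are placeholders.

```python
# Sketch: exponential covariance sigma^2 * exp(-phi * d) + tau^2 * I over latent distances,
# and a draw of the attributes x ~ N(beta * 1, Sigma).
import numpy as np

rng = np.random.default_rng(1)
N, K = 5, 2
z = rng.normal(size=(N, K))                      # placeholder latent positions
beta, sigma2, tau2, phi = 0.0, 1.0, 0.3, 0.5

d = np.linalg.norm(z[:, None, :] - z[None, :, :], axis=-1)   # pairwise distances
Sigma = sigma2 * np.exp(-phi * d) + tau2 * np.eye(N)
x = rng.multivariate_normal(mean=np.full(N, beta), cov=Sigma)
print(x)
```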
162
+ As in Krivitsky et al. [2009], we impose a Gaussian mixture distribution on latent positions, which allows us
163
+ to cluster actors into different groups. Suppose there are H < ∞ predetermined number of components in
164
+ the Gaussian mixture distribution, then
165
+ z_i \mid \boldsymbol{\omega}, \boldsymbol{\mu}, \boldsymbol{\kappa} \;\overset{\text{ind}}{\sim}\; \sum_{h=1}^{H} \omega_h \, \mathrm{N}_K(\mu_h, \kappa_h^2 I_K), \qquad (3)
173
+ where ωωω = {ω1, . . . , ωH}, µµµ = {µ1, . . . , µH}, κκκ = {κ1, . . . , κH}. Note that µh is a K-dimensional mean
174
+ vector where h ∈ {1, . . . , H}, and ωh is the probability that an actor belongs to the h-th group such that
175
+ ωh ∈ (0, 1) and \sum_{h=1}^{H} \omega_h = 1.
177
+ In single-layer network settings, the model is given by Eqs. (1) to (3). Under our model, nodal attributes
178
+ of two actors whose latent positions are close are more likely to be similar according to the exponential
179
+ covariance structure. If b < 0 (b > 0), actors with similar attributes are more (less) likely to be connected.
180
+ When b = 0, nodal attributes do not affect the distribution of the network directly (but it still has an indirect
181
+ impact on the network through latent positions by Eq. (2)).
+ Figure 1: DAG representation of the LPJMM in multi-layer settings.
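Before turning to the multi-layer extension, a hedged end-to-end sketch of simulating from the single-layer model of Eqs. (1) to (3) is shown below; the sample sizes, mixture settings, and parameter values are illustrative, not those used later in the paper.

```python
# Sketch: simulate latent positions (Eq. 3), attributes (Eq. 2), and edges (Eq. 1).
import numpy as np
from scipy.stats import norm

rng = np.random.default_rng(7)
N, K, H = 30, 2, 3
omega = np.array([0.5, 0.3, 0.2])
mu = rng.normal(scale=2.0, size=(H, K))
kappa = np.array([0.4, 0.4, 0.4])

g = rng.choice(H, size=N, p=omega)                         # cluster memberships
z = mu[g] + kappa[g][:, None] * rng.normal(size=(N, K))    # latent positions, Eq. (3)

phi, sigma2, tau2, beta = 0.5, 1.0, 0.3, 0.0
d = np.linalg.norm(z[:, None, :] - z[None, :, :], axis=-1)
x = rng.multivariate_normal(np.full(N, beta),
                            sigma2 * np.exp(-phi * d) + tau2 * np.eye(N))   # Eq. (2)

a, b, theta = 5.0, -2.0, 2.7
eta = a + b * np.abs(x[:, None] - x[None, :]) - theta * d
Y = (rng.random((N, N)) < norm.cdf(eta)).astype(int)       # Eq. (1), probit link
np.fill_diagonal(Y, 0)
Y = np.triu(Y, 1); Y = Y + Y.T                             # keep the network symmetric
```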
185
+ 2.1
186
+ An extension to multi-layer networks.
187
+ Our model can also be extended to multi-layer network settings in the following way. Suppose we have L
188
+ layers Y1, . . . , YL in the network, where all layers are defined over the same set of actors. We assume the
189
+ same latent positions z for all layers but allow the strength of borrowing such latent structure information to
190
+ be different by imposing layer-specific factors θℓ for ℓ ∈ {1, . . . , L}. Our model in multi-layer settings is
191
+ then presented as follows
192
+ y_{i,j,\ell} \mid \mathbf{z}, \mathbf{x}, a_\ell, b_\ell, \theta_\ell \;\overset{\text{ind}}{\sim}\; \mathrm{Ber}\!\left( \Phi(a_\ell + b_\ell\,|x_i - x_j| - \theta_\ell\,\|z_i - z_j\|) \right), \qquad (4)
+ \mathbf{x} \mid \mathbf{z}, \beta, \sigma, \tau, \varphi \;\sim\; \mathrm{N}_N\!\left( \beta \mathbf{1}_N,\; \sigma^2 M(\mathbf{z}, \varphi) + \tau^2 I_N \right), \qquad (5)
+ z_i \mid \boldsymbol{\omega}, \boldsymbol{\mu}, \boldsymbol{\kappa} \;\overset{\text{i.i.d.}}{\sim}\; \sum_{h=1}^{H} \omega_h \, \mathrm{N}_K(\mu_h, \kappa_h^2 I_K), \qquad (6)
210
+ where yi,j,ℓ is the edge variable between actors i and j in layer ℓ ∈ {1, . . . , L}, aℓ, bℓ and θℓ are layer-specific
211
+ parameters. Note that Eqs. (5) and (6) are the same as Eqs. (2) and (3). Fig. 1 shows a directed acyclic graph
212
+ (DAG) representation of the model given by Eqs. (4) to (6).
213
+ 6
214
+
215
+ 2.2
216
+ Prior specification
217
+ We take a Bayesian approach to estimate the model parameters. Without loss of generality, a Bayesian ver-
218
+ sion of the model given by Eqs. (4) to (6) is formed by placing prior distributions on the unknown parameters
219
+ aℓ, bℓ, θℓ, β, σ, τ, φ, ωωω, µµµh, κh, for ℓ = {1, . . . , L} and h = {1, . . . , H}. In the model we consider, these
220
+ parameters are assumed a priori independent. For parameters in the probit regression tier as specified by
221
+ Eq. (4), their priors are specified as follows:
222
+ a_\ell \overset{\text{i.i.d.}}{\sim} \mathrm{N}(m_a, \nu_a^2), \qquad b_\ell \overset{\text{i.i.d.}}{\sim} \mathrm{N}(m_b, \nu_b^2), \qquad \theta_\ell \overset{\text{i.i.d.}}{\sim} \mathrm{Gamma}(\lambda_1, \lambda_2).
+ The priors for the parameters in the spatial process tier as given in Eq. (5) are given as follows:
+ \beta \sim \mathrm{N}(0, \nu_\beta^2), \qquad \sigma^2 \sim \mathrm{InvG}(\eta_1, \eta_2), \qquad \tau^2 \sim \mathrm{InvG}(\xi_1, \xi_2), \qquad \varphi \sim \mathrm{U}(u_1, u_2).
+ Finally, we put the following priors on the rest of the parameters:
+ \boldsymbol{\omega} \sim \mathrm{Dir}(\alpha), \qquad \mu_h \overset{\text{i.i.d.}}{\sim} \mathrm{N}_K(m_\mu, \nu_\mu^2 I_K), \qquad \kappa_h^2 \overset{\text{i.i.d.}}{\sim} \mathrm{InvG}(\gamma_1, \gamma_2).
249
+ Note that, ma, νa, mb, νb, λ1, λ2, νβ, η1, η2, ξ1, ξ2, u1, u2, α, mµ, νµ, γ1 and γ2 are user-specified
250
+ hyperparameters, and Gamma(·, ·), InvG(·, ·), U(·, ·), Dir(·) represents Gamma, Inverse-Gamma, uniform,
251
+ and Dirichlet distributions respectively.
252
+ 2.3
253
+ Posterior distribution and model estimation
254
+ As is standard in Bayesian estimation of mixture models (see, e.g., Diebolt and Robert [1994]), we define a
255
+ new variable gi that serves as the missing data of group membership of actor i whose distribution depends
256
+ on ωωω. In particular, gi = h if actor i belongs to the h-th group. The joint density of (zi, gi) given ωωω, µµµ and κκκ
257
+ is then given by
258
+ \prod_{h=1}^{H} \left[ \omega_h \, \frac{1}{\sqrt{2\pi\kappa_h^2}} \exp\!\left( -\frac{1}{2\kappa_h^2} \|z_i - \mu_h\|^2 \right) \right]^{I\{g_i = h\}},
275
+ where the indicator function I{gi=h} = 1 if gi = h, and I{gi=h} = 0 otherwise. Let g = (gi)N
276
+ i=1 be the group
277
+ membership for all actors and L(·) be the law of a random variable. Then the posterior distribution of z, g
278
279
+
280
+ and the parameters upon which priors are specified in Section 2.2 is given by
281
+ \Pi(\mathbf{z}, \mathbf{g}, a_1, \ldots, a_L, b_1, \ldots, b_L, \theta_1, \ldots, \theta_L, \beta, \tau^2, \sigma^2, \varphi, \boldsymbol{\omega}, \boldsymbol{\mu}, \boldsymbol{\kappa} \mid Y_1, \ldots, Y_L, \mathbf{x})
+ \propto \left[ \prod_{\ell=1}^{L} \mathcal{L}(Y_\ell \mid \mathbf{z}, \mathbf{x}, a_\ell, b_\ell, \theta_\ell) \right] \mathcal{L}(\mathbf{x} \mid \mathbf{z}, \sigma, \tau, \varphi)\, \mathcal{L}(\mathbf{z}, \mathbf{g} \mid \boldsymbol{\omega}, \boldsymbol{\mu}, \boldsymbol{\kappa}) \left[ \prod_{\ell=1}^{L} \mathcal{L}(a_\ell)\mathcal{L}(b_\ell)\mathcal{L}(\theta_\ell) \right]
294
+ × L(β)L(σ2)L(τ 2)L(φ)L(ωωω)L(µµµ)L(κκκ) .
295
+ Note that the posterior distribution has dimension NK + N + 3L + 3H + 4, and the corresponding posterior density is as follows,
297
+ \pi(\mathbf{z}, \mathbf{g}, a_1, \ldots, a_L, b_1, \ldots, b_L, \theta_1, \ldots, \theta_L, \beta, \tau^2, \sigma^2, \varphi, \boldsymbol{\omega}, \boldsymbol{\mu}, \boldsymbol{\kappa} \mid Y_1, \ldots, Y_L, \mathbf{x})
+ \propto \prod_{i \neq j} \prod_{\ell=1}^{L} \Phi(a_\ell + b_\ell |x_i - x_j| - \theta_\ell \|z_i - z_j\|)^{y_{i,j,\ell}} \left[ 1 - \Phi(a_\ell + b_\ell |x_i - x_j| - \theta_\ell \|z_i - z_j\|) \right]^{1 - y_{i,j,\ell}}
+ \times \left| \sigma^2 M(\mathbf{z}, \varphi) + \tau^2 I_N \right|^{-1/2} \exp\!\left\{ -\tfrac{1}{2} (\mathbf{x} - \beta \mathbf{1})^{\top} \left( \sigma^2 M(\mathbf{z}, \varphi) + \tau^2 I_N \right)^{-1} (\mathbf{x} - \beta \mathbf{1}) \right\}
+ \times \prod_{i=1}^{N} \prod_{h=1}^{H} \left[ \frac{\omega_h}{\sqrt{\kappa_h^2}} \exp\!\left\{ -\tfrac{1}{2\kappa_h^2} \|z_i - \mu_h\|^2 \right\} \right]^{I\{g_i = h\}}
+ \times \exp\!\left\{ -\tfrac{1}{2\nu_a^2} \sum_{\ell=1}^{L} (a_\ell - m_a)^2 - \tfrac{1}{2\nu_b^2} \sum_{\ell=1}^{L} (b_\ell - m_b)^2 \right\} \prod_{\ell=1}^{L} \theta_\ell^{\lambda_1 - 1} \exp(-\lambda_2 \theta_\ell)
+ \times \exp\!\left\{ -\tfrac{\beta^2}{2\nu_\beta^2} \right\} (\sigma^2)^{-\eta_1 - 1} (\tau^2)^{-\xi_1 - 1} \exp\!\left\{ -\tfrac{\eta_2}{\sigma^2} - \tfrac{\xi_2}{\tau^2} \right\} I\{\varphi \in [u_1, u_2]\}
+ \times \prod_{h=1}^{H} \left[ \omega_h^{\alpha_h - 1} \, I\{\textstyle\sum_{h=1}^{H} \omega_h = 1\} \exp\!\left\{ -\tfrac{1}{2\nu_\mu^2} \|\mu_h - m_\mu\|^2 \right\} (\kappa_h^2)^{-\gamma_1 - 1} \exp\!\left( -\tfrac{\gamma_2}{\kappa_h^2} \right) \right]
389
+ .
390
+ 2.4
391
+ Inference and identifiability of parameters
392
+ Note that the posterior distribution is highly intractable, hence we must resort to Markov chain Monte Carlo
393
+ (MCMC) methods for inferences on model parameters. A Markov chain of the parameters is generated via
394
+ the program “Just Another Gibbs Sampler” (JAGS) which is implemented in R [R Core Team, 2021] using
395
+ the rjags package [Plummer, 2022].
396
+ Several parameters are not identifiable in our model. Firstly, due to factors θℓ and φ, and the fact that latent
397
+ positions are incorporated in the posterior only through their distances, the posterior is, therefore, invariant
398
+ to θℓs and φ, and is invariant to scaling, reflection, rotation, and translation of the latent positions z. (Note
399
+ that, Hoff et al., 2002 and Krivitsky et al., 2009 did not have θℓs, hence their posterior is not invariant to the
400
+ 8
401
+
402
+ scaling of latent positions.) Although θℓs are not identifiable and do not affect the model fitting, in multi-
403
+ layer settings, their ratios θℓ1/θℓ2 still provide valid information on layer’s relative strength of borrowing
404
+ information from the latent space.
405
+ Despite being unidentifiable, one can still make inferences on the latent positions and find a reasonable
406
+ estimate for z through a post-process which we now describe. Similar to the definition in [Hoff et al., 2002],
407
+ we define the equivalence class of z ∈ RN×K, denoted as [z], to be the set of positions that are equivalent
408
+ to z under scaling, reflection, rotation, and translation. Given a fixed reference position zref, a position
409
+ z∗ is found in [z] such that z∗ = arg minz′∈[z] tr(zref − z′)⊺(zref − z′), which is the so-called Procrustes
410
+ transformation. In simulation studies, zref is naturally chosen to be the true latent position, while in practical
411
+ applications, we could use the last iteration of the Markov chain of latent positions as the reference. The
412
+ Procrustes transformation is performed for each iteration of the Markov chain of the latent positions {zn},
413
+ and an estimate for z is taken as the mean of the Procrustes transformations of {zn}.
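A hedged sketch of this post-processing step is given below, using SciPy's Procrustes alignment rather than the authors' implementation; the reference configuration and the stand-in MCMC draws are placeholders, and note that SciPy also standardizes the reference, which is harmless for an illustration.

```python
# Sketch: align each posterior draw of the latent positions to a reference configuration
# (translation, scaling, rotation, reflection) and average the aligned draws.
import numpy as np
from scipy.spatial import procrustes

rng = np.random.default_rng(0)
z_ref = rng.normal(size=(10, 2))                      # reference (e.g., the last MCMC draw)
draws = [z_ref + 0.1 * rng.normal(size=z_ref.shape) for _ in range(100)]  # stand-in sample

aligned = [procrustes(z_ref, z_n)[1] for z_n in draws]  # second output is the aligned matrix
z_hat = np.mean(aligned, axis=0)                        # point estimate of the latent positions
```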
414
As occurs in Bayesian mixture models, the label-switching problem for the group membership g is another source of non-identifiability: the posterior is invariant under permutations of the clustering labels. Many algorithms have been proposed to obtain a single clustering estimate based on the MCMC sample of the group membership {gn}, including an optimization method (which we call "MaxPEAR" hereafter) in Fritsch and Ickstadt [2009] that finds a clustering maximizing the posterior expected adjusted Rand index, an optimization method ("MinBinder") in Lau and Green [2007] that minimizes Binder's loss function, and a greedy algorithm ("GreedyEPL") in Rastelli and Friel [2018] that aims to minimize the variation of information, among others. These approaches may generate different clustering estimates, and to get a better understanding of the model performance, all aforementioned algorithms (MaxPEAR, MinBinder and GreedyEPL) are used to assess the model. Estimates based on these approaches are found using the packages GreedyEPL [Rastelli, 2021] and mcclust [Fritsch, 2022].
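One ingredient shared by such label-switching-invariant summaries is the posterior similarity matrix, i.e. the posterior probability that two actors belong to the same cluster, which can be estimated directly from the MCMC label draws. A minimal Python sketch is given below; it is not the estimator used in the packages above, just the co-clustering matrix they build on (array shapes are illustrative):

    import numpy as np

    def posterior_similarity(g_draws):
        """g_draws: (n_iter, N) integer cluster labels from the MCMC output.
        Returns the N x N matrix of posterior co-clustering probabilities,
        which is invariant to permutations of the cluster labels."""
        g_draws = np.asarray(g_draws)
        n_iter, n = g_draws.shape
        psm = np.zeros((n, n))
        for g in g_draws:
            psm += (g[:, None] == g[None, :])
        return psm / n_iter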
425
3 Simulation
Two simulation studies are carried out in this section to evaluate our model. A single-layer network is considered in the first simulation, where we compare LPJMM with three other models designed only for single-layer networks, namely LPCM in Handcock et al. [2007], SNSM in Ciminelli et al. [2019], and CSBM in Leger [2016]; SNSM is also implemented using the rjags package, while LPCM and CSBM are implemented using the latentnet [Krivitsky and Handcock, 2022] and sbm [Chiquet et al., 2022] packages respectively. The model specifications for these models can be found in Appendix A. Model assessments include how well a model can recover the group membership and the latent position configuration, and a goodness-of-fit test using network summaries including the density, transitivity, and assortativity coefficient with respect to the group membership g (see Kolaczyk and Csárdi, 2020 for definitions). We also evaluate our model by how accurately it can estimate certain parameters in the model. The second simulation is conducted in a two-layer network setting, where the performance of our model can be further evaluated by how well it recovers the ratio θ1/θ2, which reflects differences in each layer's dependency on the latent position structure.
Figure 2: Left: A visualization of the network based on the true latent positions; color indicates group membership g. Right: Heatmap of the adjacency matrix (where actors are reordered according to g).
446
3.1 Simulation 1: a single-layer network
Consider a single-layer network (i.e., L = 1) with N = 100 actors generated as follows. Firstly, generate latent positions z from a mixture of H = 5 multivariate normal distributions, and then generate attributes x jointly from a multivariate normal distribution with mean β1N = 0 and covariance matrix given by Cov(d) in Section 2, where φ = 0.5, τ² = 0.3, σ² = 1. Finally, the network data are generated according to Eq. (1) with a = 5, b = −2, and θ = 2.72. See Fig. 2 for a visualization of the simulated network. The network is fairly sparse with a density of 0.1531, and shows fairly strong transitivity and assortative mixing with coefficients 0.5049 and 0.5512 respectively.
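A compact Python sketch of a generating mechanism of this kind is given below. It should be read as an illustration only: the mixture means, the exact form of the distance-decaying covariance Cov(d), and the probit-link adjacency rule are assumptions standing in for the specific choices of Section 2 and Eq. (1), and the parameter values mirror those quoted above.

    import numpy as np
    from scipy.stats import norm
    from scipy.spatial.distance import pdist, squareform

    rng = np.random.default_rng(0)
    N, K, H = 100, 2, 5
    a, b, theta = 5.0, -2.0, 2.72
    phi, tau2, sigma2 = 0.5, 0.3, 1.0

    # latent positions from a mixture of H bivariate normals (illustrative means)
    means = rng.normal(scale=1.0, size=(H, K))
    g = rng.integers(H, size=N)
    z = means[g] + 0.3 * rng.normal(size=(N, K))

    # attributes: one joint Gaussian draw whose covariance decays with latent distance
    d = squareform(pdist(z))
    cov = sigma2 * np.exp(-d / phi) + tau2 * np.eye(N)   # placeholder for Cov(d)
    x = rng.multivariate_normal(np.zeros(N), cov)

    # adjacency via a probit link in the spirit of Eq. (1)
    p = norm.cdf(a + b * np.abs(x[:, None] - x[None, :]) - theta * d)
    y = (rng.uniform(size=(N, N)) < p).astype(int)
    y = np.triu(y, 1)
    y = y + y.T        # symmetric, no self-loops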
455
As for the prior specifications, we set ma = mb = 0 and νa² = νb² = 9 to allow a wide range of values for a and b. Let θ ∼ Gamma(1, 1) so that θ has mean 1. An almost flat prior is imposed on β by setting νβ = 10⁴. The same uniform prior U(0, 1) as in Ciminelli et al. [2019] is specified for φ. We suggest the sum of the prior means of τ² and σ² to be on the same scale as the sample variance of x, and here we use σ² ∼ InvG(2, 1) and τ² ∼ InvG(2, 1). Let α = 1 so that the prior on ω is a flat Dirichlet distribution. Following the heuristics in Sosa and Betancourt [2022], we specify µh ∼ i.i.d. NK(0, 2/3 IK) and κh² ∼ i.i.d. InvG(3, 2/3) so that var(zij | gi) = 1.
471
                   LPJMM (our model)   LPCM         CSBM
  MaxPEAR          0.737 (5)           0.707 (4)    --
  MinBinder        0.712 (11)          0.748 (10)   --
  GreedyEPL        0.664 (4)           0.688 (4)    --
  Variational-EM   --                  --           0.707 (6)
Table 1: Adjusted Rand indices corresponding to different estimation methods for group membership. Numbers in the parentheses represent numbers of estimated groups.
492
[Figure 3; four panels, (a) Truth, (b) LPJMM, (c) LPCM, (d) CSBM, each showing the 100 actors plotted at their true latent positions.]
Figure 3: (A): Color indicates the true group membership g. (B)-(D): Color indicates the estimated group memberships ĝ of LPJMM, LPCM and CSBM respectively. Positions of the points in all plots are the true latent positions z.
899
Note that the latent space dimension K and the number of clusters H in the model need to be prespecified along with the priors. We take K to be the true dimension of the latent space (i.e., K = 2) since this facilitates model assessment by allowing visualizations of the estimated latent positions. One could also use the Watanabe-Akaike Information Criterion (WAIC) to select the K with the smallest WAIC, as in Sosa and Betancourt [2022]. However, WAIC and other information criteria like the Deviance Information Criterion (DIC) are not helpful in choosing the number of clusters H. We noticed that the model assessment is significantly worse when H is chosen smaller than the truth, whereas model assessments are similar among models whose H is at least as large as the truth. A comparison of the model assessment for different specified H is given in Appendix B. From the comparison, we can also see that when H is specified to be larger, the number of clusters in the estimated group membership ĝ also tends to be larger. Therefore, we suggest choosing H to be the largest number of groups that one is willing to accept, and in this example we choose H to be 5.
We then fit LPJMM using MCMC sampling with 20,000 burn-in iterations and a further 10,000 iterations which are kept for posterior analysis. The Markov chain mixes reasonably well and shows no signs of lack of convergence (see Appendix C for the traceplot of the log-likelihood chain).
916
                               LPCM    LPJMM (simulation 1)   LPJMM (simulation 2)
  Sum of Euclidean distances   23.08   28.06                  12.20
Table 2: Sum of distances between the estimated and true latent positions.
926
To evaluate a model's ability to recover the group membership, we first find estimates of the clustering using the optimization algorithms (i.e., MaxPEAR, MinBinder and GreedyEPL) mentioned in Section 2.4. The adjusted Rand index is then calculated for each clustering estimate. Note that SNSM does not define clusters, therefore we only compare the adjusted Rand index between LPJMM, LPCM, and CSBM. Since the sbm package takes a non-Bayesian approach that uses a variational-EM algorithm to find a point estimate of the group membership g, optimization methods like MaxPEAR are not needed to analyze results from CSBM. The results shown in Table 1 suggest that these three models have a similar ability to recover group membership, with the Rand indices of LPJMM under the MaxPEAR and MinBinder algorithms being higher than the Rand index (0.707) under the CSBM model. A visualization of the estimated clusters based on the true latent positions is given in Fig. 3. Also notice that the MinBinder algorithm tends to overestimate the number of clusters in the network.
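For reference, the adjusted Rand index between an estimated clustering and the truth is readily available in, for instance, scikit-learn; a minimal check of a clustering summary might look like the following (the toy label vectors are placeholders for the true g and a point estimate such as the MaxPEAR clustering):

    from sklearn.metrics import adjusted_rand_score

    g_true = [0, 0, 1, 1, 2, 2]   # true group labels (toy example)
    g_hat = [1, 1, 0, 0, 2, 2]    # e.g. the MaxPEAR point estimate
    ari = adjusted_rand_score(g_true, g_hat)
    print(f"adjusted Rand index: {ari:.3f}, groups found: {len(set(g_hat))}")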
937
To further compare the ability to recover the latent position configuration between LPJMM and LPCM, we find an estimate of the latent positions as follows. Firstly, we perform the Procrustes transformation on zn for each iteration n, and then take the estimate ẑ of z to be the average of the zn. We then calculate the Euclidean distance between the estimated latent position ẑi (the i-th row of ẑ) and the true latent position zi (the i-th row of z) for each actor i, and use the sum of these distances over all actors to quantify the similarity between the estimated and the true latent position configurations. The results, shown in Table 2, suggest that the two models recover the latent positions similarly well. Plots of the estimated latent positions of LPJMM and LPCM can be found in Appendix D, which also suggest similar estimated configurations of z, in line with Table 2.
946
Following Sosa and Betancourt [2022], we assess whether the models fit well in the sense of reproducing a variety of summary statistics, which are calculated from a collection of simulated networks generated as follows. For LPJMM and SNSM, a network is simulated for every 10-th iteration using the parameters of that iteration. For LPCM and CSBM, 1000 networks are simulated using their respective packages. Then, for each model, we calculate the density, transitivity, and assortativity coefficient (if applicable) with respect to the true group membership for each simulated network. Boxplots of these summary statistics are given in Fig. 4 and their averages for each model are given in Table 3.
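These three network summaries can be computed, for example, with the Python package networkx; the sketch below is only one possible implementation and the inputs adj (0/1 adjacency matrix) and g (group labels) are placeholders for a simulated layer:

    import networkx as nx
    import numpy as np

    def gof_summaries(adj, g):
        """Density, transitivity and assortativity (with respect to the group
        labels g) of an undirected network given by the 0/1 adjacency matrix adj."""
        graph = nx.from_numpy_array(np.asarray(adj))
        nx.set_node_attributes(graph, {i: int(gi) for i, gi in enumerate(g)}, "group")
        return {
            "density": nx.density(graph),
            "transitivity": nx.transitivity(graph),
            "assortativity": nx.attribute_assortativity_coefficient(graph, "group"),
        }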
955
[Figure 4; boxplots of the density, transitivity, and assortativity of networks simulated under LPJMM, LPCM, SNSM, and CSBM.]
Figure 4: Boxplots of summary statistics for each model. Red dotted lines indicate the true values of the respective network characteristics.
972
                  true value   LPJMM    LPCM     SNSM     CSBM
  density         0.1531       0.1539   0.1530   0.1504   0.1499
  transitivity    0.5049       0.5144   0.5467   0.4027   0.3776
  assortativity   0.5512       0.5468   0.5475   0.4811   0.4954
Table 3: Means of the summary statistics of the simulated networks for each model in simulation 1.
996
Note that our model appropriately captures these structural features of the network data, while LPCM tends to overestimate the transitivity in the network, and both SNSM and CSBM tend to underestimate both the transitivity and the assortativity.
999
3.2 Simulation 2: a two-layer network
Continuing with the parameter setup of simulation 1 and using its generated network as the first layer (i.e., a1 = 5, b1 = −2, θ1 = 2.72), we generate a second layer of the network with a2 = 3, b2 = 1, θ2 = 4. As in simulation 1, we fit LPJMM with K = 2 and H = 5 and evaluate the model's ability to recover the group membership using adjusted Rand indices based on the clustering summaries. The results are given in Table 4, which shows clustering estimates similar to those in simulation 1 where only one layer is considered. However, the sum of Euclidean distances between the estimated and true latent positions of all actors (see Table 2) in simulation 2 is 12.20, a significant improvement compared to 28.06 in simulation 1. The plot of the estimated latent position configuration is given in Fig. 5 (B), which visualizes the model's recovery of the latent positions and the group membership.
We also carry out the goodness-of-fit test as in simulation 1; the result is given in Table 5, which shows that LPJMM captures these structural features accurately, and the result for layer 1 is similar to that in simulation 1.
1015
                         MaxPEAR     MinBinder    GreedyEPL
  adjusted Rand index    0.748 (6)   0.753 (12)   0.662 (4)
Table 4: Adjusted Rand indices corresponding to different estimation methods for group membership in simulation 2. Numbers in the parentheses represent numbers of estimated groups.
1024
[Figure 5; panels (a) Truth and (b) Estimated z and g, each showing the 100 actors.]
Figure 5: (A): Points are plotted based on the true latent positions z and true group membership g. (B): Points are plotted using the estimated latent positions in simulation 2, and color represents the estimated group membership using the MaxPEAR method.
1229
Recall that θ1 and θ2 are of no direct interest since they are not identifiable. However, we are still interested in the ratio θ1/θ2 since it reflects the relative strength with which each layer borrows information from the latent space. Although aℓ and bℓ are also of no direct interest, we pay attention to their signs, especially that of bℓ, because different signs of bℓ imply different interpretations of the effect of the attributes, as discussed in Section 2. We also assess the model's ability to estimate the parameters β, τ², and σ² using posterior means and 95% credible intervals. The results are given in Table 6. Overall, LPJMM recovers the true values of these model parameters quite well, except for τ² and σ². Both LPJMM and SNSM tend to underestimate σ² and overestimate τ². That is, the covariance of the attributes tends to be underestimated, and although τ² is slightly overestimated, the variance of the attributes (τ² + σ²) still tends to be underestimated.
1238
4 Real data analysis
In this section, we consider a three-layer network data set collected by Lazega [2001] from a corporate law firm in New England during 1988-1991. The network describes three types of relationships (namely, advice, friendship, and coworker contacts) between 71 lawyers in the firm. Several actor attributes are also collected: age, gender, seniority (years with the firm), office (located in Boston, Hartford, or Providence), practice (litigation or corporate law), law school attended (Harvard or Yale, University of Connecticut, or other universities) and status (partner or associate).
1248
                              true value   mean
  density         layer 1     0.1531       0.1535
                  layer 2     0.1024       0.1023
  transitivity    layer 1     0.5049       0.5088
                  layer 2     0.5477       0.5546
  assortativity   layer 1     0.5512       0.5466
                  layer 2     0.6923       0.6890
Table 5: Means of the summary statistics in simulation 2.
1272
           true value   posterior mean   95% credible interval
  θ1/θ2    0.680        0.653            (0.579, 0.721)
  a1       5            5.01             (4.719, 5.262)
  a2       3            3.25             (2.976, 3.572)
  b1       -2           -1.919           (-2.053, -1.766)
  b2       1            1.058            (0.901, 1.252)
  β        0            -0.047           (-1.027, 1.01)
  τ²       0.3          0.409            (0.261, 0.592)
  σ²       1            0.642            (0.230, 1.684)
Table 6: Posterior means and 95% credible intervals.
1308
A principal component analysis (PCA) is performed on the age and seniority attributes, and the first principal component explains 89% of the variance, which is unsurprising since age and seniority are highly correlated (correlation coefficient 0.78). We choose the first principal component as the attribute x and let H = 8, since this is the largest number of clusters we expect in the network. The model is then fitted to the network using the same prior and Markov chain setup as in Section 3.
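The construction of this univariate attribute is a standard first principal component; a minimal Python sketch with scikit-learn is given below, where the short age and seniority vectors are toy values standing in for the actual Lazega covariates:

    import numpy as np
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler

    # age and seniority of the lawyers (toy numbers in place of the real data)
    age = np.array([35, 50, 41, 62, 29])
    seniority = np.array([5, 22, 10, 31, 2])

    features = StandardScaler().fit_transform(np.column_stack([age, seniority]))
    pca = PCA(n_components=1)
    x = pca.fit_transform(features).ravel()   # attribute used in the model
    print("variance explained:", pca.explained_variance_ratio_[0])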
1313
Our study of the Lazega network aims to find out how the three types of relations can be explained by the findings deduced from the model fit. We first visualize the estimated latent positions z colored by the different categorical attributes (gender, office, practice, law school, and status) in Fig. 6. As we can see from these plots, the estimated positions z are well separated by office (especially the Boston and Hartford offices) and by practice. Comparing these plots with z colored by the MaxPEAR and GreedyEPL clustering estimates ĝ in Fig. 7, we see that both estimates roughly cluster the lawyers into three groups: lawyers in the Hartford office, litigation lawyers in the Boston or Providence offices, and corporate lawyers in the Boston or Providence offices.
1325
[Figure 6; five panels of the 71 lawyers plotted at their estimated latent positions, colored by gender (male/female), office (Boston/Hartford/Providence), practice (litigation/corporate), law school (Harvard/Yale, UConn, other), and status (partner/associate).]
Figure 6: Points in all plots are drawn based on the estimated latent positions z, and are colored based on their categories in gender, office, practice, law school, and status.
1668
Plots of the adjacency matrices of the three layers (where lawyers are grouped by the MaxPEAR estimate of g) and of their corresponding networks are given in Fig. 8. The coworker network displays the estimated clustering pattern most strongly, while the advice network displays it least, which can also be seen from the relative ratios of the θℓ in Table 7. This means that lawyers from the same office and with the same practice are more likely to become coworkers and friends, but whom they seek advice from does not depend much on office and practice. Furthermore, we can deduce from the posteriors of bℓ in Table 7 that these lawyers tend to seek advice from people of similar age (or seniority), since the posterior estimate of b1 is negative, while lawyers of different ages (or seniority) are more likely to become friends and coworkers. This conclusion is in line with the assortativity coefficients with respect to the nodal attribute (lawyer's age) given in Table 8.
1676
5 Discussion
This paper presents a latent position model that extends the LPCM of Handcock et al. [2007] and the SNSM of Ciminelli et al. [2019] to jointly model network data and nodal attributes and to perform model-based clustering.
1683
[Figure 7; panels (a) MaxPEAR and (b) GreedyEPL, each showing the 71 lawyers at their estimated latent positions.]
Figure 7: Points are plotted using the estimated latent positions and color indicates the estimated group membership using the MaxPEAR and GreedyEPL methods respectively.
1825
           posterior mean   95% credible interval
  θ1/θ2    0.3229           (0.2352, 0.4152)
  θ1/θ3    0.2035           (0.1479, 0.2606)
  θ2/θ3    0.6319           (0.5536, 0.7198)
  b1       -0.0986          (-0.1401, -0.0579)
  b2       0.0708           (0.0263, 0.1137)
  b3       0.133            (0.0854, 0.186)
Table 7: Posterior means and 95% credible intervals.
1846
By jointly modeling the network and the attributes, we are able to describe how the attributes vary over the network and to explain how relations may be influenced by attributes. LPJMM also provides an extension to multi-layer network settings under the assumption that all layers share the same latent position structure but borrow that latent structure information with different strengths. We applied our method to two simulated networks, one with a single layer and one with two layers, and found that our model gives satisfactory fits to both data sets and is competitive in terms of goodness-of-fit and group detection compared with SNSM, LPCM, and CSBM. The model was also applied to a three-layer real network data set, from which we were able to draw reasonable conclusions.
We have suggested choosing the number of groups H to be the largest number of groups that one is willing to accept in the network, because we have found that varying the number of groups has almost no impact on the model fit and prediction outcome as long as it is in a reasonable range. One could also fit the CSBM to the network first and choose H based on its estimated number of groups. One problem we have not addressed in this paper is the choice of the dimension of the latent space; this can be handled by Bayesian model selection such as WAIC, as in Sosa and Betancourt [2022].
Our model could be extended in several ways. Firstly, other extensions of our model to multi-layer settings could be considered.
1863
[Figure 8; columns: advice, friendship, coworker.]
Figure 8: Upper: Heatmaps of the adjacency matrices (where lawyers are reordered according to the MaxPEAR estimate of g). Lower: A visualization of the three layers based on the estimated z; color indicates the MaxPEAR estimate of g.

                  advice   friendship   coworker
  assortativity   0.2536   -0.1107      -0.1224
Table 8: Assortativity coefficients with respect to lawyer's age.
1880
For example, Sosa and Betancourt [2022] assumed conditionally independent layer-specific latent positions, whereas MacDonald et al. [2022] assumed that the latent position of an actor in all layers is (d0 + d1)-dimensional, where the first d0 components are shared across all layers and only the last d1 components are layer-specific. Secondly, instead of assigning a user-specified number of groups H to the model, we could learn the number of groups by using a Bayesian nonparametric approach with a Dirichlet process prior on the community memberships (see, e.g., Amini et al., 2019).
LPJMM could also be extended to leverage multivariate covariates. So far, we have limited ourselves to modeling univariate nodal attributes that are approximately Gaussian. For continuous nodal attributes with more than one dimension, we have used the first principal component from a principal component analysis. To take full advantage of high-dimensional nodal attributes, one could use multivariate spatial process modeling to replace Eq. (2). Other extensions towards more sophisticated spatial modeling include spatiotemporal modeling of the attributes for time-varying networks, which would help to describe changes in actors over time.
1894
Appendix
A Model Specifications for SNSM, LPCM and CSBM
Note that the original SNSM in Ciminelli et al. [2019] uses the logit link. In order to make a fair comparison, we use the probit link in SNSM, as in LPJMM. The model specification for SNSM used in this paper is given as follows:
    yi,j | z, x, a, b ∼ind Ber( Φ(a + b|xi − xj| − ∥zi − zj∥) ),
    x | z, β, σ, τ, φ ∼ NN(β1N, σ²M(z, φ) + τ²IN),
and the priors are set to be the same as the priors in LPJMM (where possible). To be specific,
    zi ∼i.i.d. N2(0, I2),   β ∼ N(0, 10⁴),   σ² ∼ InvG(2, 1),   τ² ∼ InvG(2, 1),   φ ∼ U(0, 1),
and the priors on the parameters in the probit regression tier are given by
    a ∼ N(0, 9),   b ∼ N(0, 9).
SNSM in this paper is implemented using JAGS.
1922
The model specification for LPCM (see Handcock et al., 2007) is given as follows:
    yi,j | z, x, β0, β1 ∼ind Ber( logit⁻¹(β0⊺ xi,j − β1∥zi − zj∥) ),
    zi | ω, µ, κ ∼i.i.d. Σ_{h=1}^{5} ωh N(µh, κh² IK),
and we use the default priors given in the latentnet package for the prior specifications.
1939
We first introduce some notation before presenting the CSBM of Leger [2016]. Suppose there are Q groups in the network. Denote the N × Q group membership matrix by Z = {Ziq}, with Ziq = 1 if actor i belongs to group q and Ziq = 0 otherwise; it is assumed that an actor can only belong to one group. The model specification for CSBM is given as follows:
    yi,j | Zi, Zj, x, β ∼ind Ber( logit⁻¹(m_{qi,qj} + β⊺xi,j) ),
where Zi is the i-th row of Z, qi is the group membership of actor i, and the group effect m_{qi,qj} ∈ R.
1952
B Comparing model performance for different numbers of groups
We conduct a comparison of LPJMM with different H ∈ {3, 4, . . . , 9} using the data set of simulation 1. Table 9 presents the adjusted Rand indices; the results are similar for models that assume H to be equal to or larger than the true number of groups (which is 5 in this example). However, the adjusted Rand indices for all three estimates are significantly smaller when the model assumes H to be smaller than 5. Also notice that the estimated number of groups increases with H. Visualizations of how the adjusted Rand indices and the estimated numbers of groups change with H are given in Fig. 9.
1960
  H   MaxPEAR       MinBinder     GreedyEPL
  3   0.4067 (3)    0.4008 (5)    0.4321 (3)
  4   0.4882 (3)    0.4977 (6)    0.6521 (4)
  5   0.7374 (5)    0.7115 (11)   0.6635 (4)
  6   0.7237 (6)    0.7442 (20)   0.7134 (4)
  7   0.7449 (7)    0.6624 (25)   0.7313 (4)
  8   0.7422 (8)    0.6674 (25)   0.7293 (8)
  9   0.7056 (12)   0.7041 (25)   0.7043 (11)
Table 9: Adjusted Rand indices of different estimates under LPJMM with different H. Numbers in the parentheses denote the numbers of estimated groups.
1994
[Figure 9; left panel: adjusted Rand index versus H; right panel: estimated number of groups versus H; curves for MaxPEAR, MinBinder, and GreedyEPL.]
Figure 9: Left: Adjusted Rand indices of the clustering estimates found using the MaxPEAR, MinBinder, and GreedyEPL methods. Right: Estimated number of groups using the three methods.
2024
+ and GreedyEPL methods. Right: Estimated number of groups using the three methods.
2025
+ The goodness-of-fit test outlined in Section 3 is also carried out here to compare the means of several sum-
2026
+ mary statistics, which are plotted in Fig. 10. As we can see from the plots, the model’s fit is not affected by
2027
+ the choice of H even for H smaller than the actual number of clusters in the network.
2028
+ 20
2029
+
2030
+ 3
2031
+ 4
2032
+ 5
2033
+ 6
2034
+ 7
2035
+ 8
2036
+ 9
2037
+ H
2038
+ 0.1536
2039
+ 0.1540
2040
+ 0.1544
2041
+ (a) density
2042
+ 3
2043
+ 4
2044
+ 5
2045
+ 6
2046
+ 7
2047
+ 8
2048
+ 9
2049
+ H
2050
+ 0.513
2051
+ 0.514
2052
+ 0.515
2053
+ (b) transitivity
2054
+ 3
2055
+ 4
2056
+ 5
2057
+ 6
2058
+ 7
2059
+ 8
2060
+ 9
2061
+ H
2062
+ 0.538
2063
+ 0.546
2064
+ 0.554
2065
+ (c) assortativity
2066
+ Figure 10: The means of summary statistics for different H.
2067
C Traceplots of the log-likelihood
The traceplots of the log-likelihood (after thinning the Markov chain every 10 iterations) for the simulation studies and the real application of Sections 3 and 4 are given in Fig. 11.
2071
[Figure 11; panels (a) simulation 1, (b) simulation 2, (c) Lazega network.]
Figure 11: Traceplots of the log-likelihood.
2099
D Visualizations of results from LPJMM and LPCM
Visualizations of the estimated latent positions and the estimated group memberships using the MaxPEAR, MinBinder, and GreedyEPL methods under LPJMM and LPCM are shown in Figs. 12 and 13 respectively.
2104
+
2105
[Figure 12; panels (a) MaxPEAR, (b) MinBinder, (c) GreedyEPL, each showing the 100 actors at the estimated latent positions.]
Figure 12: Points are plotted based on the estimated latent positions z and the three estimated group memberships ĝ of LPJMM.

[Figure 13; panels (a) MaxPEAR (LPCM), (b) MinBinder (LPCM), (c) GreedyEPL (LPCM).]
Figure 13: Points are plotted based on the estimated z and the three estimated ĝ of LPCM.
2714
References
J. H. Albert and S. Chib. Bayesian analysis of binary and polychotomous response data. Journal of the American Statistical Association, 88:669–679, 1993.
D. J. Aldous. Exchangeability and related topics. Springer, Berlin, Heidelberg, 1985.
A. A. Amini, M. S. Paez, and L. Lin. Hierarchical stochastic block model for community detection in multiplex networks. arXiv:1904.05330, 2019.
A. Athreya, D. E. Fishkind, M. Tang, C. E. Priebe, Y. Park, J. T. Vogelstein, K. Levin, V. Lyzinski, Y. Qin, and D. L. Sussman. Statistical inference on random dot product graphs: a survey. Journal of Machine Learning Research, 18:1–92, 2017.
S. Banerjee, B. Carlin, and A. Gelfand. Hierarchical Modeling and Analysis for Spatial Data. CRC Press, 2nd edition, 2015.
J. Chiquet, S. Donnet, and P. Barbillon. sbm: Stochastic Blockmodels, 2022. URL https://CRAN.R-project.org/package=sbm. R package version 0.4.4.
J. T. Ciminelli, T. Love, and T. T. Wu. Social network spatial model. Spatial Statistics, 29:129–144, 2019.
J. Diebolt and C. P. Robert. Estimation of finite mixture distributions through Bayesian sampling. Journal of the Royal Statistical Society, Series B, 56:363–375, 1994.
D. Durante and D. Dunson. Bayesian inference and testing of group differences in brain networks. Bayesian Analysis, 13:29–58, 2018.
S. D'Angelo, T. B. Murphy, and M. Alfò. Latent space modelling of multidimensional networks with application to the exchange of votes in Eurovision song contest. The Annals of Applied Statistics, 13:900–930, 2019.
P. Erdös and A. Rényi. On random graphs. I. Publicationes Mathematicae (Debrecen), 6:290–297, 1959.
K. Fosdick and P. D. Hoff. Testing and modeling dependencies between a network and nodal attributes. Journal of the American Statistical Association, 110:1047–1056, 2015.
O. Frank and D. Strauss. Markov graphs. Journal of the American Statistical Association, 81:832–842, 1986.
A. Fritsch. mcclust: Process an MCMC Sample of Clusterings, 2022. URL https://CRAN.R-project.org/package=mcclust. R package version 1.0.1.
A. Fritsch and K. Ickstadt. Improved criteria for clustering based on the posterior similarity matrix. Bayesian Analysis, 4:367–391, 2009.
I. Gollini and T. B. Murphy. Joint modeling of multiple network views. Journal of Computational and Graphical Statistics, 25:246–265, 2016.
S. Guha and A. Rodriguez. Bayesian regression with undirected network predictors with an application to brain connectome data. Journal of the American Statistical Association, 116(534):581–593, 2021.
M. S. Handcock, A. E. Raftery, and J. M. Tantrum. Model-based clustering for social networks. Journal of the Royal Statistical Society: Series A, 170:301–354, 2007.
P. Hoff. Modeling homophily and stochastic equivalence in symmetric relational data. In Advances in Neural Information Processing Systems, pages 657–664, 2007.
P. D. Hoff. Bilinear mixed-effects models for dyadic data. Journal of the American Statistical Association, 100:286–295, 2005.
P. D. Hoff. Multiplicative latent factor models for description and prediction of social networks. Computational and Mathematical Organization Theory, 15:261–272, 2009.
P. D. Hoff, A. E. Raftery, and M. S. Handcock. Latent space approaches to social network analysis. Journal of the American Statistical Association, 97:1090–1098, 2002.
D. N. Hoover. Row-column exchangeability and a generalized model for probability. Exchangeability in Probability and Statistics (Rome, 1981), pages 281–291, 1982.
M. Kim and J. Leskovec. Multiplicative attribute graph model of real-world networks. Internet Mathematics, 8:113–160, 2012.
E. D. Kolaczyk and G. Csárdi. Statistical Analysis of Network Data with R. Springer, 2nd edition, 2020.
P. N. Krivitsky and M. S. Handcock. latentnet: Latent Position and Cluster Models for Statistical Networks. The Statnet Project (https://statnet.org), 2022. URL https://CRAN.R-project.org/package=latentnet. R package version 2.10.6.
P. N. Krivitsky, M. S. Handcock, A. E. Raftery, and P. D. Hoff. Representing degree distributions, clustering, and homophily in social networks with latent cluster random effects models. Social Networks, 31:204–213, 2009.
J. W. Lau and P. J. Green. Bayesian model based clustering procedures. Journal of Computational and Graphical Statistics, 16:526–558, 2007.
E. Lazega. The Collegial Phenomenon: The Social Mechanisms of Cooperation Among Peers in a Corporate Law Partnership. Oxford University Press, 2001.
C. Lee and D. J. Wilkinson. A review of stochastic block models and extensions for graph clustering. Applied Network Science, 4:1–50, 2019.
J.-B. Leger. Blockmodels: A R-package for estimating in Latent Block Model and Stochastic Block Model, with various probability functions, with or without covariates. arXiv:1602.07587, 2016.
C. D. Linkletter. Spatial process models for social network analysis. PhD thesis, 2007.
P. W. MacDonald, E. Levina, and J. Zhu. Latent space models for multiplex networks with shared structure. arXiv:2012.14409, 2020.
P. W. MacDonald, E. Levina, and J. Zhu. Latent space models for multiplex networks with shared structure. Biometrika, 109:683–706, 2022.
C. Matias and S. Robin. Modeling heterogeneity in random graphs through latent space models: a selective review. ESAIM: Proceedings and Surveys, 47:55–74, 2014.
S. Minhas, P. D. Hoff, and M. D. Ward. Inferential approaches for network analysis: AMEN for latent factor models. Political Analysis, 27:208–222, 2019.
K. Nowicki and T. A. B. Snijders. Estimation and prediction for stochastic block structures. Journal of the American Statistical Association, 96:1077–1087, 2001.
M. Plummer. rjags: Bayesian Graphical Models using MCMC, 2022. URL https://CRAN.R-project.org/package=rjags. R package version 4-13.
R Core Team. R: A Language and Environment for Statistical Computing. R Foundation for Statistical Computing, Vienna, Austria, 2021. URL https://www.R-project.org/.
R. Rastelli. GreedyEPL: Greedy Expected Posterior Loss, 2021. URL https://CRAN.R-project.org/package=GreedyEPL. R package version 1.2.
R. Rastelli and N. Friel. Optimal Bayesian estimators for latent variable cluster models. Statistics and Computing, 28:1169–1186, 2018.
M. Salter-Townshend and T. H. McCormick. Latent space models for multiview network data. The Annals of Applied Statistics, 11:1217–1244, 2017.
M. Schweinberger and A. B. Snijders. Settings in social networks: a measurement model. Sociological Methodology, 33:307–341, 2003.
J. Sosa. A review on latent space models for social networks. Revista Colombiana de Estadistica, 44:171–200, 2021.
J. Sosa and B. Betancourt. A latent space model for multilayer network data. Computational Statistics & Data Analysis, 169:107432, 2022.
L. Wang, Z. Zhang, and D. Dunson. Common and individual structure of brain networks. Annals of Applied Statistics, 13:85–112, 2019.
Y. J. Wang and G. Y. Wong. Stochastic blockmodels for directed graphs. Journal of the American Statistical Association, 82:8–19, 1987.
8tAyT4oBgHgl3EQfQ_YL/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
99FLT4oBgHgl3EQfui_z/content/tmp_files/2301.12156v1.pdf.txt ADDED
@@ -0,0 +1,1775 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Perspective: How to overcome dynamical density functional theory
2
+ Daniel de las Heras,1 Toni Zimmermann,1 Florian Samm¨uller,1 Sophie Hermann,1 and Matthias Schmidt1
3
+ 1Theoretische Physik II, Physikalisches Institut, Universit¨at Bayreuth, D-95447 Bayreuth, Germany
4
+ (Dated: 28 January 2023)
5
+ We argue in favour of developing a comprehensive dynamical theory for rationalizing, predicting,
6
+ and machine learning nonequilibrium phenomena that occur in soft matter. To give guidance for
7
+ navigating the theoretical and practical challenges that lie ahead, we discuss and exemplify the
8
+ limitations of dynamical density functional theory. Instead of the implied adiabatic sequence of
9
+ equilibrium states that this approach provides as a makeshift for the true time evolution, we posit
10
+ that the pending theoretical tasks lie in developing a systematic understanding of the dynamical
11
+ functional relationships that govern the genuine nonequilibrium physics. While static density func-
12
+ tional theory gives a comprehensive account of the equilibrium properties of many-body systems,
13
+ we argue that power functional theory is the only present contender to shed similar insights into
14
+ nonequilibrium dynamics, including the recognition and implementation of exact sum rules that
15
+ result from the Noether theorem. As a demonstration of the power functional point of view, we
16
+ consider an idealized steady sedimentation flow of the three-dimensional Lennard-Jones fluid and
17
+ machine-learn the kinematic map from the mean motion to the internal force field. This proof of con-
18
+ cept demonstrates the significant potential of machine learning the inherent functional relationships
19
+ that govern nonequilibrium many-body physics.
20
+ I. INTRODUCTION
22
+ The coupled dynamics of the microscopic degrees of
23
+ freedom in typical soft matter systems generates a wide
24
+ array of relevant and also often unsolved nonequilibrium
25
+ phenomena [1, 2].
26
+ One central quantity for the char-
27
+ acterization of self-assembly and structure formation in
28
+ complex systems is the microscopically resolved one-body
29
+ density distribution ρ(r, t), where r indicates position
30
+ and t denotes time. The “density profile” ρ(r, t) acts as a
31
+ central order parameter both due to its intuitive physical
32
+ interpretation and clearcut mathematical definition [3].
33
+ According to the dynamical density functional theory
34
+ (DDFT), as originally proposed by Evans in 1979 [4],
35
+ the time evolution of the microscopic density profile is
36
+ assumed to be determined by the following partial differ-
37
+ ential equation:
38
+ ∂ρ(r, t)/∂t = γ⁻¹∇ · ρ(r, t)∇[δF[ρ]/δρ(r, t) + Vext(r, t)].   (1)
46
+ Here γ is a friction constant, F[ρ] is an intrinsic free
47
+ energy functional that depends functionally on the den-
48
+ sity profile, and the external potential Vext(r, t) repre-
49
+ sents interactions of the system with the environment.
50
+ The system is set into motion by a temporal variation
51
+ of Vext(r, t), such as e.g. step-like switching at an initial
52
+ time.
53
+ The time evolution according to Eq. (1) conserves the
54
+ particle number locally and hence it constitutes dynam-
55
+ ics of model B type [5].
56
+ In standard applications one
57
+ starts with an equilibrium state of the system and then
58
+ the dynamics are monitored on the basis of numerical
59
+ time integration of Eq. (1). In order to provide reference
60
+ data and to allow for the generation of benchmark results
61
+ to assess the quality of the theory, resorting to many-
62
+ body computer simulations is common, with overdamped
63
+ Brownian dynamics (BD) being a popular choice (Ref. [6]
64
+ describes a modern and stable algorithm). Comparison
65
+ of DDFT data with experimental results is more scarce,
66
+ but notable exceptions include non-equilibrium sedimen-
67
+ tation of colloids [7], the self-diffusion of particles in com-
68
+ plex fluids [8], and the bulk dynamics of Brownian hard
69
+ disks [9].
70
+ The DDFT time evolution reaches a stationary state
71
+ if the gradient on the right hand side of Eq. (1) vanishes,
72
+ i.e. provided that the expression inside of the parentheses
73
+ is constant:
74
+ δF[ρ]/δρ(r) + Vext(r) = µ.   (2)
77
+ Here we have dropped the dependence on time in the
78
+ notation, as the situation is now static. The constant µ
79
+ can be identified with the chemical potential, which in a
80
+ grand canonical statistical mechanical setting is the con-
81
+ jugate control parameter of the mean particle number.
82
+ Equation (2) is exact in equilibrium, as was shown by
83
+ Evans [4]. He proved the equilibrium intrinsic free en-
84
+ ergy functional F[ρ] to exist, to be unique, and to form
85
+ the starting point for a modern equilibrium theory of
86
+ spatially inhomogeneous liquids and crystals [10, 11].
87
+ In practice one needs to rely on approximations
88
+ for F[ρ], given a microscopic fluid model under consid-
89
+ eration.
90
+ Once one has solved Eq. (2) for given values
91
+ of µ and temperature T (the dependence of F[ρ] on T
92
+ is suppressed in the notation), then in principle com-
93
+ plete knowledge of the thermal system is available. The
94
+ value of the density functional F[ρ] is the true intrinsic
95
+ free energy, and higher-order correlation functions are
96
+ determined via higher-order derivatives of the free en-
97
+ ergy functional or via test-particle procedures. In par-
98
+ ticular two-body correlation functions, such as the bulk
99
+ pair correlation function g(r) as well as its generalization
100
+ to inhomogeneous systems are accessible. These exhibit
101
+ defining characteristics of liquids and more general soft
+ matter systems and they are formally fully contained in
106
+ the static density functional theory framework.
107
+ Together with a number of available reliable approxi-
108
+ mate free energy functionals, density functional theory is
109
+ a powerful theoretical framework that has been used to
110
+ elucidate much intricate and complex behaviour in soft
111
+ matter.
112
+ Recent representative highlights include trac-
113
+ ing hydrophobicity to critical drying at substrates [12–
114
+ 14], resolving three-dimensional structures of electrolyte
115
+ aqueous solutions near surfaces [15, 16], and addressing
116
+ the magnitude of the decay lengths in electrolytes [17].
117
+ Rosenfeld’s celebrated hard sphere fundamental measure
118
+ free energy functional [18–21] is at the core of much of
119
+ this research activity.
120
+ In the following we wish to address whether or not
121
+ the DDFT has the prowess to play a similar role in
122
+ nonequilibrium, as is often at least implicitly assumed.
123
+ We demonstrate on the basis of an explicit and generic
124
+ example, i.e., that of uniaxial compressional flow of the
125
+ three-dimensional Lennard-Jones fluid, that the DDFT is
126
+ fundamentally flawed and that in reality, as represented
127
+ by many-body simulations, recognizing the flow field as
128
+ a further relevant degree of freedom is required to rep-
129
+ resent true nonequilibrium. These conclusions are based
130
+ on analytical power functional approximations, adaptive
131
+ BD simulation data, and explicit machine learning of the
132
+ power functional map from motion to the interparticle
133
+ one-body force field.
134
+ This Perspective is organized as follows. We first make
135
+ some key aspects of DDFT explicit in Sec. II and describe
136
+ several prominent shortcomings of this theory. We then
137
+ give an account of how to go towards the formally exact
138
+ one-body dynamics in Sec. III and provide in Sec. IV a
139
+ description of key aspects of the power functional frame-
140
+ work, which as we wish to argue overcomes the funda-
141
+ mental defects of DDFT. We describe the exemplary sta-
142
+ tionary compressional flow situation in Sec. V and lay
143
+ out the application of Noether’s theorem in this statis-
144
+ tical mechanical setting in Sec. VI. We present machine
145
+ learning results for the kinematic functional relationships
146
+ of the streaming Lennard-Jones fluid in Sec. VII. We give
147
+ conclusions and an outlook in Sec. VIII.
148
+ II. LIMITS AND LIMITATIONS OF ADIABATIC DYNAMICS
151
+ We go into some detail and describe why the DDFT
152
+ represents adiabatic dynamics in the sense of a temporal
153
+ sequence of spatially inhomogeneous equilibrium states.
154
+ The equilibrium intrinsic free energy functional splits into
155
+ ideal and excess (over ideal gas) contributions according
156
+ to F[ρ] = Fid[ρ] + Fexc[ρ]. Here the excess free energy
157
+ functional Fexc[ρ] accounts for the effects of the inter-
158
+ particle interactions on the equilibrium properties of the
159
+ system and it is in general unknown and requires approx-
160
+ imations to be made. The ideal gas free energy functional
161
+ however is exactly given by
162
+ Fid[ρ] = kBT ∫ dr ρ(r)[ln(ρ(r)Λ³) − 1],   (3)
166
+ where kB denotes the Boltzmann constant, Λ is the
167
+ thermal de Broglie wavelength, and we consider three-
168
+ dimensional systems. The functional derivative, as it is
169
+ relevant for Eq. (1), is δFid[ρ]/δρ(r) = kBT ln(ρ(r)Λ3).
170
+ When disregarding the excess contribution and in-
171
+ serting this result alone into the DDFT equation
172
+ of motion (1), its right hand side becomes γ−1∇ ·
173
+ ρ(r, t)∇[kBT ln(ρ(r, t)Λ3) + Vext(r, t)]. This can be re-
174
+ written further such that for the case of the ideal gas,
175
+ where Fexc[ρ] = 0 and F[ρ] = Fid[ρ], the equation of
176
+ motion (1) attains the following form:
177
+ ∂ρ(r, t)/∂t = D0∇²ρ(r, t) − ∇ · ρ(r, t)fext(r, t)/γ.   (4)
181
+ Here D0 = kBT/γ is the diffusion constant, ∇2 is the
182
+ Laplace operator and the external force field is given
183
+ (here) as fext(r, t) = −∇Vext(r, t). Equation (4) is the
184
+ exact drift-diffusion equation for overdamped motion of
185
+ a mutually noninteracting system, i.e., the ideal gas.
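+ As a minimal illustration of how such a time integration can be set up, the following sketch propagates the ideal-gas
+ drift-diffusion equation (4) with an explicit Euler scheme on a periodic one-dimensional grid. The grid, the time step,
+ and the switched-on Gaussian external potential are illustrative assumptions and are not taken from the works cited here.
+
+ import numpy as np
+
+ # Minimal sketch: explicit Euler integration of the ideal-gas drift-diffusion
+ # equation (4) on a periodic 1D grid; all parameters are illustrative.
+ L, M = 10.0, 256
+ x = np.linspace(0.0, L, M, endpoint=False)
+ dx = L / M
+ kBT, gamma = 1.0, 1.0
+ D0 = kBT / gamma
+
+ def ddx(f):
+     # central difference with periodic boundaries
+     return (np.roll(f, -1) - np.roll(f, 1)) / (2.0 * dx)
+
+ Vext = np.exp(-(x - L / 2.0)**2)   # external potential switched on at t = 0
+ fext = -ddx(Vext)                  # external force field
+ rho = np.full(M, 0.5)              # initially uniform bulk density
+
+ dt, steps = 1e-4, 20000
+ for _ in range(steps):
+     flux = -D0 * ddx(rho) + rho * fext / gamma   # ideal diffusion plus drift
+     rho += dt * (-ddx(flux))                     # continuity equation
+
+ print("particle number conserved:", np.isclose(rho.sum() * dx, 0.5 * L))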
186
+ Besides Evans’ original proposal [4] based on the con-
187
+ tinuity equation and undoubtedly his physical intuition,
188
+ derivations of the DDFT (1) were founded much more
189
+ recently on Dean’s equation of motion for the density op-
190
+ erator [22], the Smoluchowski equation [23], a stationary
191
+ action principle for the density [24], the projection op-
192
+ erator formalism [25], a phase-space approach [26], the
193
+ mean-field approximation [27], a local equilibrium as-
194
+ sumption [28], and a non-equilibrium free energy [29].
195
+ The question of the well-posedness of the DDFT was ad-
196
+ dressed [30] and several extensions beyond overdamped
197
+ Brownian dynamics were formulated, such as e.g. for dy-
198
+ namics including inertia [31–34] and for particles that ex-
199
+ perience hydrodynamic interactions [34, 35] or undergo
200
+ chemical reactions [36, 37].
201
+ The DDFT was also used beyond the description of flu-
202
+ ids, such as e.g. for opinion dynamics [38] and epidemic
203
+ spreading [39].
204
+ Recent reviews of DDFT are given in
205
+ Refs. [40, 41]. The theory is put into a wider perspective,
206
+ together with much background pedagogical material in
207
+ Ref. [42]. A modern and well-accessible account of the
208
+ general strategy of dynamical coarse-graining in statisti-
209
+ cal physics, of which the DDFT can be viewed as being a
210
+ representative, has recently been given by Schilling [43].
211
+ The fact that both the static limit for the fully in-
212
+ teracting system (2) as well as the full dynamics of the
213
+ noninteracting system (4) are exact, taken together with
214
+ the heft of the DDFT literature, appears to give much
215
+ credibility to the equation of motion (1). However, de-
216
+ spite the range of theoretical techniques employed [22–29]
217
+ neither of these approaches has provided us with a con-
218
+ crete way of going beyond Eq. (1). Apart from several
219
+ case-by-case and rather ad hoc modifications, no system-
220
+ atic or even only practical identification of what is miss-
221
+ ing has been formulated. (We turn to power functional
+ theory in Sec. IV.) This is a problematic situation as
225
+ two defects of Eq. (1) are immediately obvious upon in-
226
+ spection: i) the description is local in time and there is
227
+ no natural mechanism for the inclusion of memory while
228
+ time-locality is not sufficient for general nonequilibrium
229
+ situations; ii) only flow that leads to direct changes in
230
+ the density profile is captured and hence effects of rota-
231
+ tional flow, such as shearing, as well as of nonequilibrium
232
+ effects in compression and expansion are lost (see below).
233
+ Here we argue that these defects are indicative of a
234
+ broader failure of Eq. (1) to describe nonequilibrium
235
+ physics. We show that the DDFT is only fit to describe
236
+ situations in which the dynamics follow an adiabatic path
237
+ through a sequence of equilibrium states. The description
238
+ of genuine nonequilibrium dynamics in a functional set-
239
+ ting on the one-body level rather requires recognition of
240
+ the local velocity field as a further relevant physical vari-
241
+ able besides the density profile, and this is provided by
242
+ power functional theory [42]. Before laying out key prin-
243
+ ciples of this approach in Sec. IV, we first describe the mi-
244
+ croscopically sharp coarse-graining on the one-body level
245
+ of correlation functions.
246
+ III. TOWARDS EXACT ONE-BODY DYNAMICS
249
+ Evans based his original derivation [4] of Eq. (1) on the
250
+ continuity equation,
251
+ ∂ρ(r, t)/∂t = −∇ · J(r, t),   (5)
255
+ where J(r, t) is the microscopically resolved one-body
256
+ current distribution. Equation (5) is exact in a variety of
257
+ contexts, including overdamped Brownian dynamics, as
258
+ described either on the Fokker-Planck level by the Smolu-
259
+ chowski equation or by the corresponding overdamped
260
+ Langevin equation that governs the trajectories, as they
261
+ are realized in simulation work [6]. For BD the one-body
262
+ current distribution is given exactly by [42]:
263
+ γJ(r, t) = −kBT∇ρ(r, t) + Fint(r, t) + ρ(r, t)fext(r, t).
264
+ (6)
265
+ This identity expresses the force density balance of the
266
+ negative friction force density (left hand side) with the
267
+ force densities due to ideal thermal diffusion, interparti-
268
+ cle interactions, and external influence (three contribu-
269
+ tions on the right hand side). Here the interparticle force
270
+ density distribution is given by the statistical average
271
+ Fint(r, t) = −⟨Σi δ(r − ri)∇iu(rN)⟩t,   (7)
278
+ where the angular brackets indicate an average at fixed
279
+ time t over the nonequilibrium many-body distribu-
280
+ tion, u(rN) is the interparticle interaction potential
281
+ that depends on all particle position coordinates rN ≡
282
+ r1, . . . , rN and ∇i indicates the derivative with respect to
283
+ the position ri of particle i. The formulation of Eq. (7) is
284
+ based on the concept of static operators and a dynami-
285
+ cally evolving probability distribution. This is analogous
286
+ to the Schr¨odinger picture of quantum mechanics. The
287
+ Heisenberg picture is more closely related to simulation
288
+ work. Here the probability distribution is that of the ini-
289
+ tial microstates and the operators move forward in time,
290
+ i.e., the position ri(t) of particle i changes over the course
291
+ of time. Then the Dirac distribution in Eq. (7) becomes
292
+ δ(r − ri(t)), with the generic position variable r however
293
+ remaining static. The forces are those that act in the
294
+ given microstate rN(t) at time t, i.e., the interparticle
295
+ force on particle i at time t is −∇iu(rN(t)).
296
+ In practice, using BD simulations, carrying out the
297
+ average in Eq. (7) requires to build the mean over suf-
298
+ ficiently many separate realizations of the microscopic
299
+ evolution of the many-body system that differ in the ini-
300
+ tial state and in the realization of the thermal noise. As
301
+ Eq. (7) measures both the probability to find particle i at
302
+ position r (via the delta function) and the interparticle
303
+ force that acts via the negative gradient −∇iu(rN), we
304
+ refer to Fint(r, t) as a force density. The corresponding
305
+ force field fint(r, t) is obtained by simple normalization
306
+ with the density profile, i.e. fint(r, t) = Fint(r, t)/ρ(r, t).
307
+ Building this ratio scales out the probability effect and
308
+ the force field then carries physical units of force, i.e.
309
+ energy per length.
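+ As a rough sketch of such a sampling procedure, the snippet below bins particle positions and interparticle forces along x
+ for a planar geometry; the truncated Lennard-Jones pair force and the stored configurations are illustrative assumptions,
+ and any production code would of course average over many more samples.
+
+ import numpy as np
+
+ # Minimal sketch: sampling rho(x) and the force density F_int(x) of Eq. (7)
+ # by binning along x; pair force and configurations are illustrative.
+ L, M = 10.0, 100
+ dx = L / M
+ area = L * L                            # cross section of the cubic box
+
+ def lj_force(rij):
+     # truncated LJ pair forces acting on particle i from its partners
+     rij = rij - L * np.round(rij / L)   # minimum image convention
+     r2 = np.sum(rij**2, axis=1)
+     mask = (r2 > 1e-12) & (r2 < 2.5**2)
+     f = np.zeros_like(rij)
+     inv2 = 1.0 / r2[mask]
+     inv6 = inv2**3
+     f[mask] = (24.0 * inv6 * (2.0 * inv6 - 1.0) * inv2)[:, None] * rij[mask]
+     return f
+
+ rho_hist, Fint_hist = np.zeros(M), np.zeros(M)
+ configs = np.random.rand(50, 500, 3) * L        # placeholder for stored BD samples
+
+ for r in configs:
+     for i in range(r.shape[0]):
+         fi = lj_force(r[i] - np.delete(r, i, axis=0)).sum(axis=0)
+         k = int(r[i, 0] / dx) % M               # x bin of particle i
+         rho_hist[k] += 1.0
+         Fint_hist[k] += fi[0]                   # x component of the internal force
+
+ norm = len(configs) * area * dx                 # samples times bin volume
+ rho, Fint = rho_hist / norm, Fint_hist / norm
+ fint = np.divide(Fint, rho, out=np.zeros(M), where=rho > 0)   # force field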
310
+ In equilibrium the definition (7) remains intact. Com-
311
+ plementing the statistical average, static density func-
312
+ tional theory allows to express the equilibrium force den-
313
+ sity as being functionally dependent on the density pro-
314
+ file via the functional derivative of the excess free energy
315
+ functional according to:
316
+ Fint(r)|eq = −ρ(r)∇δFexc[ρ]/δρ(r).   (8)
321
+ Crucially, and in contrast to Eq. (7), here the internal
322
+ force density is directly expressed as a density functional.
323
+ This dependence has superseded the original dependence
324
+ on the external potential, as is manifest in the probability
325
+ distribution for building the average (7) in equilibrium.
326
+ As a self-consistency check we insert the force density
327
+ functional (8) into the equilibrium limit of the force den-
328
+ sity balance (6). The current vanishes in the equilibrium
329
+ case, J(r, t) ≡ 0, and we obtain
330
+ −kBT∇ρ(r) + Fint(r)|eq + ρ(r)fext(r) = 0.
331
+ (9)
332
+ This result is independent of time and it consti-
333
+ tutes the gradient of the static Euler-Lagrange equa-
334
+ tion (2) when divided by the density profile.
335
+ (Insert
336
+ Eq. (8), identify the ideal gas contribution −kBT∇ρ(r) =
337
+ −ρ(r)δFid[ρ]/δρ(r), and divide by ρ(r).)
338
+ The classical
339
+ force density balance result (9) by Yvon, Born and Green
340
+ [3] has recently been derived from systematically address-
341
+ ing thermal Noether invariance [44, 45] against locally
342
+ resolved spatial deformations of the statistical ensemble
343
+ [46–48], as also valid quantum mechanically [48] and at
+ second order in the displacement field [49, 50]; we give a
347
+ brief account of this theory in Sec. VI below.
348
+ A naive transfer of Eq. (8) to nonequilibrium lets
349
+ one simply evaluate the equilibrium excess free energy
350
+ functional at the instantaneous nonequilibrium density
351
+ ρ(r, t). In order to separate this contribution from true
352
+ static equilibrium, we refer to this force density as being
353
+ adiabatic (subscript “ad”) and to be defined as
354
+ Fad(r, t) = −ρ(r, t)∇δFexc[ρ]/δρ(r, t).   (10)
357
+ We recall that the right hand side offers a concrete com-
358
+ putational structure that is of practical usefulness in ac-
359
+ tual applications, as considerable knowledge about ap-
360
+ proximative forms of the excess free energy density func-
361
+ tional Fexc[ρ] is available. Using the adiabatic force den-
362
+ sity as a proxy for the true nonequilibrium intrinsic force
363
+ density distribution (7), i.e. setting Fint(r, t) = Fad(r, t)
364
+ in the force density balance (6) together with the conti-
365
+ nuity equation (5) leads to the DDFT equation of mo-
366
+ tion (1). The adiabatic force density approximation is
367
+ uncontrolled though and the theory inherently yields the
368
+ dynamics as an adiabatic sequence of equilibrium states.
369
+ Surely, more than 40 years after the conception of the
370
+ DDFT [4], we have to be able to do better!
371
+ IV. POWER FUNCTIONAL TECHNIQUES
373
+ Power functional theory [42] offers a concrete math-
374
+ ematical structure to go forward.
375
+ We describe the es-
376
+ sential steps that enable one to go beyond the DDFT
377
+ and to hence address a significantly expanded realm of
378
+ nonequilibrium physics which Eq. (1) is oblivious of.
379
+ The interparticle force density profile (7) is identified
380
+ to consist of two contributions according to:
381
+ Fint(r, t) = Fad(r, t) + Fsup(r, t).
382
+ (11)
383
+ Here Fad(r, t) is the adiabatic force density profile, as
384
+ given formally via the explicit equilibrium free energy
385
+ derivative (10) and directly accessible in simulations via
386
+ the custom flow method [51, 52]. The custom flow al-
387
+ gorithm allows to systematically construct a hypotheti-
388
+ cal adiabatic (equilibrium) system that shares its density
389
+ profile with the nonequilibrium system at the given time.
390
+ Then sampling the internal force density in the adiabatic
391
+ system yields results for Fad(r, t).
392
+ The second, superadiabatic contribution in Eq. (11),
393
+ Fsup(r, t), contains all effects that are not expressible
394
+ as an instantaneous density functional.
395
+ This includes
396
+ forces that lead to viscous and to nonequilibrium struc-
397
+ ture forming phenomena, as we exemplify below in a con-
398
+ crete model compressional flow situation. Formally, the
399
+ superadiabatic force density is generated from the su-
400
+ peradiabatic excess free power functional P_t^exc[ρ, J] upon
403
+ functional differentiation with respect to the one-body
404
+ current via [42, 53]:
405
+ Fsup(r, t) = −ρ(r, t)δP_t^exc[ρ, J]/δJ(r, t).   (12)
411
+ The functional dependence of P_t^exc[ρ, J] on the density
414
+ and current is causal, i.e. on the values of these fields
415
+ at prior times to t; density and current need to satisfy
416
+ the continuity equation. Upon using Eqs. (11) the force
417
+ density balance (6) attains the following form:
418
+ γJ(r, t) = −kBT∇ρ(r, t) + Fad(r, t) + Fsup(r, t) + ρ(r, t)fext(r, t).   (13)
421
+ This relationship holds beyond gradient forms of
428
+ fext(r, t), i.e. for external force fields that contain non-
429
+ conservative contributions.
430
+ Crucially Fsup(r, t) will in
431
+ general also acquire nonconservative contributions, such
432
+ as e.g. damping effects that represent viscous behaviour.
433
+ Moreover, nonequilibrium structure-forming effects will
434
+ also arise in general. These affect directly the shape of
435
+ the density profile, whether this evolves in time or per-
436
+ sists in a nonequilibrium steady state.
437
+ If one wishes to eliminate the explicit occurrence of the
438
+ current from the dynamics, then inputting the force den-
439
+ sity balance (13) into the continuity equation (5) leads
440
+ to the following formally exact form of the equation of
441
+ motion for the density profile:
442
+ ∂ρ(r, t)/∂t = D0∇²ρ(r, t) + ∇ · [ρ(r, t)/γ]∇δFexc[ρ]/δρ(r, t)
+ − ∇ · [ρ(r, t)/γ][fsup(r, t) + fext(r, t)].   (14)
452
+ Here it is apparent that the superadiabatic force field
453
+ fsup(r, t) = Fsup(r, t)/ρ(r, t) has a direct effect on the
454
+ system dynamics. The effect is similar to that of the ex-
455
+ ternal force field. Crucially though, both force fields are
456
+ independent of each other: the external force field rep-
457
+ resents a prescribed and inert influence on the system.
458
+ In contrast, the superadiabatic force field is an emer-
459
+ gent phenomenon that arises due to interparticle inter-
460
+ actions and, from the functional point of view, depends
461
+ non-locally in position and causally in time on the one-
462
+ body density and on the current profile.
463
+ Although setting fsup(r, t) = 0 yields the DDFT (1),
464
+ the superadiabatic force field fsup(r, t) was demonstrated
465
+ to exist [54–60] and in general to play a major role in
466
+ the dynamics on the one-body level and, based on test-
467
+ particle concepts [61–66] also for two-body correlation
468
+ functions [67–69] and for active matter [70–74]. Both the
469
+ flow properties as well as the spatial structure formation
470
+ in the system are affected.
471
+ To reveal additional physics, it is useful to split into
472
+ “structural” and “flow” contributions. This was estab-
473
+ lished e.g. for complex flow patterns that occur in driven
474
+ BD [55, 59], for active Brownian particles which form
475
+ a self-sustained interface at motility-induced phase co-
476
+ existence [70–74], as well as very recently for a sheared
477
+
478
+ 5
479
+ FIG. 1. Illustration of unidirectional compressional flow of a liquid. The three-dimensional system is set into motion (red
480
+ arrows) by the action of an external force profile fext(x) (blue arrows) which acts along the x-axis. The system retains planar
481
+ geometry such that spatial inhomogeneities only occur as a function of x. The density profile ρ(x) (orange curve) and the
482
+ velocity profile v(x) (red curve) are both stationary in time but inhomogeneous in position.
483
+ The local one-body current
484
+ J(x) = ρ(x)v(x) = const and as a result the system is in a nonequilibrium steady state. The corresponding adiabatic system
485
+ is in equilibrium (it has no mean flow) and it has by construction an unchanged density profile ρ(x). In the adiabatic system
486
+ the spatial variation of ρ(x) is stabilized by the action of an external force field −∇Vad(x) (olive arrows), which acts solely in
487
+ the adiabatic system.
488
+ three-body colloidal gel former [60]. Before we demon-
489
+ strate these concepts for an example of steady nonequi-
490
+ librium below, we first describe two simple model power
491
+ functionals that respectively generate structure and vis-
492
+ cously dampen the motion and that, as we will see, give
493
+ a good account of the nonequilibrium flow considered be-
494
+ low.
495
+ We concentrate on the low-order terms that are rel-
496
+ evant for compressional/extensional flow, i.e., for situa-
497
+ tions where ∇ · v(r, t) ̸= 0.
498
+ We focus on cases where
499
+ there is no rotational motion (such as shearing) and hence
500
+ ∇ × v(r, t) = 0.
501
+ The velocity gradient superadiabatic
502
+ power functional consists of a sum,
503
+ P_t^exc[ρ, v] = P_t^flow[ρ, v] + P_t^str[ρ, v].   (15)
511
+ Here the flow and structural [55, 59] contributions are
512
+ approximated, respectively, by the following time-local
513
+ (Markovian) and space-semilocal (i.e. involving ∇) forms
514
+ P_t^flow[ρ, v] = (η/2) ∫ dr [ρ(r, t)∇ · v(r, t)]²,   (16)
+ P_t^str[ρ, v] = −(χ/3) ∫ dr [ρ(r, t)∇ · v(r, t)]³,   (17)
528
+ where the overall prefactors η and χ control the respec-
529
+ tive magnitude.
530
+ The flow functional (16) is quadratic
531
+ both in density and in the velocity field; the structural
532
+ functional (17) is of cubic order in each of these variables.
533
+ Explicit higher-order functionals exist [59] and they be-
534
+ come relevant when driving the system strongly. We will
535
+ return to the consequences of Eqs. (16) and (17) after
536
+ laying out in Sec. V the actual flow situation that we use
537
+ as a model to exemplify the implications for the physics.
538
+ Before doing so, we briefly describe several further key
539
+ aspects of the power functional framework.
540
+ Power functional theory provides a formal framework
541
+ for the inclusion of time- and space-nonlocal dynamics
542
+ [56, 68, 79]. While Eq. (12) applies to overdamped dy-
543
+ namics, the acceleration field becomes a further relevant
544
+ degree of freedom if inertia is relevant [78–81], whether
545
+ classically in molecular dynamics [78, 79] or in quantum
546
+ dynamics [80, 81].
547
+ Here the memory functions act as
548
+ convolution kernels on specific kinematic fields and rota-
549
+ tional and compressional contributions to the dynamics
550
+ are genuinely built in. As laid out above, the framework
551
+ is based on an exact variational concept [42, 53], and the
552
+ resulting functional mapping was shown to be explicitly
553
+ accessible in many-body simulation via the custom flow
554
+ computer simulation method [51, 52].
555
+ Even simple mathematical model forms for the
562
+ nonequilibrium contribution to the power functional,
563
+ such as Eqs. (16) and (17), already capture essential
564
+ physics (as we demonstrate below) and dynamical two-
565
+ body correlation functions are accessible via test particle
566
+ dynamics [8, 9, 61–69]. The power functional is thereby
567
+ not to be confused with the often vague concept of a
+ “nonequilibrium free energy”.
576
+ The proper equilibrium
577
+ free energy functional does play a central role in power
578
+ functional theory though, via providing the description
579
+ of the adiabatic reference state [42], see the generation
580
+ of the force density distribution via functional differenti-
581
+ ation (10), as is relevant for the interparticle force split-
582
+ ting (11), and the full density equation of motion (14).
583
+ The relevance of superadiabatic contributions to the
584
+ dynamics, i.e. of those effects that lie beyond Eq. (1), has
585
+ been amply demonstrated in the literature [54–59, 67–
586
+ 69]. Both adiabatic and superadiabatic effects arise from
587
+ integrating out the dynamical degrees of freedom of the
588
+ many-body problem.
589
+ Ensemble differences between canonical dynamics and
590
+ grand canonical equilibrium have been systematically ad-
591
+ dressed [75–77] and these do not account for the observed
592
+ differences between adiabatic and superadiabatic dynam-
593
+ ics.
594
+ The kinematic dependence on the motion of the
595
+ system arises formally [42], it can be explicitly traced
596
+ in many-body computer simulation work [59], and it
597
+ is amenable to machine learning, as we demonstrate in
598
+ Sec. VII. Before doing so, we first formulate the represen-
599
+ tative flow problem that we will use to apply the above
600
+ concepts.
601
+ V. NONEQUILIBRIUM STEADY STATES
603
+ We restrict ourselves to flow situations with one-body
604
+ fields that are inhomogeneous in position but indepen-
605
+ dent of time, i.e. ρ(r) and v(r). Then trivially ∂ρ(r)/∂t =
606
+ 0 and the continuity equation (5) constrains both fields
607
+ to satisfy ∇ · [ρ(r)v(r)] = 0. As a representative case
608
+ we illustrate in Fig. 1 a nonequilibrium steady state of a
609
+ three-dimensional liquid undergoing unidirectional com-
610
+ pressional flow. Flow along a single given direction occurs
611
+ e.g. under the influence of gravity, where sedimentation
612
+ of colloids leads to both compression in the lower parts of
613
+ the sample and expansion in the upper parts of the sam-
614
+ ple. Here we disregard transient phenomena and investi-
615
+ gate an idealized periodic system, where flowing steady
616
+ states can form.
617
+ In order to elucidate the physics in such setups, we fol-
618
+ low the splitting (15) of the superadiabatic power func-
619
+ tional into structural and flow contributions and hence
620
+ decompose the superadiabatic force field accordingly as
621
+ fsup(r) = fstr(r) + fflow(r),
622
+ (18)
623
+ where the right hand side consists of the nonequilib-
624
+ rium structural force field fstr(r) and the flow force
625
+ field fflow(r).
626
+ Both of these force contributions arise
627
+ from the microscopic interparticle interactions, as coarse-
628
+ grained in a microscopically sharp way to the one-body
629
+ level.
630
+ We lay out in the following the benefits of the
631
+ structure-flow splitting (18) and its definition via flow
632
+ reversal symmetry.
633
+ First, on the more practical level, Eq. (18) allows to
634
+ carry out a corresponding splitting of the force density
635
+ balance (13) [we divide by ρ(r) to obtain force fields].
636
+ The result is a set of two coupled equations of motion,
637
+ with one of them depending explicitly on the velocity
638
+ profile and the second one depending explicitly on the
639
+ density profile:
640
+ γv(r) = fflow(r) + fext,f(r),
641
+ (19)
642
+ 0 = fstr(r) − kBT∇ ln ρ(r) + fad(r) + fext,s(r). (20)
643
+ Building the sum of Eqs. (19) and (20) and multiplying
644
+ by the density profile restores the full force density bal-
645
+ ance (13). The external force field is split according to
646
+ fext(r) = fext,f(r) + fext,s(r), where the two terms couple
647
+ to the flow via fext,f(r) in Eq. (19) and to the structure
648
+ via fext,s(r) in Eq. (20).
649
+ On the superficial level the two equations (19) and
650
+ (20) appear to be independent of each other, as no sin-
651
+ gle field appears explicitly in both equations. However,
652
+ the two equations are indeed intimately coupled to each
653
+ other by the interparticle interactions, as represented by
654
+ both the adiabatic and the two superadiabatic (flow and
655
+ structural) force fields. These three intrinsic force con-
656
+ tributions provide the physical representation of the true
657
+ nonequilibrium steady state dynamics.
658
+ The flow-structure splitting (18) is uniquely deter-
659
+ mined by the symmetry properties of the forces upon
660
+ motion reversal of the system [59].
661
+ Motion reversal is
662
+ a discrete symmetry operation, and hence different from
663
+ continuous invariances where Noether’s theorem applies
664
+ [44–50].
665
+ One considers a “reversed” system, which is
666
+ also in steady state and possesses an unchanged den-
667
+ sity profile ρ(r). The flow, however, is directed against
668
+ the velocity orientation in the original “forward” system.
669
+ Hence the velocity profile in the reversed system is sim-
670
+ ply −v(r). As a result the current also acquires a mi-
671
+ nus sign, −ρ(r)v(r), which however does not affect the
672
+ (vanishing) divergence, ∇ · [−ρ(r)v(r)] = 0. Thus the re-
673
+ versed state indeed is stationary. The two superadiabatic
674
+ contributions are then defined to be unchanged [fstr(r)]
675
+ and inverted [−fflow(r)] in the reversed system. Conse-
676
+ quentially, the superadiabatic force field in the reversed
677
+ system is the difference fstr(r) − fflow(r).
678
+ Analyzing the symmetry properties of the adiabatic
679
+ force field is straightforward.
680
+ We recall that fad(r) is
681
+ a density functional via Eq. (10). The density profiles
682
+ in the forward and in the reversed systems are identical
683
+ though. Hence fad(r) is invariant under motion reversal.
684
+ Motion reversal is a useful device in order to i) rationalize
685
+ the nonequilibrium behaviour according to the split force
686
+ balance (19) and (20), and to ii) classify the dependence
687
+ of superadiabatic forces on the velocity field into even
688
+ powers, which constitute fstr(r), and odd powers, which
689
+ form fflow(r).
690
+ We can demonstrate this mechanism explicitly on the
691
+ basis of the above flow and structural power functionals
692
+ (16) and (17). Superadiabatic force fields are generated
693
+ via the functional derivative (12) with respect to the cur-
694
+ rent or, analogously, by functionally deriving by v(r, t)
+ and dividing the result by ρ(r, t). The resulting supera-
698
+ diabatic one-body force field consists of two components.
699
+ The viscous flow force [55, 58] and the structural
700
+ force follow respectively as
701
+ fflow(r) = (η/ρ(r)) ∇[ρ(r)²∇ · v(r)],   (21)
+ fstr(r) = −(χ/ρ(r)) ∇{ρ(r)³[∇ · v(r)]²},   (22)
708
+ where Eq. (21) is odd (linear) and Eq. (22) is even
709
+ (quadratic) in the derivatives of the velocity field, as de-
710
+ sired.
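+ To make the content of these two approximations concrete, the sketch below evaluates Eqs. (21) and (22) on a periodic
+ grid with central differences; the density modulation, the steady-state velocity v(x) = J0/ρ(x), and the coefficients η
+ and χ are illustrative assumptions only.
+
+ import numpy as np
+
+ # Minimal sketch: the flow force (21) and the structural force (22) for planar
+ # profiles rho(x), v(x); profiles and coefficients are illustrative choices.
+ L, M = 10.0, 256
+ x = np.linspace(0.0, L, M, endpoint=False)
+ dx = L / M
+ eta, chi, J0 = 0.1, 0.05, 1.0
+
+ def ddx(f):
+     return (np.roll(f, -1) - np.roll(f, 1)) / (2.0 * dx)   # periodic derivative
+
+ rho = 0.5 + 0.2 * np.cos(2.0 * np.pi * x / L)   # density profile
+ v = J0 / rho                                    # steady state: rho(x) v(x) = J0
+ div_v = ddx(v)                                  # velocity gradient
+
+ f_flow = eta / rho * ddx(rho**2 * div_v)        # Eq. (21), odd in the velocity
+ f_str = -chi / rho * ddx(rho**3 * div_v**2)     # Eq. (22), even in the velocity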
711
+ One might wonder where all this genuine nonequilib-
712
+ rium physics leaves the DDFT! Some readers will find the
713
+ instantaneous dynamics, as generated from an adiabatic
714
+ free energy according to (1), to be more appealing and in-
715
+ tuitive than the thinking in terms of the above described
716
+ apparently intricate functional relationships.
717
+ Why not
718
+ live with Eq. (1), use it, and simply accept its defects?
719
+ In order to address this question and to demonstrate why
720
+ this path is severely restricted from the outset, we turn
721
+ in Sec. VII to an explicit demonstration of the functional
722
+ relationship that governs the nonequilibrium physics, i.e.
723
+ the kinematic functional map from the one-body mean
724
+ motion to the internal force field. Before doing so, we
725
+ demonstrate that Noether’s theorem of invariant varia-
726
+ tions has much to say about our present setup.
727
+ VI. NOETHER FORCE SUM RULES
729
+ We discuss one of the arguably simplest cases of ex-
730
+ ploitation of the inherent symmetries of a thermal many-
731
+ body system, that of global translational invariance of its
732
+ statistical mechanics [44, 45]. We consider a “shifting”
733
+ transformation, where all particle coordinates change ac-
734
+ cording to the map ri → ri + ϵ, where ϵ = const. This
735
+ uniform shifting operation leaves all interparticle dis-
736
+ tances unchanged, ri−rj → (ri+ϵ)−(rj+ϵ) ≡ ri−rj. As
737
+ a consequence the interparticle potential is invariant un-
738
+ der the transformation, which we can express as the iden-
739
+ tity u(r1, . . . , rN) = u(r1 + ϵ, . . . , rN + ϵ). Here equality
740
+ holds irrespectively of the magnitude and the direction
741
+ of the shifting vector ϵ.
742
+ The Noether argument proceeds with a twist.
743
+ De-
744
+ spite the absence of dependence on ϵ, we can neverthe-
745
+ less differentiate both sides of the equation with respect
746
+ to ϵ and the result will be a valid identity. We obtain
747
+ 0 = ∂u(ri + ϵ, . . . , rN + ϵ)/∂ϵ = �
748
+ i ∇iu(r1, . . . , rN),
749
+ where we have set ϵ = 0 after taking the derivative. We
750
+ multiply by −1 and insert 1 =
751
+
752
+ drδ(r−ri), which yields
753
+
754
+
755
+ dr
756
+
757
+ i
758
+ δ(r − ri)∇iu(rN) = 0.
759
+ (23)
760
+ The expression on the left hand side allows to identify
761
+ the locally resolved interparticle force operator F̂int(r) = −Σi δ(r − ri)∇iu(rN), such that
+ Eq. (23) attains the form ∫ dr F̂int(r) = 0. This identity holds for each mi-
767
+ crostate rN and hence it remains trivially valid upon av-
768
+ eraging over the many-body distribution function, irre-
769
+ spective of whether this is in- or out-of-equilibrium. We
770
+ can hence conclude the vanishing of the global interpar-
771
+ ticle force, expressed as the integral over the mean force
772
+ density Fint(r) = ⟨F̂int(r)⟩ as
+ ∫ dr Fint(r, t) = 0.   (24)
776
+ Equation (24) holds at all times t and it can be viewed as
777
+ a consequence of Newton’s third law, see the discussion in
778
+ Ref. [44]. Using the adiabatic-superadiabatic force split-
779
+ ting (11) one can further conclude that both global
780
+ contributions need to vanish individually,
+ ∫ dr Fad(r, t) = 0,   (25)
+ ∫ dr Fsup(r, t) = 0.   (26)
787
+ The proof can either be based on the fact that Eq. (25)
788
+ is merely Eq. (24) for the special case of an equilibrium
789
+ system, from which then Eq. (26) follows from the force
790
+ splitting (11).
791
+ Alternatively and starting from a very
792
+ fundamental point of view, the global translational in-
793
+ variance of the excess free energy functional Fexc[ρ] and
794
+ of the superadiabatic free power functional P_t^exc[ρ, v],
797
+ here considered instantaneously at time t, lead directly
798
+ to Eqs. (25) and (26), see Refs. [44, 45] for the detailed
799
+ account.
800
+ It is interesting to apply the Noether concept to the
801
+ flow-structure splitting Eq. (18) of the superadiabatic
802
+ force field. One can see straightforwardly, from the sym-
803
+ metry upon motion reversal, that both the global struc-
804
+ tural force and the global flow force need to vanish indi-
805
+ vidually:
+ ∫ dr ρ(r)fflow(r) = 0,   (27)
+ ∫ dr ρ(r)fstr(r) = 0.   (28)
812
+ We prove by contradiction and assume that it is not
813
+ the case, i.e. that each integral gives the same global
814
+ force, but with opposite sign, such that the sum vanishes
815
+ and Eq. (26) remains valid.
816
+ Per construction, fflow(r)
817
+ changes sign in the motion reversed system, but fstr(r)
818
+ does not.
819
+ Hence Eq. (26) can only be satisfied in the
820
+ motion-reversed system provided that both the flow and
821
+ structural contribution vanish separately.
+ We can explicitly test the validity of the sum
831
+ rules (27) and (28) for the above analytical force ap-
832
+ proximations (21) and (22).
833
+ The respective integrals
+ are η ∫ dr ∇[ρ(r)²∇ · v(r)] = 0 and χ ∫ dr ∇{ρ(r)³[∇ · v(r)]²} = 0, which follows from the divergence theorem,
840
+ as boundary terms vanish. Hence the simple non-local
841
+ velocity gradient power functional approximations (16)
844
+ [Fig. 2 sketch: the kinematic fields (density, current, external force field) are linked to the interparticle force field via
+ the kinematic map; the Mermin-Evans map (DFT) yields the adiabatic force field and the superadiabatic map (PFT) the
+ superadiabatic force field, which the adiabatic-superadiabatic and structure-flow splittings resolve into flow and structural
+ forces; data from adaptive BD. Panels show ρσ³, Jσ²τ, fintσ/ε, fadσ/ε, fextσ/ε, fsupσ/ε, fflowσ/ε and fstrσ/ε versus x/σ.]
983
+ FIG. 2. Kinematic profiles and force fields for uniaxial compressional flow of the LJ fluid. Results are shown from machine
984
+ learning (lines) and from direct adaptive BD simulations (symbols). Functional relationships are represented by vertical arrows.
985
+ Shown are the density profile ρ(x), the one-body current J(x) and the external force field fext(x) (top row) as a function of the
986
+ scaled distance x/σ, where σ is the LJ length scale. The density and the current functionally determine both the interparticle
987
+ force field fint(x) via the kinematic map and the superadiabatic force field fsup(x) via the superadiabatic kinematic map (middle
988
+ row). The internal force field fint(x) splits into superadiabatic and adiabatic force contributions. The adiabatic force field fad(x)
989
+ is a density functional via the Mermin-Evans map of density functional theory. The structural and flow force fields are split
990
+ according to their symmetry upon motion reversal. The colour code represents different values of the current J0 = 0, 1, 2, 3, 4, 5
991
+ (from violet to yellow, see the center panel in the top row); the two insets show the predictions from the analytical velocity
992
+ gradient functionals (21) and (22). The system with J0 = 0 is at rest in equilibrium and it doubles as the adiabatic state as its
993
+ density profile is identical to that of the flowing systems (first panel).
994
+ and (17) have passed the global Noether validation test.
995
+ This is nontrivial, as the proof rests on the specific struc-
996
+ ture of the integrands being gradients, which for more
997
+ general analytical forms will not be the case. This exem-
998
+ plifies the merits of Noether sum rules for assessing and
999
+ by extension also constructing theoretical nonequilibrium
1000
+ force approximations.
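+ Numerically the global sum rules are straightforward to monitor. The short sketch below re-uses the illustrative profiles
+ from the grid example above, evaluates the model forces (21) and (22), and checks that the integrals (27) and (28) vanish
+ to within discretization accuracy.
+
+ import numpy as np
+
+ # Minimal sketch: numerical check of the global Noether sum rules (27), (28)
+ # for the model forces (21), (22); profiles and coefficients are illustrative.
+ L, M = 10.0, 256
+ x = np.linspace(0.0, L, M, endpoint=False)
+ dx = L / M
+ eta, chi, J0 = 0.1, 0.05, 1.0
+ ddx = lambda f: (np.roll(f, -1) - np.roll(f, 1)) / (2.0 * dx)
+
+ rho = 0.5 + 0.2 * np.cos(2.0 * np.pi * x / L)
+ v = J0 / rho
+ f_flow = eta / rho * ddx(rho**2 * ddx(v))       # Eq. (21)
+ f_str = -chi / rho * ddx(rho**3 * ddx(v)**2)    # Eq. (22)
+
+ print("flow sum rule (27):", np.sum(rho * f_flow) * dx)        # vanishes
+ print("structural sum rule (28):", np.sum(rho * f_str) * dx)   # vanishes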
1001
+ The Noether concept carries much further. Reference
1002
+ [44] presents memory sum rules for so-called time di-
1003
+ rect correlation functions. These are defined via func-
1004
+ tional derivatives of the superadiabatic power functional,
1005
+ in generalization of the superadiabatic force density as
1006
+ generated via the derivative (12) with respect to the cur-
1007
+ rent distribution. We expect the corresponding identities
1008
+ to be helpful in the study of temporal nonlocality. Fur-
1009
+ ther work was addressed at the variance of global fluctu-
1010
+ ations, which were shown to be constrained by Noether
1011
+ invariance at the second order global level [49]. Noether’s
1012
+ theorem also yields the locally resolved force balance re-
1013
+ lationship in quantum mechanical many-body systems
1014
+ [48].
1015
+ Very recently, striking two-body force-force and
1016
+ force-gradient correlation functions for the precise and
1017
+ novel characterization of disordered (liquid and gel) sys-
1018
+ tems [50] were revealed. Exploiting Noether’s concept in
1019
+ a statistical mechanical setting is robust against changes of
+ ensemble; Ref. [45] presents the transfer of the grand en-
1021
+ semble formalism [44] to canonical systems. Considering
1022
+ global rotational invariance leads to (classical) spin-orbit
1023
+
1024
+ J&pJ&p9
1025
+ coupling of torque identities [44].
1026
+ We return to steady states and demonstrate that the
1027
+ seemingly entirely formal functional relationships do in
1028
+ fact apply to real systems. We present in the following
1029
+ new computational methodology that we use to demon-
1030
+ strate the functional point of view. We will also demon-
1031
+ strate that the sum rules (26) and (27) are highly valuable
1032
+ in providing checks for numerical results.
1033
+ VII. MACHINE LEARNING THE KINEMATIC MAP
1036
+ Machine learning proves itself to be an increasingly
1037
+ useful tool in a variety of settings in soft matter, rang-
1038
+ ing from soft matter characterization [82], engineering of
1039
+ colloidal self-assembly [83], to the inverse design of soft
1040
+ materials [84]. Pivotal studies were addressed at colloidal
1041
+ structure detection [85], the identification of combinato-
1042
+ rial rules in mechanical metamaterials [86], the learning
1043
+ of many-body interaction potentials for spherical [87] and
1044
+ for anisotropic particles [88], and the prediction of the
1045
+ dynamics of supercooled liquids from their static proper-
1046
+ ties [89].
1047
+ More specifically, in the context of classical density
1048
+ functional theory, an early and pioneering study formu-
1049
+ lated a neural-network approach to liquid crystal order-
1050
+ ing in confinement [90].
1051
+ Free energy density function-
1052
+ als were obtained for one-dimensional fluids from a con-
1053
+ volutional neural network [91] and an analytical form
1054
+ of an excess free energy functional was generated from
1055
+ an equation learning network [92]. Cats et al. [93] re-
1056
+ cently used machine learning to improve the standard
1057
+ mean-field approximation of the excess Helmholtz free-
1058
+ energy functional for a three-dimensional Lennard-Jones
1059
+ (LJ) system at a supercritical temperature. These signif-
1060
+ icant research efforts were devoted to tailoring analytical
1061
+ forms of model free energy functionals, by training cer-
1062
+ tain key components such as spatial convolution kernels,
1063
+ and much insight into the inner workings of excess free
1064
+ energy functionals was gained [91–93].
1065
+ However, here we proceed very differently and more-
1066
+ over do so out-of-equilibrium. We use the LJ model and
1067
+ the identical planar geometry as in Ref. [93], such that
1068
+ the density profile ρ(x) depends only on a single posi-
1069
+ tion coordinate x. We consider steady states and retain
1070
+ planar symmetry by considering flow that is directed in
1071
+ the x-direction, such that the current J(x) = J(x)ex,
1072
+ where J(x) is the magnitude of the current and ex is the
1073
+ unit vector in the x-direction. Both the density profile
1074
+ ρ(x) and the velocity field v(x) = J(x)/ρ(x) are indepen-
1075
+ dent of time. The continuity equation (5) then implies
1076
+ 0 = ∂ρ(x)/∂t = −∂[v(x)ρ(x)]/∂x, from which one ob-
1077
+ tains by spatial integration ρ(x)v(x) = J0 = const. Here
1078
+ the value of J0 determines the intensity of the flow; we
1079
+ recall the illustration shown in Fig. 1.
1080
+ We base the machine learning procedure on a convolu-
1081
+ tional neural network, as was done e.g. in Ref. [91], and
1082
+ following Refs. [91–93] we use many-body computer sim-
1083
+ ulations to provide training, validation, and test data.
1084
+ In contrast to these equilibrium studies though, in or-
1085
+ der to address the nonequilibrium problem we need to
1086
+ represent the physical time evolution on the many-body
1087
+ trajectory level. We use the recently developed highly
1088
+ performant adaptive BD algorithm [6] and apply it to
1089
+ the three-dimensional LJ fluid.
1090
+ As laid out above, in
1091
+ order to address situations of planar symmetry we drive
1092
+ the system only along the ex-direction. The specific form
1093
+ of the driving force field fext(x)ex is however irrelevant,
1094
+ as the training data only serves to extract the intrinsic
1095
+ kinematic functional relationship.
1096
+ In order to cover a sufficiently broad range of flow sit-
1097
+ uations, we represent the external force field as a trun-
1098
+ cated Fourier series fext(x) = Σ_{n=0}^{nmax} An cos(2πnx/L),
1100
+ where L is the size of the cubic simulation box with pe-
1101
+ riodic boundary conditions and An are random ampli-
1102
+ tudes with zero mean and uniform distribution inside of
1103
+ a given finite interval. We truncate at order nmax = 5
1104
+ such that the length scale L/(2πnmax) is comparable to
1105
+ the LJ molecular size σ. Ten percent of our simulation
1106
+ runs are carried out in equilibrium, i.e. for A0 = 0. We
1107
+ use N = 500 LJ particles inside of a cubic simulation
1108
+ box of size L = 10σ. The temporal duration of each run
1109
+ is 1000τ, where τ = σ2/D0 is the Brownian time scale.
1110
+ After initialization the system is randomized for 1τ at a
1111
+ very high temperature. Then we wait for 100τ to allow
1112
+ the system to reach a steady state and then collect data
1113
+ during the remaining time. In total we use 1000 such sim-
1114
+ ulation runs; these are subdivided for purposes of train-
1115
+ ing (520), validation (280) and testing (200).
1116
+ A more
1117
+ detailed account will be given elsewhere.
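+ A sketch of one realization of such a randomized driving protocol is given below; the amplitude interval is an
+ illustrative assumption, as the actual values used for the training runs are not specified here.
+
+ import numpy as np
+
+ # Minimal sketch: one realization of the random truncated Fourier series for
+ # f_ext(x); the amplitude interval is an illustrative assumption.
+ rng = np.random.default_rng()
+ L, M, nmax = 10.0, 256, 5
+ x = np.linspace(0.0, L, M, endpoint=False)
+
+ A = rng.uniform(-1.0, 1.0, size=nmax + 1)       # random amplitudes A_n
+ if rng.random() < 0.1:
+     A[0] = 0.0                                  # ten percent of runs: no net drive
+ fext = sum(A[n] * np.cos(2.0 * np.pi * n * x / L) for n in range(nmax + 1))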
1118
+ Our aim is to machine-learn and hence to explicitly
1119
+ demonstrate the kinematic map, ρ(r), v(r) → fint(r) in
1120
+ steady state. We present the learning algorithm with in-
1121
+ puts ρ(x), v(x) and targets fint(x). The data for these
1122
+ three fields are from building steady state averages via
1123
+ the adaptive BD over the corresponding one-body oper-
1124
+ ators. We recall the microscopic definition of the inter-
1125
+ particle one-body force density Fint(r) via Eq. (7) and
1126
+ we refer the reader to Appendix A of Ref. [51] for a de-
1127
+ scription of several methods to sample the current in
1128
+ BD and hence obtain the overdamped velocity profile
1129
+ v(r). Finally, we use the standard counting method for
1130
+ the density profile ρ(r), although more efficient “force
1131
+ sampling” methods [94–97] exist. At this stage we nei-
1132
+ ther impose adiabatic-superadiabatic splitting (11), nor
1133
+ structure-flow splitting (18), nor do we use any analyti-
1134
+ cal model form of the functional relationship. We rather
1135
+ work on the level of the bare one-body simulation data,
1136
+ generated in the above described randomized uniaxial
1137
+ flow situations of the desired planar symmetry.
1138
+ We refer to the result of this procedure as the machine-
1139
+ learned internal force field f⋆_int(x, [ρ, v]). This represents
1141
+ a “surrogate model” in the sense of the terminology of the
1142
+ machine learning community. By construction this data
1143
+ structure depends functionally on the density profile and
1146
+ on the velocity profile. Importantly the external force
1147
+ field fext(x), as given by the above described randomized
1148
+ Fourier series, has not been used in the training, which
1149
+ was rather based solely on the intrinsic force field and
1150
+ its kinematic dependence on the density profile and the
1151
+ velocity field.
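+ The network architecture is not the focus here, but for concreteness a minimal one-dimensional convolutional model of
+ the kinematic map could look as follows (PyTorch); the channel counts, kernel size, depth and the single illustrative
+ training step are assumptions and not the settings used for the results shown below.
+
+ import torch
+ import torch.nn as nn
+
+ # Minimal sketch: a small periodic 1D convolutional network representing the
+ # kinematic map (rho(x), v(x)) -> f_int(x); architecture choices are illustrative.
+ class KinematicMap(nn.Module):
+     def __init__(self, channels=32, kernel=9, depth=4):
+         super().__init__()
+         layers, in_ch = [], 2                   # two input channels: rho and v
+         for _ in range(depth):
+             layers += [nn.Conv1d(in_ch, channels, kernel, padding=kernel // 2,
+                                  padding_mode="circular"),   # periodic box
+                        nn.SiLU()]
+             in_ch = channels
+         layers.append(nn.Conv1d(channels, 1, 1))              # output: f_int(x)
+         self.net = nn.Sequential(*layers)
+
+     def forward(self, rho, v):
+         return self.net(torch.stack([rho, v], dim=1)).squeeze(1)
+
+ model = KinematicMap()
+ opt = torch.optim.Adam(model.parameters(), lr=1e-3)
+
+ # one illustrative training step on a random placeholder batch of profiles
+ rho_b, v_b, fint_b = torch.rand(8, 256) + 0.3, torch.rand(8, 256), torch.rand(8, 256)
+ loss = nn.functional.mse_loss(model(rho_b, v_b), fint_b)
+ opt.zero_grad(); loss.backward(); opt.step()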
1152
+ In order to test the validity of the functional relation-
1153
+ ship and to address the question whether f⋆_int(x, [ρ, v])
1155
+ indeed represents the true fint(r, t, [ρ, v]) of power func-
1156
+ tional theory restricted to the present planar and steady
1157
+ situation, we consider a toy flow situation as an appli-
1158
+ cation.
1159
+ We choose the density profile to consist of a
1160
+ single (co)sinusoidal deviation from the bulk, ρ(x) =
1161
+ [0.5 + 0.2 cos(2πx/L)]σ−3.
1162
+ In order for the system to
1163
+ be in steady state, the velocity then necessarily needs to
1164
+ satisfy v(x) = J0/ρ(x), where the strength of the current
1165
+ J0 = const is a free parameter.
1166
+ We proceed in two ways.
1167
+ First, we check for self-
1168
+ consistency. Therefore we solve the force density balance
1169
+ relationship (6) for the external force field, which yields
1170
+ the explicit result:
1171
+ fext(x) = kBT ρ′(x)/ρ(x) + γv(x) − f⋆_int(x, [ρ, v]),   (29)
1174
+ where ρ′(x) = ∂ρ(x)/∂x. As is explicit in Eq. (29), in-
1175
+ putting the toy state ρ(x), v(x) on the right hand side
1176
+ yields a concrete machine learning prediction for the ex-
1177
+ ternal force field on the left hand side. We then input
1178
+ this result for fext(x) as the driving force field in a single
1179
+ adaptive BD simulation run and expect this procedure to
1180
+ reproduce the density and velocity profile of the toy state.
1181
+ The reproductive success will however materialize only
1182
+ provided that i) the functional kinematic dependence ac-
1183
+ tually exists and that ii) it is accurately represented by
1184
+ the neural network.
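+ Schematically, the self-consistency test then amounts to evaluating Eq. (29) for the prescribed toy state with the
+ machine-learned force field and feeding the result back into a single simulation run; the snippet assumes the model
+ object from the sketch above and uses unit LJ parameters as illustrative values.
+
+ import numpy as np
+ import torch
+
+ # Minimal sketch: toy steady state and the driving force from Eq. (29) using
+ # the machine-learned internal force field; parameters are illustrative.
+ L, M = 10.0, 256
+ x = np.linspace(0.0, L, M, endpoint=False)
+ dx = L / M
+ kBT, gamma, J0 = 1.0, 1.0, 2.0
+
+ rho = 0.5 + 0.2 * np.cos(2.0 * np.pi * x / L)    # toy density profile
+ v = J0 / rho                                     # steady state: rho(x) v(x) = J0
+
+ with torch.no_grad():                            # assumes the KinematicMap model
+     fint_star = model(torch.tensor(rho[None], dtype=torch.float32),
+                       torch.tensor(v[None], dtype=torch.float32))[0].numpy()
+
+ drho = (np.roll(rho, -1) - np.roll(rho, 1)) / (2.0 * dx)
+ fext = kBT * drho / rho + gamma * v - fint_star  # Eq. (29)
+ # fext(x) is then prescribed as the driving force of one adaptive BD run and the
+ # sampled rho(x), v(x) are compared against the toy state.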
1185
+ The results, shown in Fig. 2, demonstrate the success-
+ ful reconstruction of the toy state.
1187
+ This
1188
+ establishes that the machine learned functional provides
1189
+ a numerically very highly accurate representation of the
1190
+ true internal force functional. We take this validation via
1191
+ the machine learning to be a practical, data-science-level
1192
+ verification of the existence of the power functional kine-
1193
+ matic map. We recall the original formal construction
1194
+ [42, 53] and its subsequent confirmation via custom flow
1195
+ [51, 52].
1196
+ Turning to the physics of the compressional flow, we
1197
+ use the adiabatic-superadiabatic decomposition (11) to-
1198
+ gether with the flow-structure splitting (18) to analyze
1199
+ both the machine-learned functional f⋆_int(x, [ρ, v]) as well
1201
+ as the direct simulation results. As anticipated, both flow
1202
+ and structural force fields have nontrivial spatial varia-
1203
+ tion, see Fig. 2. The flow force primarily contains viscous
1204
+ effects that stem from the dissipation that the compres-
1205
+ sional and extensional regions of the flow pattern gener-
1206
+ ate. The structural force field becomes more strongly in-
1207
+ homogeneous and also larger in magnitude upon increas-
1208
+ ing the amplitude of the flow. This trend is necessary
1209
+ to provide a balance for the increasingly asymmetric and
1210
+ growing external force field, which in turn is required to
1211
+ keep the density profile unchanged upon increasing the
1212
+ throughput through the prescribed density wave.
1213
+ The
1214
+ power functional predictions (21) and (22) capture these
1215
+ effects reasonably well given the simplicity of the ana-
1216
+ lytical expressions, see the insets in Fig. 2. We find our
1217
+ numerical results to satisfy the Noether sum rules (26)
1218
+ and (27) to very good accuracy.
1219
+ It remains to point out the stark contrast with the
1220
+ standard DDFT (1), which gives a trivial null result in
1221
+ the present setup by construction: the density profile re-
1222
+ mains unchanged upon increasing flow, and so does the
1223
+ adiabatic force field. So the DDFT provides no mecha-
1224
+ nism to account for the nonequilibrium physics.
1225
+ VIII. CONCLUSIONS
For the purpose of assessing the status of the DDFT equation of motion (1) we have first described two exact limits that this approximation reproduces: the dynamics of the noninteracting diffusive ideal gas [see Eq. (4)] and the spatially inhomogeneous static equilibrium limit [see Eq. (2)]. On general grounds one expects the DDFT to perform well when the situation under consideration is close to one of these limits. In particular near the static case this is nontrivial, as the system might be dense and spatially highly structured, as evidenced by a strongly inhomogeneous density profile. Provided that the dynamics are driven weakly enough via a time-dependent external potential, the DDFT can be a highly useful device, which enables one to describe the temporal evolution as a chain of equilibrium states, labelled by time.

In general, however, the contributions beyond the equilibrium physics will be relevant. On the level of the formally exact one-body equation of motion (14), the superadiabatic force field fsup(r, t) will then contribute, potentially very significantly. Together with the adiabatic force field, which follows from the equilibrium excess free energy functional via −∇δFexc[ρ]/δρ(r, t), their sum constitutes the full interparticle forces. These are coarse-grained, in a microscopically sharp way, to the one-body level of dynamical correlation functions. We have argued i) that power functional theory is a concrete formal structure that allows one to obtain fsup(r, t) from a generating functional and ii) that simple approximate forms already capture much relevant nonequilibrium physics, and do so in a transparent and systematic way.

We have described and exemplified, for uniaxial steady compressional flow of the three-dimensional Lennard-Jones fluid, the kinematic functional map that governs the exact nonequilibrium dynamics on the one-body level of dynamic correlation functions. As this description is based on a single position coordinate and a single time variable, it is of both conceptual and practical simplicity. As described by power functional theory, the superadiabatic interparticle force field functionally depends on the density and the velocity field, i.e. fsup(r, t, [ρ, v]), for overdamped Brownian motion. The functional dependence is causal, i.e. on the values of the density profile and velocity field at previous times, in general up to an initial state. The superadiabatic force field carries this kinematic dependence, i.e. on the history of ρ(r, t) and v(r, t), but crucially it is independent of the external force field that drives the system.

We have explicitly demonstrated the functional map ρ(r, t), v(r, t) → fint(r, t) by establishing this functional relationship via machine learning the intrinsic force field. Using the force balance then gives direct access to the form of the required external force field via Eq. (29). The machine-learned model of the functional map hence enables "instant custom flow" at negligible computational cost at the time of use. We recall that the custom flow method [51, 52] is based on the kinematic functional map, such that from knowing the kinematic one-body fields, the external force field that is necessary to generate the given time evolution follows straightforwardly from the exact force balance (6).

An analytical approach to one-body functional maps leads to the simple structure of velocity gradient forms for the viscous and structural superadiabatic forces, as exemplified in Eqs. (16) and (17) for compressional flow, i.e. for velocity fields with nonvanishing divergence. As we have shown, the resulting predictions for the flow force (21) and for the structural force field (22) represent a reasonable description of the simulation data and of its representation via the machine-learned functional. We attribute the remaining differences to higher-order terms [59] which we have not addressed here for simplicity. As we have shown, our results from direct simulation, from machine learning, and from the analytical approximations satisfy exact global Noether sum rules.

We have restricted our discussion to a single and relatively easily accessible type of nonequilibrium dynamics, that of stationary uniaxial compressional flow, which represents a model steady (batch) sedimentation situation. The power functional approach allows one to go much further, including the treatment of viscoelasticity [56], as arising from superadiabatic memory, deconfinement under shear [57], the dynamic decay of the van Hove pair correlation function as governed by drag, viscous and structural forces [68, 69], and the complex forms of both flow and structural forces that arise under spatially complex forms of driving [59]. Time-dependent uniaxial flow is relevant in a variety of situations, including colloidal stratification [98, 99] and sedimentation [100].

Although power functional theory operates on the one-body level of dynamical correlation functions, two-body correlation functions are accessible both formally via the nonequilibrium Ornstein-Zernike route [42] and explicitly by the dynamical test particle limit. The latter is the dynamic generalization of Percus' static test particle limit [61], which identifies two-point correlation functions, such as g(r), recently also shown to be intimately related to thermal Noether invariance at second order [50], with one-body density profiles in an external potential that is set equal to the interparticle pair potential. The dynamical test-particle limit goes further in that it describes the test particle via its own dynamical degrees of freedom, which are coupled to those of all other particles in the system. The concept was originally formulated as an approximation within DDFT [62, 63] and formally exactly within power functional theory [66]. Two-body superadiabatic effects were shown via simulation work to be significant [67-69] and they arise naturally in an exact formulation of the test particle dynamics [66]. The test particle limit allowed for a rationalization of the dynamical pair structure as, e.g., experimentally observed in two-dimensional colloids [9]. Recently an approach to DDFT based on the two-body level was formulated [101].

In event-driven BD simulations superadiabatic forces were shown to consist of drag, viscous, and structural contributions [68, 69]; see Ref. [42] for an extended discussion. The physics of active particles [70-74] is very significantly governed by a vigorous interplay between superadiabatic and adiabatic forces, both of which are very strong, as the tendency of these systems to self-compress naturally leads to very high local densities.

Furthermore, relevant and interesting microscopic models that go beyond the simple fluid paradigm of a pair potential, such as the monatomic water model by Molinero and Moore [102, 103] and the three-body gel by Saw et al. [104, 105], are accessible. Despite the complexity of both its defining Hamiltonian and the intricate transient network structure, the inhomogeneous viscous response of the three-body gel was recently demonstrated [60] to be surprisingly well captured by a simple power functional flow approximation. We finally recall that superadiabatic effects transcend overdamped dynamics, and are relevant both in quantum dynamics [42, 80, 81] and in classical molecular dynamics [42, 78, 79].

While we have restricted ourselves to discussing the point of view of functional relationships, it would be interesting to explore in future work possible cross connections to other theoretical approaches, such as Onsager's variational principle for soft matter [106-109], stochastic thermodynamics [110], large deviation theory [111, 112], mode-coupling theory [113, 114], generalized hydrodynamics [115], as well as to the physics of nonequilibrium phase transitions [116] and of Brownian solitons [117].

ACKNOWLEDGMENTS

This work is supported by the German Research Foundation (DFG) via Project No. 436306241.
[1] S. R. Nagel, Experimental soft-matter science, Rev. Mod. Phys. 89, 025002 (2017).
[2] R. Evans, D. Frenkel, and M. Dijkstra, From simple liquids to colloids and soft matter, Phys. Today 72, 38 (2019).
[3] J. P. Hansen and I. R. McDonald, Theory of Simple Liquids, 4th ed. (Academic Press, London, 2013).
[4] R. Evans, The nature of the liquid-vapour interface and other topics in the statistical mechanics of non-uniform, classical fluids, Adv. Phys. 28, 143 (1979).
[5] P. C. Hohenberg and B. I. Halperin, Theory of dynamic critical phenomena, Rev. Mod. Phys. 49, 435 (1977).
[6] F. Sammüller and M. Schmidt, Adaptive Brownian dynamics, J. Chem. Phys. 155, 134107 (2021).
[7] C. P. Royall, J. Dzubiella, M. Schmidt, and A. van Blaaderen, Non-equilibrium sedimentation of colloids on the particle scale, Phys. Rev. Lett. 98, 188304 (2007).
[8] M. Bier, R. van Roij, M. Dijkstra, and P. van der Schoot, Self-diffusion of particles in complex fluids: temporary cages and permanent barriers, Phys. Rev. Lett. 101, 215901 (2008).
[9] D. Stopper, A. L. Thorneywork, R. P. A. Dullens, and R. Roth, Bulk dynamics of Brownian hard disks: Dynamical density functional theory versus experiments on two-dimensional colloidal hard spheres, J. Chem. Phys. 148, 104501 (2018).
[10] R. Evans, Density functionals in the theory of nonuniform fluids, in Fundamentals of Inhomogeneous Fluids, edited by D. Henderson (Dekker, New York, 1992).
[11] For an overview of new developments in classical density functional theory, see: R. Evans, M. Oettel, R. Roth, and G. Kahl, New developments in classical density functional theory, J. Phys.: Condens. Matter 28, 240401 (2016).
[12] R. Evans, M. C. Stewart, and N. B. Wilding, A unified description of hydrophilic and superhydrophobic surfaces in terms of the wetting and drying transitions of liquids, Proc. Nat. Acad. Sci. 116, 23901 (2019).
[13] M. K. Coe, R. Evans, and N. B. Wilding, Density depletion and enhanced fluctuations in water near hydrophobic solutes: identifying the underlying physics, Phys. Rev. Lett. 128, 045501 (2022).
[14] M. K. Coe, R. Evans, and N. B. Wilding, Understanding the physics of hydrophobic solvation, arXiv:2212.04967.
[15] D. Martin-Jimenez, E. Chacón, P. Tarazona, and R. Garcia, Atomically resolved three-dimensional structures of electrolyte aqueous solutions near a solid surface, Nat. Commun. 7, 12164 (2016).
[16] J. Hernández-Muñoz, E. Chacón, and P. Tarazona, Density functional analysis of atomic force microscopy in a dense fluid, J. Chem. Phys. 151, 034701 (2019).
[17] P. Cats, R. Evans, A. Härtel, and R. van Roij, Primitive model electrolytes in the near and far field: Decay lengths from DFT and simulations, J. Chem. Phys. 154, 124504 (2021).
[18] Y. Rosenfeld, Free-energy model for the inhomogeneous hard-sphere fluid mixture and density-functional theory of freezing, Phys. Rev. Lett. 63, 980 (1989).
[19] R. Roth, Fundamental measure theory for hard-sphere mixtures: a review, J. Phys.: Condens. Matter 22, 063102 (2010).
[20] R. Roth, R. Evans, A. Lang, and G. Kahl, Fundamental measure theory for hard-sphere mixtures revisited: the White Bear version, J. Phys.: Condens. Matter 14, 12063 (2002).
[21] H. Hansen-Goos and R. Roth, Density functional theory for hard-sphere mixtures: the White Bear version mark II, J. Phys.: Condens. Matter 18, 8413 (2006).
[22] U. M. B. Marconi and P. Tarazona, Dynamic density functional theory of fluids, J. Chem. Phys. 110, 8032 (1999).
[23] A. J. Archer and R. Evans, Dynamical density functional theory and its application to spinodal decomposition, J. Chem. Phys. 121, 4246 (2004).
[24] G. K.-L. Chan and R. Finken, Time-dependent density functional theory of classical fluids, Phys. Rev. Lett. 94, 183001 (2005).
[25] P. Español and H. Löwen, Derivation of dynamical density functional theory using the projection operator technique, J. Chem. Phys. 131, 244101 (2009).
[26] U. M. B. Marconi and S. Melchionna, Phase-space approach to dynamical density functional theory, J. Chem. Phys. 126, 184109 (2007).
[27] J. Dzubiella and C. N. Likos, Mean-field dynamical density functional theory, J. Phys.: Condens. Matter 15, L147 (2003).
[28] J. F. Lutsko and M. Oettel, Reconsidering power functional theory, J. Chem. Phys. 155, 094901 (2021).
[29] G. Szamel, An alternative, dynamic density functional-like theory for time-dependent density fluctuations in glass-forming fluids, J. Chem. Phys. 156, 191102 (2022).
[30] B. D. Goddard, R. D. Mills-Williams, M. Ottobre, and G. Pavliotis, Well-posedness and equilibrium behaviour of overdamped dynamic density functional theory, arXiv:2002.11663.
[31] A. J. Archer, Dynamical density functional theory for dense atomic liquids, J. Phys.: Condens. Matter 18, 5617 (2006).
[32] A. J. Archer, Dynamical density functional theory for molecular and colloidal fluids: A microscopic approach to fluid mechanics, J. Chem. Phys. 130, 014509 (2009).
[33] R. Stierle and J. Gross, Hydrodynamic density functional theory for mixtures from a variational principle and its application to droplet coalescence, J. Chem. Phys. 155, 134101 (2021).
[34] B. D. Goddard, A. Nold, N. Savva, G. A. Pavliotis, and S. Kalliadasis, General dynamical density functional theory for classical fluids, Phys. Rev. Lett. 109, 120603 (2012).
[35] M. Rex and H. Löwen, Dynamical density functional theory for colloidal dispersions including hydrodynamic interactions, Eur. Phys. J. E 28, 139 (2009).
[36] J. Dzubiella and A. Moncho-Jordá, Controlling the microstructure and phase behavior of confined soft colloids by active interaction switching, Phys. Rev. Lett. 125, 078001 (2020).
[37] M. Bley, J. Dzubiella, and A. Moncho-Jordá, Active binary switching of soft colloids: stability and structural properties, Soft Matter 17, 7682 (2021).
[38] B. D. Goddard, B. Gooding, G. A. Pavliotis, and H. Short, Noisy bounded confidence models for opinion dynamics: the effect of boundary conditions on phase transitions, IMA J. Appl. Math. 87, 80 (2022).
[39] M. te Vrugt, J. Bickmann, and R. Wittkowski, Effects of social distancing and isolation on epidemic spreading modeled via dynamical density functional theory, Nat. Commun. 11, 5576 (2020).
[40] M. te Vrugt, H. Löwen, and R. Wittkowski, Classical dynamical density functional theory: from fundamentals to applications, Adv. Phys. 69, 121 (2020).
[41] M. te Vrugt and R. Wittkowski, Perspective: New directions in dynamical density functional theory, J. Phys.: Condens. Matter 35, 041501 (2023).
[42] M. Schmidt, Power functional theory for many-body dynamics, Rev. Mod. Phys. 94, 015007 (2022).
[43] T. Schilling, Coarse-grained modelling out of equilibrium, Phys. Rep. 972, 1 (2022).
[44] S. Hermann and M. Schmidt, Noether's theorem in statistical mechanics, Commun. Phys. 4, 176 (2021).
[45] S. Hermann and M. Schmidt, Why Noether's theorem applies to statistical mechanics, J. Phys.: Condens. Matter 34, 213001 (2022) (Topical Review).
[46] S. M. Tschopp, F. Sammüller, S. Hermann, M. Schmidt, and J. M. Brader, Force density functional theory in- and out-of-equilibrium, Phys. Rev. E 106, 014115 (2022).
[47] F. Sammüller, S. Hermann, and M. Schmidt, Should classical density functional theory be based on forces? A comparative study, arXiv:2212.01780 (2022).
[48] S. Hermann and M. Schmidt, Force balance in thermal quantum many-body systems from Noether's theorem, J. Phys. A: Math. Theor. 55, 464003 (2022) (Claritons and the Asymptotics of Ideas: the Physics of Michael Berry).
[49] S. Hermann and M. Schmidt, Variance of fluctuations from Noether invariance, Commun. Phys. 5, 276 (2022).
[50] F. Sammüller, S. Hermann, D. de las Heras, and M. Schmidt, What is liquid, from Noether's perspective? (to be published).
[51] D. de las Heras, J. Renner, and M. Schmidt, Custom flow in overdamped Brownian dynamics, Phys. Rev. E 99, 023306 (2019).
[52] J. Renner, M. Schmidt, and D. de las Heras, Custom flow in molecular dynamics, Phys. Rev. Res. 3, 013281 (2021).
[53] M. Schmidt and J. M. Brader, Power functional theory for Brownian dynamics, J. Chem. Phys. 138, 214101 (2013).
[54] A. Fortini, D. de las Heras, J. M. Brader, and M. Schmidt, Superadiabatic forces in Brownian many-body dynamics, Phys. Rev. Lett. 113, 167801 (2014).
[55] N. C. X. Stuhlmüller, T. Eckert, D. de las Heras, and M. Schmidt, Structural nonequilibrium forces in driven colloidal systems, Phys. Rev. Lett. 121, 098002 (2018).
[56] L. L. Treffenstädt and M. Schmidt, Memory-induced motion reversal in Brownian liquids, Soft Matter 16, 1518 (2020).
[57] N. Jahreis and M. Schmidt, Shear-induced deconfinement of hard disks, Colloid Polym. Sci. 298, 895 (2020).
[58] D. de las Heras and M. Schmidt, Velocity gradient power functional for Brownian dynamics, Phys. Rev. Lett. 120, 028001 (2018).
[59] D. de las Heras and M. Schmidt, Flow and structure in nonequilibrium Brownian many-body systems, Phys. Rev. Lett. 125, 018001 (2020).
[60] F. Sammüller, D. de las Heras, and M. Schmidt, Inhomogeneous steady shear dynamics of a three-body colloidal gel former, J. Chem. Phys. (to appear in the Special Topic on Colloidal Gels); arXiv:2210.07679.
[61] J. K. Percus, Approximation methods in classical statistical mechanics, Phys. Rev. Lett. 8, 462 (1962).
[62] A. J. Archer, P. Hopkins, and M. Schmidt, Dynamics in inhomogeneous liquids and glasses via the test particle limit, Phys. Rev. E 75, 040501(R) (2007).
[63] P. Hopkins, A. Fortini, A. J. Archer, and M. Schmidt, The van Hove distribution function for Brownian hard spheres: Dynamical test particle theory and computer simulations for bulk dynamics, J. Chem. Phys. 133, 224505 (2010).
[64] D. Stopper, R. Roth, and H. Hansen-Goos, Communication: Dynamical density functional theory for dense suspensions of colloidal hard spheres, J. Chem. Phys. 143, 181105 (2015).
[65] D. Stopper, K. Marolt, R. Roth, and H. Hansen-Goos, Modeling diffusion in colloidal suspensions by dynamical density functional theory using fundamental measure theory of hard spheres, Phys. Rev. E 92, 022151 (2015).
[66] J. M. Brader and M. Schmidt, Power functional theory for the dynamic test particle limit, J. Phys.: Condens. Matter 27, 194106 (2015).
[67] T. Schindler and M. Schmidt, Dynamic pair correlations and superadiabatic forces in a dense Brownian liquid, J. Chem. Phys. 145, 064506 (2016).
[68] L. L. Treffenstädt and M. Schmidt, Universality in driven and equilibrium hard sphere liquid dynamics, Phys. Rev. Lett. 126, 058002 (2021).
[69] L. L. Treffenstädt, T. Schindler, and M. Schmidt, Dynamic decay and superadiabatic forces in the van Hove dynamics of bulk hard sphere fluids, SciPost Phys. 12, 133 (2022).
[70] S. Hermann, D. de las Heras, and M. Schmidt, Non-negative interfacial tension in phase-separated active Brownian particles, Phys. Rev. Lett. 123, 268002 (2019).
[71] S. Hermann, P. Krinninger, D. de las Heras, and M. Schmidt, Phase coexistence of active Brownian particles, Phys. Rev. E 100, 052604 (2019).
[72] P. Krinninger, M. Schmidt, and J. M. Brader, Nonequilibrium phase behaviour from minimization of free power dissipation, Phys. Rev. Lett. 117, 208003 (2016).
[73] P. Krinninger and M. Schmidt, Power functional theory for active Brownian particles: general formulation and power sum rules, J. Chem. Phys. 150, 074112 (2019).
[74] S. Hermann, D. de las Heras, and M. Schmidt, Phase separation of active Brownian particles in two dimensions: Anything for a quiet life, Mol. Phys. e1902585 (2021).
[75] D. de las Heras and M. Schmidt, Full canonical information from grand potential density functional theory, Phys. Rev. Lett. 113, 238304 (2014).
[76] D. de las Heras, J. M. Brader, A. Fortini, and M. Schmidt, Particle conservation in dynamical density functional theory, J. Phys.: Condens. Matter 28, 244024 (2016).
[77] T. Schindler, R. Wittmann, and J. M. Brader, Particle-conserving dynamics on the single-particle level, Phys. Rev. E 99, 012605 (2019).
[78] M. Schmidt, Power functional theory for Newtonian many-body dynamics, J. Chem. Phys. 148, 044502 (2018).
[79] J. Renner, M. Schmidt, and D. de las Heras, Shear and bulk acceleration viscosities in simple fluids, Phys. Rev. Lett. 128, 094502 (2022).
[80] M. Schmidt, Quantum power functional theory for many-body dynamics, J. Chem. Phys. 143, 174108 (2015).
[81] M. Brütting, T. Trepl, D. de las Heras, and M. Schmidt, Superadiabatic forces via the acceleration gradient in quantum many-body dynamics, Molecules 24, 3660 (2019).
[82] P. S. Clegg, Characterising soft matter using machine learning, Soft Matter 17, 3991 (2021).
[83] M. Dijkstra and E. Luijten, From predictive modelling to machine learning and reverse engineering of colloidal self-assembly, Nature Materials 20, 762 (2021).
[84] G. M. Coli, E. Boattini, L. Filion, and M. Dijkstra, Inverse design of soft materials via a deep learning-based evolutionary strategy, Sci. Adv. 8, eabj6731 (2022).
[85] E. Boattini, M. Dijkstra, and L. Filion, Unsupervised learning for local structure detection in colloidal systems, J. Chem. Phys. 151, 154901 (2019).
[86] R. van Mastrigt, M. Dijkstra, M. van Hecke, and C. Coulais, Machine learning of implicit combinatorial rules in mechanical metamaterials, Phys. Rev. Lett. 129, 198003 (2022).
[87] G. Campos-Villalobos, E. Boattini, L. Filion, and M. Dijkstra, Machine learning many-body potentials for colloidal systems, J. Chem. Phys. 155, 174902 (2021).
[88] G. Campos-Villalobos, G. Giunta, S. Marín-Aguilar, and M. Dijkstra, Machine-learning effective many-body potentials for anisotropic particles using orientation-dependent symmetry functions, J. Chem. Phys. 157, 024902 (2022).
[89] S. Ciarella, M. Chiappini, E. Boattini, M. Dijkstra, and L. M. C. Janssen, Dynamics of supercooled liquids from static averaged quantities using machine learning, arXiv:2212.09338.
[90] T. Santos-Silva, P. I. C. Teixeira, C. Anquetil-Deck, and D. J. Cleaver, Neural-network approach to modeling liquid crystals in complex confinement, Phys. Rev. E 89, 053316 (2014).
[91] S.-C. Lin and M. Oettel, A classical density functional from machine learning and a convolutional neural network, SciPost Phys. 6, 025 (2019).
[92] S.-C. Lin, G. Martius, and M. Oettel, Analytical classical density functionals from an equation learning network, J. Chem. Phys. 152, 021102 (2020).
[93] P. Cats, S. Kuipers, S. de Wind, R. van Damme, G. M. Coli, M. Dijkstra, and R. van Roij, Machine-learning free-energy functionals using density profiles from simulations, APL Mater. 9, 031109 (2021).
[94] B. Rotenberg, Use the force! Reduced variance estimators for densities, radial distribution functions, and local mobilities in molecular simulations, J. Chem. Phys. 153, 150902 (2020).
[95] D. Borgis, R. Assaraf, B. Rotenberg, and R. Vuilleumier, Computation of pair distribution functions and three-dimensional densities with a reduced variance principle, Mol. Phys. 111, 3486 (2013).
[96] D. de las Heras and M. Schmidt, Better than counting: Density profiles from force sampling, Phys. Rev. Lett. 120, 218001 (2018).
[97] J. Renner, M. Schmidt, and D. de las Heras, Better than counting: Orientational distribution functions from torque sampling, arXiv:2212.11576.
[98] B. He, I. Martin-Fabiani, R. Roth, G. I. Tóth, and A. J. Archer, Dynamical density functional theory for the drying and stratification of binary colloidal dispersions, Langmuir 37, 1399 (2021).
[99] M. Kundu and M. P. Howard, Dynamic density functional theory for drying colloidal suspensions: Comparison of hard-sphere free-energy functionals, J. Chem. Phys. 157, 184904 (2022).
[100] J. Sui, M. Doi, and Y. Ding, Dynamics of the floating nematic phase formation in platelet suspension with thickness polydispersity by sedimentation, Soft Matter 14, 8956 (2018).
[101] S. M. Tschopp and J. M. Brader, First-principles superadiabatic theory for the dynamics of inhomogeneous fluids, J. Chem. Phys. 157, 234108 (2022).
[102] V. Molinero and E. B. Moore, Water modeled as an intermediate element between carbon and silicon, J. Phys. Chem. B 113, 4008-4016 (2009).
[103] M. K. Coe, R. Evans, and N. B. Wilding, The coexistence curve and surface tension of a monatomic water model, J. Chem. Phys. 156, 154505 (2022).
[104] S. Saw, N. L. Ellegaard, W. Kob, and S. Sastry, Structural relaxation of a gel modeled by three body interactions, Phys. Rev. Lett. 103, 248305 (2009).
[105] S. Saw, N. L. Ellegaard, W. Kob, and S. Sastry, Computer simulation study of the phase behavior and structural relaxation in a gel-former modeled by three-body interactions, J. Chem. Phys. 134, 164506 (2011).
[106] M. Doi, Onsager's variational principle in soft matter, J. Phys.: Condens. Matter 23, 284118 (2011).
[107] M. Doi, Onsager principle as a tool for approximation, Chinese Phys. B 24, 020505 (2015).
[108] H. Wang, T. Qian, and X. Xu, Onsager's variational principle in active soft matter, Soft Matter 17, 3634 (2021).
[109] X. Wang, J. Dobnikar, and D. Frenkel, Numerical test of the Onsager relations in a driven system, Phys. Rev. Lett. 129, 238002 (2022).
[110] U. Seifert, Stochastic thermodynamics, fluctuation theorems and molecular machines, Rep. Prog. Phys. 75, 126001 (2012).
[111] R. L. Jack and P. Sollich, Large deviations and ensembles of trajectories in stochastic models, Prog. Theor. Phys. Suppl. 184, 304 (2010).
[112] R. L. Jack and P. Sollich, Effective interactions and large deviations in stochastic processes, Eur. Phys. J. Spec. Top. 224, 2351 (2015).
[113] L. M. C. Janssen, Mode-coupling theory of the glass transition: a primer, Front. Phys. 6, 97 (2018).
[114] L. M. C. Janssen and D. R. Reichman, Microscopic dynamics of supercooled liquids from first principles, Phys. Rev. Lett. 115, 205701 (2015).
[115] G. Mazzuca, T. Grava, T. Kriecherbauer, K. T.-R. McLaughlin, C. B. Mendl, and H. Spohn, Equilibrium spacetime correlations of the Toda lattice on the hydrodynamic scale, arXiv:2301.02431.
[116] D. Lips, A. Ryabov, and P. Maass, Brownian asymmetric simple exclusion process, Phys. Rev. Lett. 121, 160601 (2018).
[117] A. P. Antonov, A. Ryabov, and P. Maass, Solitons in overdamped Brownian dynamics, Phys. Rev. Lett. 129, 080601 (2022).

99FLT4oBgHgl3EQfui_z/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
9tFAT4oBgHgl3EQfpx3L/content/tmp_files/2301.08642v1.pdf.txt ADDED
@@ -0,0 +1,2672 @@
Optimal multiple FSO transceiver configuration for use on high-altitude platforms

Dieu Linh Truong* (1) and The Ngoc Dang† (2)
(1) School of Information and Communication Technology, Hanoi University of Science and Technology, Vietnam
(2) Department of Wireless Communications, Posts and Telecommunication Institute of Technology, Vietnam
* linhtd@soict.hust.edu.vn   † ngocdt@ptit.edu.vn

January 23, 2023 (arXiv:2301.08642v1 [cs.NI])

Abstract

Free-space optical (FSO) communication requires line of sight (LoS) between the transmitter and the receiver. For long-distance communication, many research projects have been conducted towards using a network composed of high-altitude platforms (HAPs) flying at an elevation of 20 km to carry intermediate FSO transceivers that forward data between ground stations. The clear environment at high elevations prevents terrestrial obstacles from cutting the LoS between the transceivers. An FSO transceiver on a HAP can communicate with ground stations within a small area only, owing to its limited beam size. We suggest using multiple FSO transceivers on a HAP to extend its ground coverage. However, the use of too many FSO transceivers may quickly exhaust the onboard energy of the HAP. As a result, the HAP must be lowered frequently for recharging.

In this study, we first propose a configuration of multiple FSO transceivers to widen the ground coverage of a HAP. We then propose a set of closed-form expressions to calculate the extended coverage. Finally, to implement a HAP network using multiple FSO transceivers, we seek the optimal configuration of multiple FSO transceivers that minimizes the total cost of the HAP network, including amortization, energy, and maintenance costs. The simulation results show that the proposed multiple FSO transceiver configuration clearly increases the ground coverage of a HAP and significantly reduces the cost of the HAP network.

Keywords: Free Space Optics, High-altitude platform, Beam size optimization, HAP-based FSO network
1 Introduction

Free-space optical (FSO) communication uses light propagation in free space to transmit data. In recent years, this technology has emerged as a promising choice for short-distance high-speed communication between endpoints with a clear line of sight (LoS). Commercial FSO transmitters available in the market at prices of thousands of dollars can operate at 1.25-10 Gbps over 1-2 kilometers, for example, the SONABeam series of fSona [1].

To reach a long distance, a multi-hop FSO system can be used, where data are transmitted through intermediate FSO transceivers [2], [3]. To avoid obstacles that cut the LoS between terrestrial FSO transceivers, researchers from academia and industry have proposed placing the intermediate FSO transceivers of the multi-hop FSO system on high-altitude platforms (HAPs). High-altitude platforms are flying objects that operate at altitudes of 17-24 km in the stratosphere. Several HAP models have been proposed and piloted previously, and some projects have continued until recently, such as the Loon Project of Google [4], the UAV project of Facebook [5], and the Stratobus project of Thales Alenia Space [6].

A multi-hop FSO system using a HAP network is described in [7] and illustrated in Figure 1. According to this model, FSO transceivers on the ground (so-called ground FSO nodes) are regrouped into clusters that become the serving zones of HAPs. A HAP has an FSO transceiver looking down to exchange data with the ground FSO nodes of the cluster under it. This FSO transceiver is called the serving FSO transceiver. A HAP also carries several FSO transceivers pointing towards other HAPs for inter-HAP communication. These FSO transceivers are known as inter-HAP FSO transceivers.

Although the ITU recommends a HAP footprint width of approximately 500 km in radius, experimental projects show much smaller coverage areas [8]. Nevertheless, a network of multiple HAPs can cover a country entirely. For example, a constellation of 16 HAPs with multiple radio frequency antennas was considered to cover Japan [9].

An end-to-end data-switching scheme for a multi-hop FSO system using HAPs was proposed in [7]. Since the communication between a HAP and the ground is point-to-multipoint, the serving FSO transceiver on the HAP controls multiple accesses from the ground FSO nodes under it using the WDM technique. Each ground node is assigned a separate wavelength for up and down communication. An IP router on the HAP aggregates IP packets heading toward a common cluster into a single flow. The flow is carried by one or more continuous lightpaths between the source and destination HAPs. The number of lightpaths is determined according to the size of the flow and the transport capacity of a wavelength, as sketched below. A WDM switch is installed on each HAP to route these lightpaths over the HAP network on a wavelength-switched basis. In Figure 1, the blue path HAP1-HAP2-HAP4-HAP5 and the red path HAP1-HAP2-HAP3 are two flows.
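For instance, if each wavelength transports a fixed bitrate, the number of lightpaths needed for an aggregated flow is simply the flow size divided by the per-wavelength capacity, rounded up. A minimal Python sketch of this sizing rule (the 25 Gbps flow and 10 Gbps per-wavelength capacity are assumed example figures, not values from the paper):

```python
import math

flow_gbps = 25.0                  # assumed aggregated IP traffic toward one destination cluster
wavelength_capacity_gbps = 10.0   # assumed transport capacity of a single wavelength

num_lightpaths = math.ceil(flow_gbps / wavelength_capacity_gbps)
print(num_lightpaths)             # -> 3 lightpaths between the source and destination HAPs
```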
[Figure: HAPs 1-5 carrying serving and inter-HAP FSO transceivers, WDM switches and IP routers, clusters of ground FSO nodes forming the serving zones, and inter-HAP links.]
Figure 1: Multi-hop FSO communication system using HAP.
In terrestrial FSO communications, the light beams are usually set to be very narrow to keep transmission energies low. However, for HAP-to-ground communication, the serving FSO transceiver of the HAP must project a sufficiently wide laser beam to cover the distributed ground FSO nodes.

A single serving FSO transceiver has a relatively small footprint owing to the limited power of current laser sources and the limited sensitivity and aperture sizes of ground receivers. The calculation in Section 2.1 shows that with a laser source of 1 Watt, a required received power at the receivers of -41.1 dBm, and a receiver aperture radius of 2 m, a single serving FSO transceiver at an elevation of 20 km can cover a ground area of only 6.691 km radius (see Table 3).

To extend the coverage of a HAP, we propose using multiple serving FSO transceivers arranged in a bundle, as shown in Figure 2. Each serving FSO transceiver points in a slightly different direction to cover a particular ground area that overlaps other areas, so as to create a continuous coverage region. Given a ground region to be served, using HAPs with multiple serving FSO transceivers reduces the number of required HAPs compared to using HAPs with a single serving FSO transceiver. However, the expenditure for serving FSO transceivers increases. Therefore, the number of serving FSO transceivers to be used on a HAP should be carefully considered.

Figure 2: A HAP with multiple serving FSO transceivers and its footprint.

Regarding the communication between ground nodes and a HAP, the multiple serving FSO transceiver model still uses the WDM technique, where each ground node is assigned a unique wavelength within its cluster to communicate with its HAP. The number of ground nodes that can be served by a HAP is restricted by the number of wavelengths offered by the WDM technique.

In this study, we focus on identifying the optimal configuration of multiple serving FSO transceivers to achieve a minimal-cost HAP network for serving a set of ground FSO nodes. The optimal configuration should define the number of serving FSO transceivers to be set up on a HAP and the beam width for each transceiver. The cost of the HAP network includes the investment, energy, and maintenance costs.

Compared with the previous study in reference [7], the current research differs in two aspects. First, the current research proposes the use of multiple serving FSO transceivers on each HAP instead of a single serving FSO transceiver, as in [7]. Second, the current research identifies the optimal beam widths for the serving FSO transceivers, whereas in [7] the beam widths are predefined. The current study also differs from that in [10], where the beam size was optimized for an inter-HAP link, which is a point-to-point link.

The remainder of this paper is organized as follows. First, we analyze the single and multiple serving FSO transceiver configurations in Section 2 to determine their ground coverage sizes and the constraints on transmitter beams. In Section 3, we state the problem of designing a minimal-cost HAP-based FSO network, which is the target of the optimization of the multiple serving FSO transceiver configuration. Then, in Section 4, we define a HAP energy consumption formula and show that solar energy is necessary for keeping the HAP working in space for a long period. We also present a constraint that a HAP must respect to rely solely on solar energy. In Section 5, we present the algorithms for identifying the optimal multiple serving FSO transceiver configuration and its footprint radius. Section 6 presents the process of designing the minimal-cost HAP-based FSO network using the optimal multiple serving FSO transceiver configuration. Section 7 presents the simulation results. Finally, Section 8 concludes the paper.
2 Serving FSO transceiver configurations

2.1 Single serving FSO transceiver configuration

In this section, the allowable beam width and ground coverage of a single serving FSO transceiver are determined. The beam size is restricted to ensure that the received power at a ground point within the beam footprint is detectable by the receivers.

Figure 3: The surface of the part of a sphere blocked by solid angle α is calculated as the sum of the surfaces of all ribbons around the sphere when the solid angle varies from α to 0.

Assume that the transmitter source radiates within a solid angle α and that the radiation density is uniform in all directions within the solid angle at a distance r from the source. The radiation density at distance r is inversely proportional to the surface of the part of the sphere of radius r blocked by the solid angle α. To calculate this surface, we divide the sphere into thin ribbons corresponding to opening angles of d(α/2). The width of a ribbon is r d(α/2), as shown in Figure 3. The radius of the ribbon at zenith angle α/2 is r sin(α/2). Thus, the ribbon surface is 2πr sin(α/2) r d(α/2). The surface of the part of the sphere blocked by the solid angle α is the sum of the surfaces of all ribbons as the zenith angle varies from α to 0, as follows:

∫_α^0 2πr sin(α/2) r d(α/2) = 2πr^2 (1 − cos(α/2))

Let U_r be the radiation density at distance r and P_tx be the transmitted power at the source. We deduce:

U_r = P_tx / (2πr^2 (1 − cos(α/2)))    (1)

Let P^rx_j be the received power at ground FSO node j. The received power is proportional to the radiation density and the receiving aperture of the ground node. It is:

P^rx_j = e^(−σL_j) U_{L_j} A_R    (2)

where
- L_j is the distance between ground FSO node j and its serving HAP H_i (see Figure 4),
- σ is the attenuation coefficient of the links between the HAP and the ground,
- U_{L_j} is the radiation density at distance L_j from the source,
- A_R is the aperture area of the receiver. Let R_rx be the receiver aperture radius; then A_R = πR_rx^2.

In (2), the first term represents the attenuation of laser power through the atmosphere, which is described by the exponential Beer-Lambert law [11].

Figure 4: The received power at border nodes of a coverage area is the smallest amongst all nodes in the area.

By substituting U_{L_j} from (1) into (2), we obtain the received power at node j as follows:

P^rx_j = e^(−σL_j) × (P_tx R_rx^2) / (2 L_j^2) × 1 / (1 − cos(α/2))    (3)

The power received at node j must not be less than the required level of the receiver, denoted by ρ_rx. It is obvious that a point j at the border of the ground coverage area receives the least power because it is the furthest from the source (see Figure 4). Hence, all points in the coverage area of HAP H_i receive sufficient power if and only if the border points receive at least the required power; that is,

P^rx_j = e^(−σH/cos(α/2)) × (P_tx R_rx^2 cos^2(α/2)) / (2H^2 (1 − cos(α/2))) ≥ ρ_rx    (4)

where L_j has been substituted by H/cos(α/2) for a border node j.

Solving inequality (4) yields the allowable beam width of the single serving FSO transceiver configuration. Corresponding to beam width α, the ground coverage radius of the configuration is:

R_i = H tan(α/2)    (5)

Lemma 1. The function P^rx_j decreases with α ∈ [0..π].

The proof of Lemma 1 is given in Appendix A. Figure 5 shows the received power at the border of the coverage area for different receiver aperture radii R_rx. This figure confirms that P^rx_j decreases with an increase in α.

Let α_max be the value of α that makes P^rx_j(α_max) = ρ_rx; then, according to Lemma 1,

P^rx_j(α) ≥ P^rx_j(α_max) = ρ_rx,  for all α ∈ [0..α_max]

thus all α ∈ [0..α_max] satisfy constraint (4). Calculations using the parameters given in Table 1 show that when R_rx = 2 m, α_max = 37° and the coverage radius is 6.691 km. When R_rx = 4 m, α_max = 67° and the coverage radius is 13.237 km.
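As a quick numerical check of (4) and (5), the following Python sketch scans the beam width, evaluates the border received power, and reports the largest feasible α together with the corresponding coverage radius. The values P_tx = 1 W, ρ_rx = -41.1 dBm, R_rx = 2 m and H = 20 km are those quoted above; the attenuation coefficient σ is an assumed placeholder, since Table 1 is not reproduced here.

```python
import numpy as np

# Parameters quoted in the text; sigma is an assumed placeholder value.
P_tx = 1.0                            # transmitted power (W)
rho_rx = 10 ** (-41.1 / 10) * 1e-3    # required received power: -41.1 dBm expressed in W
R_rx = 2.0                            # receiver aperture radius (m)
H = 20e3                              # HAP elevation (m)
sigma = 1e-6                          # atmospheric attenuation coefficient (1/m), assumed

def border_power(alpha):
    """Received power at the coverage border, Eq. (4); alpha in radians."""
    L = H / np.cos(alpha / 2)         # distance from the HAP to a border node
    return (np.exp(-sigma * L) * P_tx * R_rx**2 * np.cos(alpha / 2)**2
            / (2 * H**2 * (1 - np.cos(alpha / 2))))

# Lemma 1: border_power decreases with alpha, so the largest feasible value is alpha_max.
alphas = np.radians(np.linspace(0.1, 179.0, 100_000))
feasible = alphas[border_power(alphas) >= rho_rx]
alpha_max = feasible.max()
R_cov = H * np.tan(alpha_max / 2)     # ground coverage radius, Eq. (5)
print(f"alpha_max = {np.degrees(alpha_max):.1f} deg, coverage radius = {R_cov / 1e3:.3f} km")
```

With the assumed σ this yields a beam width limit of a few tens of degrees, in line with the α_max = 37° reported above for R_rx = 2 m; the exact number depends on the attenuation value used.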
[Plot: received power at the coverage border (in units of 10^-8 W) versus beam size α (degrees) for R_rx = 0.125, 1, 2, and 4 m, compared with the required power at the receiver.]
Figure 5: Received power at the coverage border of the single serving FSO transceiver configuration with different receiver apertures.
2.2 Multiple serving FSO transceiver configuration

The ground coverage of a HAP can be widened by combining several serving FSO transceivers. Different combinations are possible. In this research, we study a straightforward configuration in which a principal serving FSO transceiver at the center projects light perpendicular to the ground, and several identical supplementary serving FSO transceivers are set evenly around the principal one (Figure 6). Each supplementary transceiver projects a slanted beam to extend the coverage in one direction. This arrangement is referred to as the mFSO configuration. Usually, the transmitters in a bundle are considered to project signals in parallel. However, because of the large principal beam, the supplementary serving FSO transceiver projection directions are far from perpendicular to the ground, and their footprints are ellipses instead of circles.

Figure 6: Footprint of the multiple FSO transceiver (mFSO) configuration.

To create a continuous coverage region, the footprint of the principal serving FSO transceiver and those of the supplementary serving FSO transceivers should overlap. Therefore, there should be a sufficiently large number of supplementary serving FSO transceivers to cover the contour of the principal footprint entirely. The extended coverage area is defined as the largest circle covered by these footprints (Figure 6). The principal transceiver is responsible for the region defined by its footprint. A supplementary serving FSO transceiver is responsible for the part limited by its footprint, the principal coverage circle, and the extended coverage circle.

Let α still denote the beam width of the principal serving FSO transceiver. To ensure that ground nodes under the principal coverage receive sufficient power, α should still respect constraint (4), as in the single serving FSO transceiver configuration.

Let the beam width of a supplementary serving FSO transceiver be β. In the area for which a supplementary transceiver is responsible, the points on the extended coverage circle are the farthest from the supplementary transceiver; thus, they receive the least power. If these points receive at least ρ_rx, all other points receive sufficient power.

It is easy to note that the footprints of neighboring supplementary serving FSO transceivers join each other on the extended coverage circle. Let J be such a joint point; the power that J receives from the supplementary FSO transceiver is defined similarly to (3) but with beam width β, which is

P^rx_J = e^(−σL_J) × (P_tx R_rx^2) / (4 L_J^2) × 2 / (1 − cos(β/2))    (6)

Thus, β is constrained by the condition P^rx_J ≥ ρ_rx, which gives:

e^(−σL_J) (P_tx R_rx^2) / (2 L_J^2 (1 − cos(β/2))) ≥ ρ_rx    (7)

Let us denote the extended coverage radius by R_ext; then

L_J = sqrt(H^2 + R_ext^2)    (8)

Appendix B presents detailed calculations of L_J and R_ext. The calculations yield the following results:

R_ext = H [2 tan((ξ+α)/2) − tan(α/2) (1 − tan^2((ξ+α)/2))] / [1 − tan^2((ξ+α)/2) + 2 tan((ξ+α)/2) tan(α/2)]    (9)

where

tan((ξ+α)/2) = [tan(γ) + tan(θ)] / [1 − tan(γ) tan(θ) cos(π/m)]    (10)

tan(γ) = tan(α/2) cos(π/m)    (11)

tan(θ) = sqrt(sin^2(β/2) − sin^2(α/2) sin^2(π/m)) / cos(β/2)    (12)

and m is the number of supplementary FSO transceivers set around the principal one.

We remark that R_ext, and thus L_J, depend on α, β and m. Hereafter, R_ext is sometimes denoted by R_ext(α, m, β) and L_J by L_J(α, m, β) to express these dependencies.
3 Problem of designing minimal cost HAP network
491
+ There are several costs in a HAP network, such as investment, en-
492
+ ergy, and maintenance costs. Based on the expected life duration
493
+ and maintenance cycle of a HAP, these costs can be distributed
494
+ by day as 1) daily amortization cost representing investment cost,
495
+ 2) average daily maintenance cost, and 3) daily energy cost. Con-
496
+ sequently, the problem of minimizing network cost becomes min-
497
+ imizing the daily network cost, which comprises these three com-
498
+ ponents.
499
+ Following variables are introduced for formulating mathemati-
500
+ cally the daily network cost:
501
+ • K: Number of HAPs in the network. The HAPs are indexed
502
+ by i ∈ 1..K.
503
+ • niF
504
+ i : Number of FSO transceivers used on HAPi for inter-
505
+ HAP communications.
506
+ • nsF
507
+ i : Number of serving FSO transceiver of HAPi.
508
+ Let ζday
509
+ H
510
+ and ζday
511
+ F
512
+ be constants that express the daily amortiza-
513
+ tion costs of a HAP and an FSO transceiver, respectively. These
514
+ costs are defined as the ratio of the prices of the HAP or FSO
515
+ transceiver to their expected lifetime duration. Then, the overall
516
+ daily amortization cost of the HAP network is:
517
+ Kζday
518
+ H
519
+ + (
520
+ K
521
+
522
+ i=1
523
+ nsF
524
+ i
525
+ +
526
+ K
527
+
528
+ i=1
529
+ niF
530
+ i )ζday
531
+ F
532
+ (13)
533
To evaluate the daily maintenance and energy costs, we need to consider the HAP design. HAPs are classified into two categories based on the underlying physical principle that provides their lifting force: aerodynamic (the HAP is heavier than air) and aerostatic (the HAP is lighter than air). While aerostatic platforms use buoyancy to float in the air, aerodynamic platforms use dynamic forces created by movement through the air [8]. In general, both aerostatic and aerodynamic systems require a "flying energy" to keep the HAP relatively stable for maintaining FSO communication between HAPs and between HAPs and FSO ground nodes. An aerodynamic system requires a large propulsion power to move. Aerostatic systems typically consume less energy than aerodynamic systems do. To be able to operate for a long duration in space, HAPs are mainly unmanned.

HAPs are equipped with different energy resources such as on-site production (e.g., solar energy harvested by solar panels) or rechargeable energy (e.g., batteries or fuel cells brought from the ground). Solar energy-based HAPs can operate continuously in space until they are lowered for maintenance purposes. Rechargeable energy-based HAPs are lowered once the reserved energy is depleted. In brief, the continuous in-space working duration of a HAP is limited by its available energy, which is relatively fixed by the HAP design, its energy consumption level, which varies depending on the payload weight and communication of the HAP, and its maintenance cycle.

We define the maintenance cost of a HAP as the expense of lowering the HAP to perform technical maintenance and energy recharge on the ground, and then reinstalling it in space.

Let d_i be the number of days during which HAP_i can operate continuously in space. Let ζ_mtn be a constant expressing the cost of lowering a HAP one time, maintaining it, recharging it, and then reinstalling it in space. The daily maintenance cost of the HAP network is

\sum_{i=1}^{K} \frac{\zeta_{mtn}}{d_i}    (14)
Regarding the daily energy cost, we consider solar energy to be free, whereas the solar panel cost is counted in the cost of the HAP. The cost of rechargeable energy is part of the maintenance cost. As a result, the energy cost does not appear explicitly in the total cost. Nonetheless, the energy consumption level of a HAP affects its in-space working duration d_i; therefore, we analyze this in Section 4.

Combining (13) and (14), we obtain the following overall daily cost of the HAP network:

Cost = K \zeta_H^{day} + \left(\sum_{i=1}^{K} n_i^{sF} + \sum_{i=1}^{K} n_i^{iF}\right) \zeta_F^{day} + \sum_{i=1}^{K} \frac{\zeta_{mtn}}{d_i}    (15)
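As an illustration of the cost structure in (13)-(15), the small Python sketch below evaluates the daily network cost from per-HAP transceiver counts and in-space working durations; the function and parameter names are ours, not from the paper.

def daily_network_cost(n_sF, n_iF, d, zeta_H_day, zeta_F_day, zeta_mtn):
    """Daily HAP network cost, eq. (15).
    n_sF, n_iF, d: per-HAP lists of serving FSO transceivers, inter-HAP FSO
    transceivers, and continuous in-space working duration (days)."""
    K = len(n_sF)
    amortization = K * zeta_H_day + (sum(n_sF) + sum(n_iF)) * zeta_F_day   # (13)
    maintenance = sum(zeta_mtn / d_i for d_i in d)                          # (14)
    return amortization + maintenance

# Example with the cost constants of Table 1 and a toy 3-HAP network:
# daily_network_cost([3, 3, 3], [2, 4, 2], [365, 365, 365], 100, 10, 1000)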
The problem of minimizing the daily cost of the HAP network is stated as follows.

• Given input parameters including
  – N_FSO: Set of ground FSO nodes and their coordinates. The number of nodes in the set is denoted as |N_FSO|,
  – M: Data traffic to be carried between ground FSO nodes. This is the list of bandwidth demands between the ground nodes.
• Outputs to seek are
  – A HAP network with HAP locations and inter-HAP links,
  – The beam width to set for each serving FSO transceiver.
• The optimization objective is
  – Minimizing the daily cost of the HAP network expressed in (15).
The following two remarks drive us to conduct further analyses in subsequent sections. First, if a HAP has self-sufficient solar energy, its in-space working duration d_i is not limited by its energy consumption but depends uniquely on the maintenance cycle of the HAP, which is usually constant. In Section 4, we show the daily energy consumption of a HAP and the constraint that a HAP needs to respect to rely solely on solar energy.

Second, the cost of the HAP network increases with an increase in the number of FSO transceivers and HAPs. The number of HAPs can be reduced by increasing ground coverage. To increase ground coverage, more serving FSO transceivers can be used on each HAP, but this introduces greater energy consumption and extra amortization cost. Section 5 focuses on identifying the optimal configuration for serving FSO transceivers on a HAP to achieve a minimal HAP network cost.
4 Daily energy consumption of a HAP with payload

Several parameters affect the power consumption of a HAP. The descriptions and notations of these parameters are listed in the Energy parameters section of Table 1. Most parameters were set based on industrial experimental projects such as the Loon project [4], the Stratobus project [6], and other studies listed in the reference column. Section 7.1 presents the choice of parameter values in detail.
Param. notation | Description | Value | Reference
--- Cost related parameters ---
ζ_H^{day} | Daily amortization cost of a HAP. | 100 |
ζ_F^{day} | Daily amortization cost of an FSO transceiver on a HAP. | 10 |
ζ_mtn | Cost of one-time maintenance of a HAP, including lowering it down, maintenance, charging and reinstalling it in the stratosphere. | 1000 |
D_m | Maintenance cycle. | 365 days | [6]
--- Energy parameters ---
E_solar | Minimum daily harvested solar energy by a HAP. | 42 - 290 kWh | [12]
ρ_avion | Power consumed by the avionic part of a HAP to carry a unit of mass. | 2 W/kg |
ρ_F^{HCM} | Power for heating, cooling, and management for each FSO on a HAP. | 20 W | [4]
ρ_PAT | Power consumed by a PAT system. | 15 W | [13]
ρ_F^{inter} | Power consumed by an inter-HAP FSO transceiver for laser source (0.1 W), heating/cooling/management (20 W) and PAT (15 W). | 35.1 W | [4]
--- Inter-HAP FSO link parameters ---
C_n^2 | Atmosphere structure parameter. | 5.0 × 10^-18 m^-2/3 |
- | Attenuation coefficient. | 3.5 × 10^-6 m^-1 | [4]
- | Coupling loss. | 45 dBm |
- | Transmitted power of an inter-HAP FSO transceiver. | 0.1 W | [4]
- | Receiver aperture diameter of an inter-HAP FSO transceiver. | 0.037 m | [4]
- | Beam width of an inter-HAP FSO transmitter. | 280 µrad | [4]
--- HAP-ground link parameters and variables ---
σ | Attenuation coefficient. | 3.5 × 10^-6 m^-1 |
ρ_tx^{FSO} | Transmitted power of the laser source of a serving FSO transceiver. | 1 W |
R_rx | Receiver aperture radius of a ground FSO transceiver. | 0.05 m | SONABeam [1]
ρ_rx | Required received power at a ground FSO transceiver. | 7.76 × 10^-8 W | -41.1 dBm in [4]
--- Other parameters ---
H | Elevation of HAPs. | 20 km |
L_HH | Maximum length of an inter-HAP link so that its BER is under δ. | 88 km |
δ | BER threshold for inter-HAP links and lightpaths between HAPs. | |
W | Number of wavelengths in the WDM technique. | 40; 80 |
µ_H | Platform mass excluding FSO transceivers. | 28.5 kg; 500 kg | [4]
µ_F | FSO transceiver mass. | 6.3 kg | [4]

Table 1: Parameters. Greek characters are used for denoting constant parameters.
Let us consider the power consumption of a single HAP H_i that has m serving FSO transceivers and n_i^{iF} inter-HAP FSO transceivers. The power consumption includes:

• P_{H_i}^{avion}: Power draw of the avionic part for maintaining H_i with its payload in space.
• P_{H_i}^{down}: Power draw of all serving FSO transceivers on HAP H_i. This power includes the heating/cooling/management power and the laser transmitted power of all serving FSO transceivers on the HAP, and the power consumed by the Pointing, Acquisition and Tracking (PAT) system of the HAP.
• P_{H_i}^{inter}: Power draw of all inter-HAP FSO transceivers on HAP H_i for inter-HAP communication. This power includes the heating/cooling/management and PAT power of each inter-HAP FSO transceiver. Inter-HAP FSO transceivers are oriented towards different remote HAPs; therefore, each transceiver must have its own PAT system.

The total daily energy consumption (over 24 hours) of H_i is

E_{consum} = (P_{H_i}^{avion} + P_{H_i}^{down} + P_{H_i}^{inter}) \times 24    (16)
To break down P_{H_i}^{avion}, P_{H_i}^{down}, and P_{H_i}^{inter} further, we introduce the following parameters:

• ρ_avion: Power consumed by the avionic part of the HAP to carry a unit of mass.
• ρ_tx^{FSO}: Transmitted power of each serving FSO transceiver. Because the current power of a laser source is limited to 1 W, which is very small in comparison with the power consumed by other factors on the HAP, we consider ρ_tx^{FSO} = 1 W regardless of the beam width of the serving FSO transceiver.
• ρ_F^{HCM}: Power draw for heating, cooling, and management. It is also considered constant for each serving FSO transceiver and is set to ρ_F^{HCM} = 20 W, according to reference [4].
• ρ_PAT: Power draw for Pointing, Acquisition and Tracking activity; it is another constant and is set to ρ_PAT = 15 W [13]. A HAP system uses a single PAT for its set of serving FSO transceivers.
• ρ_F^{inter}: Power draw of a single inter-HAP FSO transceiver, including communication, heating, cooling, management, and PAT. According to [4], 0.1 W of laser power is sufficient for inter-HAP communication over a 100 km distance. In this study, we limited the inter-HAP link length to less than 100 km and considered the laser power to be constantly 0.1 W regardless of the distance. Therefore, ρ_F^{inter} = ρ_F^{HCM} + ρ_PAT + 0.1.
• µ_H: Mass of the HAP.
• µ_F: Mass of an FSO transceiver on the HAP.
Assuming that P_{H_i}^{avion} is linearly proportional to the weight of the HAP through ρ_avion,

P_{H_i}^{avion} = [\mu_H + (n_i^{sF} + n_i^{iF})\mu_F]\,\rho_{avion}    (17)

P_{H_i}^{down} is the sum of the power consumed by the serving FSO transceivers and the PAT activity of the HAP; thus,

P_{H_i}^{down} = n_i^{sF}(\rho_{tx}^{FSO} + \rho_F^{HCM}) + \rho_{PAT}    (18)

P_{H_i}^{inter} is the sum of the power consumed by the inter-HAP FSO transceivers; thus,

P_{H_i}^{inter} = \rho_F^{inter}\, n_i^{iF}    (19)

Substituting (17), (18), and (19) into (16), we obtain the daily energy consumption of a HAP as

E_{consum} = \left\{[\mu_H + (n_i^{sF} + n_i^{iF})\mu_F]\rho_{avion} + n_i^{sF}(\rho_{tx}^{FSO} + \rho_F^{HCM}) + \rho_{PAT} + \rho_F^{inter} n_i^{iF}\right\} \times 24    (20)
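The following sketch evaluates (20) with the parameter values of Table 1 as defaults; it is only an illustrative computation with our own function name, useful for reproducing curves like those of Figure 7.

def daily_energy_consumption(n_sF, n_iF, mu_H=500.0, mu_F=6.3, rho_avion=2.0,
                             rho_tx=1.0, rho_HCM=20.0, rho_PAT=15.0, rho_inter=35.1):
    """Daily energy consumption of one HAP in Wh, eq. (20); defaults from Table 1."""
    p_avion = (mu_H + (n_sF + n_iF) * mu_F) * rho_avion        # (17)
    p_down = n_sF * (rho_tx + rho_HCM) + rho_PAT                # (18)
    p_inter = rho_inter * n_iF                                  # (19)
    return (p_avion + p_down + p_inter) * 24                    # (16)

# A 500 kg HAP with 14 serving and 10 inter-HAP FSO transceivers:
# daily_energy_consumption(14, 10) is roughly 47,000 Wh/day (about 47 kWh).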
4.1 Necessity of solar energy and utilization constraint

Current HAPs mainly use energy from solar panels mounted on the HAP wings and/or energy from batteries or hydrogen fuel cells (HFC) on board. Solar energy can be harvested and charged into batteries during the day for nighttime use. Harvested solar energy varies with the time of year and the location. According to the experiments in [12], in York, UK, the harvested solar power is 42-80 kWh/day, and in Enugu, Nigeria, it is 290-545 kWh/day, depending on the size of the solar panel.

Figure 7 depicts the total daily energy consumption of a HAP, calculated from (20), versus the number of serving FSO transceivers. Parameters were ρ_avion = 2 W/kg, ρ_PAT = 15 W, and HAP weights µ_H = 28.5 kg or 500 kg. The HAP carried 10 inter-HAP FSO transceivers. The referenced daily solar energy levels were the minimum daily solar energy levels in York and Enugu. Beyond a certain number of serving FSO transceivers, a HAP consumes more energy than the solar energy harvested in York, and an HFC would be necessary. Owing to the limited payload capacity of a HAP, its HFC capacity is also very limited. According to [8], the current state-of-the-art fuel-cell density is approximately 1600 Wh/kg. A lightweight HAP such as a Google Loon balloon, which weighs 28.5 kg, cannot carry heavy long-lasting fuel cells on board. The larger Stratobus HAP can carry up to 450 kg, but it already weighs 7 tons, leading to high energy consumption for flying. Even if the entire Stratobus payload capacity were reserved for the HFC, its energy would quickly run out within a few days.

[Figure 7: Energy consumption of a HAP with different numbers of serving FSO transceivers in comparison with the minimum harvested solar energy at York and Enugu. ρ_avion = 2 W/kg and ρ_PAT = 15 W.]

Based on this observation, we believe that long-duration flights should consider solar energy as the principal energy source. In this case, the power consumption of a HAP with payload must not exceed the daily harvested solar energy. Let the daily harvested solar energy be E_solar; then,

[\mu_H + (n_i^{sF} + n_i^{iF})\mu_F]\rho_{avion} + \rho_{PAT} + n_i^{sF}(\rho_{tx}^{FSO} + \rho_F^{HCM}) + \rho_F^{inter} n_i^{iF} \le \frac{E_{solar}}{24}    (21)
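A direct way to use (21) is as a feasibility test for a candidate HAP payload; the sketch below (our own naming, defaults again taken from Table 1) returns whether a given number of transceivers fits within the daily solar budget.

def solar_sufficient(E_solar_wh, n_sF, n_iF, mu_H=500.0, mu_F=6.3, rho_avion=2.0,
                     rho_tx=1.0, rho_HCM=20.0, rho_PAT=15.0, rho_inter=35.1):
    """Constraint (21): True if the HAP's power draw stays within E_solar / 24."""
    power = ((mu_H + (n_sF + n_iF) * mu_F) * rho_avion + rho_PAT
             + n_sF * (rho_tx + rho_HCM) + rho_inter * n_iF)
    return power <= E_solar_wh / 24

# solar_sufficient(42_000, 7, 10) is True while solar_sufficient(42_000, 8, 10) is False,
# consistent with Table 4's maximum of m = 6 supplementary (7 serving) transceivers at 42 kWh.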
According to Figure 7, the solar energy provision does not need to be very large. A solar energy level between the minimum harvested in York and in Enugu allows a 500 kg HAP to carry at least 6 serving FSO transceivers. A HAP can carry hundreds of FSO transceivers with more than 125 kWh of solar energy. Therefore, it is realistic to rely on solar energy. Hereafter, we consider that HAPs solely use solar energy.

Despite self-sufficient solar energy, HAPs still need to be lowered periodically for maintenance, for example, after one year in the case of Stratobus [6]. Let us denote the maintenance cycle as a constant D_m. Then

d_i = D_m, \quad \forall i \in 1..K    (22)
5 Optimal mFSO configuration

Using multiple serving FSO transceivers increases the expense of FSO transceivers, although it can reduce the expense of HAPs. This section aims to determine the mFSO configuration that minimizes the HAP network cost defined in (15). We assume that all HAPs use identical mFSO configurations, that is, an identical principal beam width α, supplementary beam width β and number of supplementary serving FSO transceivers m.

Let us now consider the dependence of the HAP network cost on the mFSO configuration. As each HAP has m supplementary serving FSO transceivers and uses only solar energy, the cost (15) becomes

Cost = K\zeta_H^{day} + \left(Km + \sum_{i=1}^{K} n_i^{iF}\right)\zeta_F^{day} + K\frac{\zeta_{mtn}}{D_m}

Cost is a function of K, m and n_i^{iF}. K depends on the coverage radius R_ext(α, m, β) of the mFSO configuration. n_i^{iF}, as the number of inter-HAP links of HAP i, depends on the traffic demand set M. Hence, Cost depends on the mFSO configuration and on M. It is difficult to determine the optimal mFSO configuration without considering M. To relax the dependence on M, we estimate Cost by a function that depends solely on the mFSO configuration, that is, the tuple (α, m, β); we then try to find an instance (α, m, β) minimizing the estimated cost, expecting that this instance also drives the real cost to a minimum.
5.1 Cost estimation

[Figure 8: A ground area is divided into a grid of square cells; each cell is circumscribed by a circle representing a serving zone of a HAP.]

First, we estimate the number of HAPs K. Samples of the estimation are datasets with uniformly distributed ground nodes. Let S be the surface of the ground area containing those nodes, and W the number of wavelengths in the WDM technique. We divide the ground zone S into a grid of square cells of size ℓ × ℓ, each of which will be covered by a HAP (see Figure 8). To be served by a HAP, a cell must satisfy the following two conditions:

1. A cell can contain at most W ground nodes because a HAP can use at most W wavelengths to serve ground nodes. Owing to the uniform distribution of ground nodes, we have
\frac{\ell^2}{S}\,|N_{FSO}| \le W

2. A cell must be contained inside a circle of radius equal to the extended radius R_ext of a HAP:
\ell \le \sqrt{2}\, R_{ext}

The maximum number of HAPs required to cover region S is the number of cells. Let this number be K̂; then,

\hat{K} = \frac{S}{\ell^2} = \left\lceil \max\left\{\frac{|N_{FSO}|}{W}, \frac{S}{2R_{ext}^2}\right\}\right\rceil    (23)
Hence, K̂ is an overestimation of the number of HAPs.

Next, we estimate the value of n_i^{iF}. Let V be the maximum number of inter-HAP links that a HAP may have. Then n_i^{iF} ≤ V, ∀i.

Finally, Cost can be overestimated as:

\widehat{Cost} = \hat{K}\left(\zeta_H^{day} + (m + V + 1)\zeta_F^{day} + \frac{\zeta_{mtn}}{D_m}\right)    (24)

\widehat{Cost} is a function of R_ext(α, m, β) and m, while V is a parameter of the estimator. The estimation is more precise when V is set close to the actual number of inter-HAP links of a HAP, and coarser otherwise.
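The two estimators (23) and (24) are simple enough to sketch directly; the helper functions below (names and defaults are ours, constants from Table 1) are reused in the search sketch of Section 5.2.

import math

def estimate_num_haps(n_ground, W, S, R_ext):
    """Overestimated number of HAPs K_hat, eq. (23); S in m^2, R_ext in m."""
    return math.ceil(max(n_ground / W, S / (2 * R_ext ** 2)))

def estimate_cost(K_hat, m, V, zeta_H_day=100.0, zeta_F_day=10.0,
                  zeta_mtn=1000.0, D_m=365.0):
    """Overestimated daily network cost Cost_hat, eq. (24); defaults from Table 1."""
    return K_hat * (zeta_H_day + (m + V + 1) * zeta_F_day + zeta_mtn / D_m)

# Example: 1005 ground nodes on a 100 km x 100 km area, W = 80, R_ext = 11929 m, V = 10:
# estimate_cost(estimate_num_haps(1005, 80, 100e3 * 100e3, 11929), 13, 10)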
5.2 Algorithms finding the optimal configuration

Given α and m, a larger β results in a larger R_ext, and thus a smaller K̂ and a smaller \widehat{Cost}. Therefore, β should be set to the largest value allowed by (7) for a given α and m. It is worth noting that the value of β does not affect the solar energy consumption because the laser power ρ_tx^{FSO} is small and is considered constant. Determining the optimal configuration thus becomes finding the optimal values of α and m.
Algorithm 1 Find the optimal mFSO configuration
 1: function Find-optimal-mFSO
 2:   n_i^{iF} ← V
 3:   cMin ← ∞                                         ▷ cost min
 4:   αMax ← maximum α by (4)
 5:   for α = αMax … 0 do
 6:     mMax ← calculated by (25)                      ▷ max m
 7:     mOpt ← 0                                       ▷ optimal m
 8:     for m = 0 … mMax do
 9:       β ← Beta-max(α, m)                           ▷ max β
10:       Calculate R_ext(α, m, β) using (9), (10), (11), (12)
11:       Calculate \widehat{Cost}(α, m, β) using (24)
12:       if \widehat{Cost} < cMin then
13:         cMin ← \widehat{Cost}
14:         αOpt ← α                                   ▷ optimal α
15:         mOpt ← m                                   ▷ optimal m
16:         βOpt ← β                                   ▷ optimal β
17:       end if
18:     end for
19:   end for
20:   return αOpt, mOpt, βOpt
21: end function
Algorithm 2 Find the maximum β given α, m
 1: function Beta-max(α, m)
 2:   for β = 0 … 180 do
 3:     Calculate R_ext(α, m, β) using (9), (10), (11), (12)
 4:     Calculate L_J using (8)
 5:     Calculate P_J^{rx} using (6)
 6:     if P_J^{rx} < ρ_rx then                        ▷ looking for the first β that violates constraint (7)
 7:       return β − 1                                 ▷ the previous trial β was the maximum
 8:     end if
 9:   end for
10: end function
Following an exhaustive search approach, we examine all possible values of α and m to seek the pair that minimizes \widehat{Cost} in (24). The search range of α is from 0° to the maximum value set by constraint (4). The number of supplementary serving FSO transceivers m is also limited. Indeed, since the number of inter-HAP links of a HAP can go up to V as set in Section 5.1, and n_i^{sF} = m + 1, ∀i, from the energy constraint (21) we deduce the upper bound for m:

m \le \frac{\frac{E_{solar}}{24} - (V\mu_F\rho_{avion} + V\rho_F^{inter} + \mu_H\rho_{avion} + \rho_{PAT})}{\mu_F\rho_{avion} + \rho_F^{HCM} + \rho_{tx}^{FSO}} - 1    (25)
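Bound (25) is easy to evaluate numerically; the sketch below (our naming, defaults from Table 1) reproduces, for instance, the maximum m values listed later in Table 4.

import math

def max_supplementary_transceivers(E_solar_wh, V, mu_H=500.0, mu_F=6.3, rho_avion=2.0,
                                   rho_tx=1.0, rho_HCM=20.0, rho_PAT=15.0, rho_inter=35.1):
    """Upper bound on m from the solar-energy constraint, eq. (25)."""
    budget = E_solar_wh / 24 - (V * mu_F * rho_avion + V * rho_inter
                                + mu_H * rho_avion + rho_PAT)
    return math.floor(budget / (mu_F * rho_avion + rho_HCM + rho_tx)) - 1

# max_supplementary_transceivers(42_000, 10) -> 6 and
# max_supplementary_transceivers(50_000, 10) -> 16, matching the first rows of Table 4.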
Algorithm 1 implements this exhaustive search. Two nested loops scan all possible values of α satisfying constraint (4) and all possible values of m satisfying (25) to find the pair that minimizes \widehat{Cost} in (24). For each pair (α, m), the largest value of β allowed by constraint (7) is selected using Algorithm 2. The optimal mFSO configuration is reported by the algorithms as (αOpt, mOpt, βOpt).

Algorithm 2 finds the maximum β that satisfies constraint (7) for a given pair (α, m) by testing the possible values of β increasingly from 0 until the received power P_J^{rx} at the border of the extended coverage area falls below the required received power ρ_rx. The received power P_J^{rx} is calculated using the set of equations (6), (8), (9), (10), (11), and (12).

In the implementation of both algorithms, α and β step by 1° after each iteration. Finer stepping yields more accurate results. However, even with 1° stepping, the variation in the optimal R_ext is only a few hundred meters, which is negligible in comparison with the absolute value of R_ext, which is in the range of 6-30 kilometers.

The complexity of Algorithm 1 is O(m) because α ≤ π. The complexity of Algorithm 2 is constant because β ≤ π.
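Putting the pieces together, the sketch below is one possible Python rendering of Algorithms 1 and 2. It reuses r_ext(), received_power_at_border(), estimate_num_haps(), estimate_cost() and max_supplementary_transceivers() from the earlier sketches; it is an illustration of the search procedure, not the authors' code. For m = 0 we assume the footprint reduces to the principal one, R_ext = H·tan(α/2).

import math

def beta_max(alpha_deg, m, link):
    """Algorithm 2: largest beta (degrees) for which constraint (7) still holds."""
    alpha = math.radians(alpha_deg)
    best = 0
    for beta_deg in range(1, 181):
        try:
            p = received_power_at_border(link["H"], alpha, math.radians(beta_deg), m,
                                         link["sigma"], link["P_tx"], link["R_rx"])
        except ValueError:          # beta too small: the square root in (12) is not real yet
            continue
        if p < link["rho_rx"]:
            return best             # the previous feasible beta was the maximum
        best = beta_deg
    return best

def find_optimal_mfso(alpha_max_deg, link, net):
    """Algorithm 1: exhaustive 1-degree search over (alpha, m, beta) minimizing (24)."""
    m_max = max_supplementary_transceivers(net["E_solar_wh"], net["V"])
    best, c_min = None, float("inf")
    for alpha_deg in range(alpha_max_deg, 0, -1):
        alpha = math.radians(alpha_deg)
        for m in range(0, m_max + 1):
            if m == 0:
                beta_deg, rext = None, link["H"] * math.tan(alpha / 2)   # principal footprint only
            else:
                beta_deg = beta_max(alpha_deg, m, link)
                if beta_deg == 0:
                    continue                                             # no feasible beta for this (alpha, m)
                rext = r_ext(link["H"], alpha, math.radians(beta_deg), m)
            k_hat = estimate_num_haps(net["n_ground"], net["W"], net["S"], rext)
            cost = estimate_cost(k_hat, m, net["V"])
            if cost < c_min:
                c_min, best = cost, (alpha_deg, m, beta_deg)
    return best, c_min

The link dictionary would carry the HAP-ground link parameters of Table 1 (H, σ, P_tx, R_rx, ρ_rx) and net the scenario parameters (number of ground nodes, W, area S, V, E_solar).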
6 Design of the HAP network topology

This section presents the HAP network design using the optimal configuration identified above. Let L_inter denote the number of inter-HAP links. Since \sum_{i=1}^{K} n_i^{iF} is the total number of inter-HAP FSO transceivers, it is equal to 2L_inter. The network cost becomes:

Cost = K\zeta_H^{day} + \left(K(m + 1) + 2L_{inter}\right)\zeta_F^{day} + K\frac{\zeta_{mtn}}{D_m}

which is equivalent to

Cost = K\left(\zeta_H^{day} + (m + 1)\zeta_F^{day} + \frac{\zeta_{mtn}}{D_m}\right) + 2L_{inter}\,\zeta_F^{day}    (26)
The cost is proportional to the number of HAPs K and the number of inter-HAP links L_inter. We consider that the daily amortization cost of a HAP is much greater than that of an FSO transceiver; thus, the coefficient of K is much greater than the coefficient of L_inter in Cost. Consequently, minimizing K should be prioritized over minimizing L_inter. Therefore, the topology design is broken into the following two steps:

i) ground nodes are clustered into equal-radius circles that will become serving zones of HAPs, in such a way that the number of clusters is the smallest, for minimizing K;

ii) the corresponding HAPs are located at the centers of the clusters but at an elevation of 20 km, and are interconnected by the fewest number of inter-HAP links, L_inter.

A HAP network topology design algorithm following these two steps was proposed in [7]. In that algorithm, the clustering radius was not determined but was left as an input of the algorithm. In the current study, we set the clustering radius to the extended coverage radius R_ext of the optimal mFSO configuration to drive towards a HAP network with minimal Cost. The main steps of the HAP network design process are presented in Figure 9, where the steps taken from [7] are shown in color.

[Figure 9: HAP network design flowchart.]

The process is explained as follows:
• Initialize V, the maximum number of inter-HAP links of a HAP, with a constant.

• Calculate the optimal mFSO configuration using Algorithm 1, and set the clustering radius to its R_ext.

• Apply the clustering algorithm proposed in [7] to distribute the ground nodes into clusters of radius R_ext while keeping the number of ground nodes in each cluster under W. Each cluster becomes a serving zone of a HAP. The HAP is located at the center of the cluster but at an elevation of 20 km.

• Bandwidth demands between ground nodes belonging to different serving zones are bundled into lightpaths between the corresponding HAPs, creating the inter-HAP traffic matrix M_HAP.

• Apply the HAP topology design algorithm proposed in [7] to build the HAP topology. The algorithm begins with an empty topology. It finds a route for each lightpath demand of M_HAP from a full-mesh graph linking all HAPs within the communication distance limit L_HH. Each time a lightpath uses an inter-HAP link that has not yet been included in the current HAP topology, the link is incorporated into the topology. Links already in the topology are prioritized when building routes for the next lightpath demands.

• Once all lightpath demands in M_HAP are routed, the final topology is achieved. Otherwise, routing may fail due to low connectivity between HAPs. In this case, V is increased by one, and the process is repeated until all lightpath demands in M_HAP are routed.
7 Simulation results

The algorithms for finding the optimal mFSO configuration were implemented and integrated with the topology design algorithm described in Section 6. We performed simulations with practical parameters and evaluated the efficiency of the mFSO configuration compared to the single serving FSO transceiver configuration.
7.1 Parameter values

The simulation parameters are listed in Table 1. The values of these parameters were chosen according to experiments reported in the literature. This subsection explains the choices of the parameter values.

Cost-related parameters: The cost-related parameters are set such that the daily amortization cost of a HAP is significantly greater than that of an FSO transceiver, and the one-time maintenance cost is significantly higher than the daily amortization cost of a HAP. The maintenance cycle of a HAP is set to D_m = 1 year according to published information on Stratobus [6].

Energy-related parameters:

• E_solar - daily harvested solar energy. We considered daily solar energy levels between the minimum daily solar energy values in York and Enugu reported in [12], which were 42 kWh and 290 kWh, respectively.

• ρ_avion - power consumed by the avionic part of a HAP to carry a unit of mass. Although the power-to-mass ratio can be estimated as 6 W/kg according to [12], the published power rates of real systems are smaller. For aerodynamic systems such as Zephir-S, Zephir-T [14], and Phasa-35 [15], ρ_avion varies from 2.68 to 3.04 W/kg. Indeed, Zephir-S weighs 80 kg (75 kg platform and 5 kg payload) and consumes 243 W, Zephir-T weighs 160 kg (140 kg platform and 20 kg payload) and consumes 429 W, and Phasa-35 weighs 165 kg (150 kg platform and 15 kg payload) and consumes 459 W. Aerostatic systems consume even less power. The Stratobus weighs 7000 kg and consumes 5 kW when it carries a 250 kg payload and 8 kW when it carries 450 kg [6]. Thus, the power-to-mass ratio of Stratobus is only between 0.69 and 1.07 W/kg. Therefore, in this simulation ρ_avion was set to 2 W/kg.

• ρ_PAT - power consumed by a PAT system; it was set to 15 W according to [13].

• ρ_F^{HCM} - power consumed for heating, cooling, and management; it was set to 20 W according to [4].

• ρ_F^{inter} - power consumed by an inter-HAP FSO transceiver; it was set to 35.1 W, including the laser power, ρ_F^{HCM} and ρ_PAT.

Inter-HAP link parameters: These parameters were set to values similar to those provided in the Loon project [4].

HAP-ground FSO link parameters: The attenuation coefficient of an FSO link between a HAP and a ground node is set identical to that of the inter-HAP links. The required received power ρ_rx at a ground node was set according to [4]. The aperture radius R_rx of a ground FSO receiver was set according to the commercial FSO transceiver SONABeam [1].

Other parameters:

• δ - BER threshold for inter-HAP links and lightpaths. We set δ = 10^{-3} because errors at that BER can be corrected using current Forward Error Correction (FEC) techniques.

• L_HH - the maximum allowable distance between two HAPs such that the BER of an inter-HAP link is less than δ = 10^{-3}. Using the inter-HAP FSO link parameters listed in Table 1, the calculation yielded L_HH = 88 km.

• µ_H - platform mass; it varies significantly from one design to another. The Loon balloon weighs just 28.5 kg while the Stratobus weighs 7000 kg. With ρ_avion = 2 W/kg, a HAP weighing more than 7000 kg already consumes 326 kWh/day to carry itself, which is more than the maximum harvested solar energy, leaving no remaining energy to carry FSO transceivers. Therefore, µ_H = 500 kg was used in the simulations.

• µ_F - mass of an FSO transceiver on HAPs. It was set according to the FSO transceiver used in the Loon project, which weighs 6.3 kg [4]. This value is consistent with the 8-10 kg weights of commercial terrestrial SONABeam FSO transceivers [1].

• W - the number of wavelengths per FSO link. It was set to 40 or 80 according to the current WDM technique.
The test dataset contained 19 test cases, each with 400-2800 ground nodes. The ground FSO node locations were randomly generated on a square surface of 100 × 100 km, which is the size of a large metropolis. The test cases had different numbers of ground nodes, reflecting different ground node densities. The traffic requirement M contained demands randomly generated between ground FSO nodes such that the total incoming or outgoing traffic of a ground FSO node did not exceed 1 Gbps, which is the capacity of a single wavelength.

Initially, V was set to 10. The optimal multiple serving FSO transceiver configuration (α, m, β) was calculated using Algorithms 1 and 2. The extended radius R_ext of the optimal configuration was calculated using (9) and was then used as the clustering radius in the HAP topology design step.

With E_solar = 42 kWh and W = 40, V had to be increased to 12 to get all demands in M_HAP routed successfully for all test cases. With all other E_solar and W values, the topology design algorithm successfully routed all demands in M_HAP for all test cases with the initial V = 10.

Figure 10 illustrates the HAP locations and their footprints calculated using the proposed algorithms for a test case of 1005 ground FSO nodes, E_solar = 75 kWh, and W = 80.
7.2 mFSO configuration versus single serving FSO transceiver configuration

Table 3 lists the maximum beam width α_max according to (4) and the maximum ground coverage radius of the single serving FSO transceiver configuration when the receiver aperture radius was varied.
Each row of Table 2 gives, for one test case |N_FSO|, the optimal configuration (α in degrees, m, β in degrees, R_ext in meters) and the resulting daily Cost under four settings: E_solar = 42 kWh with (W = 40, V = 12) and (W = 80, V = 10), and E_solar = 50-290 kWh with (W = 40, V = 10) and (W = 80, V = 10). Columns are numbered (1)-(21): column (1) is |N_FSO|, columns (2)-(6), (7)-(11), (12)-(16) and (17)-(21) are α, m, β, R_ext, Cost for the four settings in that order.

|N_FSO| | 42 kWh, W=40, V=12 | 42 kWh, W=80, V=10 | 50-290 kWh, W=40, V=10 | 50-290 kWh, W=80, V=10
480  | 37 0 - 6691 15308 | 37 0 - 6691 13488 | 37 13 16 11929 10010 | 37 13 16 11929 9470
588  | 37 0 - 6691 16639 | 37 0 - 6691 14519 | 37 13 16 11929 9304  | 37 13 16 11929 8964
763  | 37 0 - 6691 18495 | 37 0 - 6691 15815 | 37 13 16 11929 9990  | 37 13 16 11929 9510
854  | 37 0 - 6691 19141 | 37 0 - 6691 16341 | 37 13 16 11929 10493 | 37 13 16 11929 9933
998  | 37 0 - 6691 19101 | 37 0 - 6691 16701 | 37 13 16 11929 10855 | 37 13 16 11929 10215
1005 | 37 0 - 6691 19068 | 37 0 - 6691 16308 | 37 13 16 11929 11138 | 37 13 16 11929 10478
1150 | 37 0 - 6691 19666 | 37 0 - 6691 16926 | 37 13 16 11929 10915 | 37 13 16 11929 10275
1345 | 37 0 - 6691 20644 | 37 0 - 6691 17564 | 37 13 16 11929 11741 | 37 13 16 11929 10395
1477 | 37 0 - 6691 20752 | 37 0 - 6691 17612 | 37 12 16 11539 13115 | 37 13 16 11929 10335
1523 | 37 0 - 6691 21895 | 37 0 - 6691 18595 | 37 12 16 11539 14053 | 37 13 16 11929 10375
1675 | 37 0 - 6691 21735 | 37 0 - 6691 18535 | 37 11 16 11042 14128 | 37 13 16 11929 10495
1736 | 37 0 - 6691 22461 | 37 0 - 6691 19301 | 37 11 16 11042 14874 | 37 13 16 11929 10455
1911 | 37 0 - 6691 22481 | 37 0 - 6691 19021 | 37 10 16 10395 14869 | 37 13 16 11929 10495
2009 | 37 0 - 6691 22641 | 37 0 - 6691 19321 | 37 10 16 10395 15575 | 37 13 16 11929 10595
2135 | 37 0 - 6691 22761 | 37 0 - 6691 19221 | 37 10 16 10395 16493 | 37 13 16 11929 10575
2304 | 37 0 - 6691 22881 | 37 0 - 6691 19301 | 37 9 16 9524 18192   | 37 13 16 11929 10655
2325 | 37 0 - 6691 22368 | 37 0 - 6691 18948 | 37 9 16 9524 18555   | 37 13 16 11929 10675
2491 | 37 0 - 6691 22761 | 37 0 - 6691 19401 | 37 8 16 8946 18660   | 37 13 16 11929 10655
2753 | 37 0 - 6691 23346 | 37 0 - 6691 19926 | 37 8 16 8946 20284   | 37 13 16 11929 11178

Table 2: Optimal configurations and costs of all test cases with Rrx = 2 m.
Receiver aperture radius Rrx (m) | Maximum beam width αmax | Maximum coverage radius (m)
2 | 37° | 6691
4 | 67° | 13237

Table 3: Maximum beam width and coverage radius of the single serving FSO transceiver configuration.
Table 4 lists the extended coverage radius of the maximum mFSO configuration for different solar energy levels and receiver aperture radii. The maximum mFSO configuration was obtained using the largest principal beam α_max, the largest m according to (25), and the largest β according to (7) given α_max and m. The coverage radius of the maximum mFSO configuration was approximately twice that of the single FSO transceiver configuration, except for E_solar = 42 kWh. When the solar energy level increased, the maximum m increased; thus, the extended coverage radius increased. However, when m was already large, the extension grew slowly with m. Additionally, the maximum extended coverage was much larger for Rrx = 4 m than for Rrx = 2 m because a receiver with a larger aperture can accept weaker signals.

To compare the network costs incurred by the two configurations, we examined the detailed results in Table 2. The table lists the optimal mFSO configurations and network costs. When E_solar = 42 kWh, the optimal number of supplementary serving FSO transceivers is m = 0; thus, the configuration uses a single serving FSO transceiver. Therefore, these cases were used as references for the single serving FSO transceiver configuration. When E_solar ≥ 50 kWh, all optimal configurations were truly mFSO, and the results were identical for all solar energy levels.
E_solar (kWh) | Max m | Max R_ext (m), Rrx = 2 m | Max R_ext (m), Rrx = 4 m
42  | 6   | 6691  | 13237
50  | 16  | 12174 | 25582
75  | 47  | 13559 | 28403
100 | 78  | 13678 | 28845
125 | 109 | 13711 | 28969
150 | 140 | 13724 | 29020
175 | 171 | 13731 | 29047
200 | 202 | 13735 | 29062
225 | 233 | 13738 | 29071
250 | 264 | 13739 | 29077
275 | 295 | 13740 | 29082
290 | 314 | 13741 | 29084

Table 4: Maximum extended coverage radius of the mFSO configuration when V = 10.
The numbers indicate that the mFSO configuration offered significantly lower costs (listed in columns 16 and 21) than the single serving FSO transceiver configuration (listed in columns 6 and 11) for the same test cases and number of wavelengths W. The costs resulting from the mFSO configuration were as low as 54-87% of those resulting from the single serving FSO transceiver configuration. These numbers confirm that when there is sufficient solar energy, the mFSO configuration is definitively a better choice than the single serving FSO configuration.
[Figure 10: Footprints of HAPs with the mFSO configuration obtained from the topology design for a test case of 1005 ground FSO nodes when E_solar = 75 kWh and W = 80. A circle represents the extended coverage area of a HAP. Small points inside the circle are ground nodes, and the dot at the center of the circle is the projected location of its serving HAP on the ground.]
7.3 Factors impacting the optimal mFSO configuration

Comparing the values of the optimal extended coverage radius in Table 2 and the maximum extended coverage radius in Table 4, we can see that the optimal extended coverage radius was generally not the maximum one. This is reasonable because the maximum configuration uses an excessive number of supplementary serving FSO transceivers.

Low solar energy may render the mFSO configuration impossible. Indeed, E_solar = 42 kWh could afford at most 6 supplementary serving FSO transceivers (see Table 4), which was too few to entirely cover the contour of the principal coverage area. Thus, the single FSO transceiver configuration was the only choice.

When the solar energy level exceeds 50 kWh, its exact value does not affect the optimal configuration. The simulation showed that the optimal configurations were identical for all solar energy levels from 50 kWh/day upward. This is explained by the fact that a greater solar energy level allows configurations with larger coverage to be accepted, but such configurations may be more expensive because they use more supplementary serving FSO transceivers. As a result, large configurations were not selected as optimal configurations. In other words, increasing the solar energy does not necessarily improve the HAP network cost.

Since the optimal multiple serving FSO transceiver configurations were identical for all E_solar ≥ 50 kWh, all other numerical results related to topology design and routing with these solar energy levels were identical and are presented as single results in the subsequent figures.

The coverage of the optimal configurations decreased when the ground nodes became denser. Indeed, test cases with large numbers of ground nodes had greater ground node densities, and columns 13 and 15 of Table 2 show that the optimal m and R_ext decreased when the density increased. The reason is that, with a greater ground node density, a small ground region already contains W ground nodes, which is the maximum serving capacity of a HAP. Therefore, a HAP could serve only a small zone and required only a few supplementary FSO transceivers to cover it.
+ cover the zone.
2066
+ 7.4
2067
+ Numbers of HAPs and inter-HAP links
2068
+ 0
2069
+ 20
2070
+ 40
2071
+ 60
2072
+ 80
2073
+ 100
2074
+ 120
2075
+ 140
2076
+ 160
2077
+ 0
2078
+ 500
2079
+ 1000
2080
+ 1500
2081
+ 2000
2082
+ 2500
2083
+ 3000
2084
+ Number of HAPs
2085
+ Number of ground FSO nodes
2086
+ ˆK for Esolar=42 kWh
2087
+ K for Esolar=42 kWh
2088
+ ˆK for Esolar ≥ 50 kWh kWh
2089
+ K for Esolar ≥ 50 kWh
2090
+ Lower bound
2091
+ (a) W=40
2092
+ 0
2093
+ 20
2094
+ 40
2095
+ 60
2096
+ 80
2097
+ 100
2098
+ 120
2099
+ 140
2100
+ 160
2101
+ 0
2102
+ 500
2103
+ 1000
2104
+ 1500
2105
+ 2000
2106
+ 2500
2107
+ 3000
2108
+ Number of HAPs
2109
+ Number of ground FSO nodes
2110
+ ˆK for Esolar=42 kWh
2111
+ K for Esolar=42 kWh
2112
+ ˆK for Esolar ≥ 50 kWh kWh
2113
+ K for Esolar ≥ 50 kWh
2114
+ Lower bound
2115
+ (b) W=80
2116
+ Figure 11: Number of HAPs and lower bound with (a) W =
2117
+ 40 and (b) W = 80 in different solar energy levels.
2118
+ Since each HAP can serve at most W ground FSO nodes, a lower
2119
+ bound for the number of HAPs is:
2120
+ nLB
2121
+ HAP = |NFSO|
2122
+ W
2123
+ (27)
2124
Figure 11 shows the number of HAPs, the estimated number of HAPs K̂ and the lower bound n_{HAP}^{LB} for (a) W = 40 and (b) W = 80. With E_solar ≥ 50 kWh, the actual number of HAPs was almost identical to K̂ in both subfigures. Furthermore, when W = 40 and E_solar ≥ 50 kWh, the number of HAPs approached the lower bound starting from the test cases with 1000 ground nodes or more. This implies that the number of HAPs was almost optimal.

[Figure 12: Number of inter-HAP links when (a) W = 40 and (b) W = 80 for different solar energy levels.]

Figure 12 presents the absolute numbers of inter-HAP links. The number of inter-HAP links increased with the number of ground nodes, because the network size and traffic demand increased. The number of inter-HAP links clearly decreased when the wavelength density increased from W = 40 to W = 80. In other words, a denser WDM technique helps reduce the number of inter-HAP FSO transceivers and consequently the network cost.

The mFSO configuration allows both the number of HAPs and the number of inter-HAP links to be reduced significantly. Indeed, according to Figure 11, the number of HAPs was much smaller with E_solar ≥ 50 kWh, where the mFSO configuration was used, than with E_solar = 42 kWh, where the single serving FSO configuration was used. A similar phenomenon is observed in Figure 12 for the number of inter-HAP links.
[Figure 13: Real costs and overestimated costs with W = 40 and W = 80.]
7.5 Quality of the cost estimation

Figure 13 presents the estimated and actual costs for different solar energy levels and wavelength densities. The estimated cost was very close to the actual cost, especially for E_solar ≥ 50 kWh and W = 40.

The parameter V, the threshold on the number of inter-HAP links of a HAP, affects the quality of the cost estimation. To evaluate the choice of V, we compared it with the number of inter-HAP links that a HAP finally has. Figure 14 shows the average number of inter-HAP links per HAP. When there were 40 wavelengths per link, the average number of inter-HAP links per HAP varied between 5.7 and 9.3 for E_solar ≥ 50 kWh and V = 10, and between 8.8 and 11.8 for E_solar = 42 kWh, where V was raised to 12. Hence, the value of V was close to the actual number of inter-HAP links required by a HAP. However, when there were 80 wavelengths per link, the average number of inter-HAP links per HAP was reduced to between 4.4 and 8.4, which is somewhat below the threshold V = 10. A smaller V may give a better estimate of the optimal cost in these cases.
+ in these cases.
2261
+ 13
2262
+
2263
+ 0
2264
+ 5
2265
+ 10
2266
+ 15
2267
+ 20
2268
+ 0
2269
+ 500
2270
+ 1000
2271
+ 1500
2272
+ 2000
2273
+ 2500
2274
+ 3000
2275
+ Average number of inter-HAP links per HAP
2276
+ Number of ground FSO nodes
2277
+ Esolar=42 kWh
2278
+ Esolar ≥ 50 kWh
2279
+ (a) W=40
2280
+ 0
2281
+ 5
2282
+ 10
2283
+ 15
2284
+ 20
2285
+ 0
2286
+ 500
2287
+ 1000
2288
+ 1500
2289
+ 2000
2290
+ 2500
2291
+ 3000
2292
+ Average number of inter-HAP links per HAP
2293
+ Number of ground FSO nodes
2294
+ Esolar=42 kWh
2295
+ Esolar ≥ 50 kWh
2296
+ (b) W=80
2297
+ Figure 14: Number of inter-HAP links per HAP when (a)
2298
+ W = 40 and (b) W = 80 for different solar energy levels.
2299
8 Conclusions

Using the mFSO configuration widens a HAP's footprint; however, its application is constrained by the available solar energy of the HAP. Moreover, the mFSO configuration may imply an extra investment cost due to the additional serving FSO transceivers compared with the single FSO transceiver configuration. This study focused on determining the optimal mFSO configuration. First, we proposed a set of closed-form expressions for computing the coverage of an mFSO configuration in terms of the beam widths of the principal and supplementary transceivers and the number of supplementary FSO transceivers. Second, we proposed an algorithm to determine the optimal mFSO configuration that minimizes the total HAP network cost. Third, we designed a HAP network topology using the optimal configuration to achieve a minimal final cost.

The simulation results showed that mFSO significantly extended the HAP footprint. With the testing dataset, the extended footprint radii were generally two times larger than the single FSO transceiver footprint radii, leading to a four-fold larger coverage surface. The network cost with the optimal mFSO configuration was as low as 54% of the network cost when using a single serving FSO transceiver on a HAP.
Acknowledgements

This research was funded by the Vietnam National Foundation for Science and Technology Development (NAFOSTED) under grant number 102.02-2018.305.
References

[1] fSONA, "SONABeam 2500-E+ model specifications." http://fsona.com. Accessed Jan. 2022.

[2] A. Acampora and S. Krishnamurthy, "A broadband wireless access network based on mesh-connected free-space optical links," IEEE Personal Communications, vol. 6, no. 5, pp. 62-65, 1999.

[3] J. Zhang, "Proposal of free space optical mesh network architecture for broadband access," in 2002 IEEE International Conference on Communications. Conference Proceedings. ICC 2002 (Cat. No.02CH37333), vol. 4, pp. 2142-2145, 2002.

[4] B. Moision, B. Erkmen, E. Keyes, T. Belt, O. Bowen, D. Brinkley, P. Csonka, M. Eglington, A. Kazmierski, N. hyong Kim, J. Moody, T. Tu, and W. Vermeer, "Demonstration of free-space optical communication for long-range data links between balloons on Project Loon," in Free-Space Laser Communication and Atmospheric Propagation XXIX (H. Hemmati and D. M. Boroson, eds.), vol. 10096, pp. 259-272, International Society for Optics and Photonics, SPIE, 2017.

[5] C. Chen, A. Grier, M. Malfa, E. Booen, H. Harding, C. Xia, M. Hunwardsen, J. Demers, K. Kudinov, G. Mak, B. Smith, A. Sahasrabudhe, F. Patawaran, T. Wang, A. Wang, C. Zhao, D. Leang, J. Gin, M. Lewis, D. Nguyen, and K. Quirk, "High-speed optical links for UAV applications," in Free-Space Laser Communication and Atmospheric Propagation XXIX (H. Hemmati and D. M. Boroson, eds.), vol. 10096, pp. 316-324, International Society for Optics and Photonics, SPIE, 2017.

[6] Thales Group, "What's up with Stratobus." https://www.thalesgroup.com/en/worldwide/space/news/whats-stratobus, 2017. Accessed Jan. 2022.

[7] D. L. Truong, X. V. Dang, and T. N. Dang, "Survivable free space optical mesh network using high-altitude platforms," Optical Switching and Networking, vol. 47, p. 100716, 2023.

[8] G. Karabulut Kurt, M. G. Khoshkholgh, S. Alfattani, A. Ibrahim, T. S. J. Darwish, M. S. Alam, H. Yanikomeroglu, and A. Yongacoglu, "A Vision and Framework for the High Altitude Platform Station (HAPS) Networks of the Future," IEEE Communications Surveys & Tutorials, vol. 23, no. 2, pp. 729-779, 2021.

[9] R. Miura and M. Oodo, "Wireless Communications System Using Stratospheric Platforms: R and D Program on Telecom and Broadcasting System Using High Altitude Platform Stations," Journal of the Communication Research Laboratory, vol. 48, pp. 33-48, Dec. 2001.

[10] V. V. Mai and H. Kim, "Beam size optimization and adaptation for high-altitude airborne free-space optical communication systems," IEEE Photonics Journal, vol. 11, no. 2, pp. 1-13, 2019.

[11] A. A. Farid and S. Hranilovic, "Outage capacity optimization for free-space optical links with pointing errors," Journal of Lightwave Technology, vol. 25, no. 7, pp. 1702-1710, 2007.

[12] S. C. Arum, D. Grace, P. D. Mitchell, M. D. Zakaria, and N. Morozs, "Energy management of solar-powered aircraft-based high altitude platform for wireless communications," Electronics, vol. 9, no. 1, 2020.

[13] F. Fidler, M. Knapek, J. Horwath, and W. R. Leeb, "Optical Communications for High-Altitude Platforms," IEEE Journal of Selected Topics in Quantum Electronics, vol. 16, pp. 1058-1070, Sep. 2010.

[14] Airbus, "Zephir: Persistance and flexibility." https://lf5422.com/wp-content/uploads/2018/08/0296 18 2 zephyr datasheet e horizontal a4.pdf, 2018. Accessed Jan. 2022.

[15] BAE Systems, "Phasa-35." http://prismaticltd.co.uk/products/phasa-35/, 2018. Accessed Jan. 2022.
A Proof of Lemma 1

Proof. Let x = cos(α/2), a = σH, and b = \frac{P_{tx}R_{rx}^2}{2H^2}; then

P_j^{rx}(x) = e^{-a/x}\, b\, \frac{x^2}{1-x}    (28)

Calculating the derivative of P_j^{rx}(x), we get

P_j^{'rx}(x) = e^{-a/x}\left[\frac{a}{1-x} + \frac{2x - x^2}{(1-x)^2}\right] b    (29)

Thus, the derivative of P_j^{rx}(α) is

P_j^{'rx}(\alpha) = P_j^{'rx}(x)\,(-\sin(\alpha))    (30)

Beam α is limited to [0..π] because it is oriented towards the ground; thus, x ∈ [0..1]. Consequently, 1 − x > 0 and 2x − x² > 0. In addition, a, b > 0, so P_j^{'rx}(x) > 0 for all x ∈ [0..1]. Because −sin(α) < 0, ∀α ∈ [0..π], we have P_j^{'rx}(α) < 0. Consequently, P_j^{rx}(α) decreases with α.
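As a quick numerical sanity check of Lemma 1 (not part of the original proof), one can evaluate P_j^{rx}(α) on a grid of beam widths and verify the monotonic decrease; a and b below are arbitrary positive constants standing in for σH and P_tx R_rx²/(2H²).

import math

a, b = 0.07, 1.0e-3            # illustrative positive values of sigma*H and Ptx*Rrx^2/(2*H^2)

def p_rx(alpha):
    """P_j^rx as a function of the beam width alpha (radians), eq. (28)."""
    x = math.cos(alpha / 2)
    return math.exp(-a / x) * b * x ** 2 / (1 - x)

values = [p_rx(math.radians(d)) for d in (10, 20, 40, 60, 80, 120)]
assert all(v1 > v2 for v1, v2 in zip(values, values[1:]))   # decreasing in alpha, as claimed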
2458
+ B
2459
+ Calculation of extended coverage
2460
+ radius of mFSO configuration
2461
+ This section identifies formulas that calculate the extended cover-
2462
+ age radius of an mFSO configuration characterized by the princi-
2463
+ pal beam width α, supplementary beam width β and number of
2464
+ supplementary beams m.
2465
+ Conventionally, the coverage provided by a bundle of transmit-
2466
+ ters is calculated as if the transmitters project perpendicular to the
2467
+ ground. In mFSO configuration, the principal beam in the center
2468
+ is large, and it pushes the supplementary serving FSO transceiver
2469
+ projection directions far from perpendicular to the ground. These
2470
+ supplementary beams form oblique cones that intersect with the
2471
+ ground plane in ellipses. Considering the elliptical form adds
2472
+ more complexity to the calculation.
2473
+ In Figure 15, H denotes the position of a HAP, and its projec-
2474
+ tion on the ground plane is O, thus HO = H. The principal beam
2475
+ forms a right circular cone whose axis is HO. The cone intersects
2476
+ the ground plane by a circle of radius Rα, which defines the prin-
2477
+ cipal footprint. The beam of a supplementary FSO transceiver is
2478
+ an oblique cone intersecting the ground plane by an ellipse that
2479
+ defines the corresponding supplementary footprint. The cone of
2480
+ the supplementary beam intersects with the cone of the principal
2481
+ beam by two lines: HK and HK′ where K and K′ are the two
2482
+ intersection points of the principal and supplementary footprints.
2483
+ Thus, OK = OK′ = Rα.
2484
+ m supplementary FSO transceivers are arranged evenly around
2485
+ the principal transceiver, each of which is responsible for extending
2486
+ the coverage within an angle of 2π/m from the center O.
2487
+ The
2488
+ responsible angle of the supplementary FSO transceiver in Figure
2489
+ 15 is defined by rays −−→
2490
+ OK and −−→
2491
+ OK′. Thus, �
2492
+ KOK′ = 2π/m.
2493
+ Ray −−→
2494
+ OK intersects with the supplementary beam cone at J,
2495
+ then OJ is the radius of the extended coverage region. Readers
+ may refer to Figure 6 for a complete view of the extended coverage
2497
+ circle and the positions of K, K′ and J on the ground.
2498
+ Figure 15: Computation of the distance from the supplementary
+ FSO transceivers to the border of the extended coverage area,
+ LJ, as a function of β.
2501
+ Since the principal beam width is α, ∠OHK = α/2.
+ Let the base plane of the supplementary beam cone (the plane containing K and K′) cut the cone axis at T,
+ the primary cone axis HO at P, and HJ at J1. Then ∠THK = β/2. In addition, the supplementary cone
+ intersects this base plane in a circle containing K and K′ with center T. Let Rβ be the radius of that circle;
+ then TK = TK′ = Rβ.
+ Let M be the midpoint of KK′; then H, O, T and M lie in the same plane.
+ Let ξ = ∠KHJ. The extended coverage radius is R_ext = OJ = HO·tan(∠OHJ) = H·tan(α/2 + ξ). Thus,
+     R_ext = H · tan( 2·(ξ+α)/2 − α/2 )
+     R_ext = H · [ 2·tan((ξ+α)/2) − tan(α/2)·(1 − tan²((ξ+α)/2)) ] / [ 1 − tan²((ξ+α)/2) + 2·tan((ξ+α)/2)·tan(α/2) ]   (31)
2547
+ B.1    Calculation of tan((ξ+α)/2)
+ Let N be the midpoint of KJ1. As K and J1 lie at the intersection of the supplementary cone and its base
+ plane, HK = HJ1, HN ⊥ KJ1, and HN is the angle bisector of ∠KHJ1. Therefore ∠NHK = ξ/2, and thus
+ ∠NHP = (ξ+α)/2. In addition, since KO is on the base plane of the principal cone, HO ⊥ KO. Thus △PNH
+ and △POK are similar right triangles. Consequently, ∠OKP = ∠NHP = (ξ+α)/2. Furthermore,
+     tan((ξ+α)/2) = OP / OK = OP / Rα   (32)
+ Let ∠OHM = γ and ∠THM = θ; then ∠OHT = θ + γ. Because MO is on the base plane of the principal cone,
+ MO ⊥ HO. In addition, as PT is on the base plane of the supplementary cone whose axis is HT, HT ⊥ PT.
+ Consequently, △PTH and △POM are similar right triangles. We can deduce that ∠PMO = ∠PHT = θ + γ.
+ Therefore,
+     tan(θ + γ) = OP / OM = OP / ( Rα·cos(π/m) )
+ Combining with (32), we deduce:
+     tan((ξ+α)/2) = tan(θ + γ)·cos(π/m)   (33)
+ Thus
+     tan((ξ+α)/2) = [ tan(γ) + tan(θ) ] / [ 1 − tan(γ)·tan(θ) ] · cos(π/m)   (34)
+ Since γ = ∠OHM, tan(γ) = MO/HO. From the right triangle △OMK we have MO = OK·cos(π/m), and from
+ the right triangle △HOK we have HO = OK / tan(α/2). Thus
+     tan(γ) = tan(α/2)·cos(π/m)   (35)
+ It remains to calculate tan(θ).
2614
+ B.2    Calculation of tan(θ)
+ Looking at the right triangle △HTM, we see that:
+     tan(θ) = TM / TH   (36)
+ Since K and K′ are on a circle centered at T, and M is the midpoint of KK′, △TMK is a right triangle, so
+     TM = sqrt( TK² − KM² ) = sqrt( Rβ² − Rα²·sin²(π/m) )   (37)
+ It is easy to see that △THK is another right triangle, so
+     TH = TK / tan(β/2) = Rβ / tan(β/2)   (38)
+ Substituting (37) and (38) into (36), we get
+     tan(θ) = sqrt( Rβ² − Rα²·sin²(π/m) ) / ( Rβ / tan(β/2) ) = tan(β/2)·sqrt( 1 − (Rα/Rβ)²·sin²(π/m) )   (39)
+ From the right triangle △HTK we obtain Rβ = HK·sin(β/2), and from the right triangle △HOK we obtain
+ Rα = HK·sin(α/2). Substituting these values into (39), we obtain:
+     tan(θ) = sqrt( sin²(β/2) − sin²(α/2)·sin²(π/m) ) / cos(β/2)   (40)
+ Substituting tan(γ) from (35) and tan(θ) from (40) into (34) gives tan((ξ+α)/2). Substituting the obtained
+ tan((ξ+α)/2) into (31) then yields R_ext.
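To make the chain (35) → (40) → (34) → (31) concrete, the following sketch evaluates R_ext numerically; the values of α, β, m, and H are arbitrary illustrative choices, not parameters reported in this paper.

```python
import math

def extended_coverage_radius(alpha, beta, m, H):
    """Extended coverage radius R_ext following Eqs. (35), (40), (34) and (31)."""
    tan_gamma = math.tan(alpha / 2) * math.cos(math.pi / m)                  # Eq. (35)
    tan_theta = math.sqrt(math.sin(beta / 2) ** 2
                          - math.sin(alpha / 2) ** 2 * math.sin(math.pi / m) ** 2) \
        / math.cos(beta / 2)                                                 # Eq. (40)
    t = (tan_gamma + tan_theta) / (1 - tan_gamma * tan_theta) \
        * math.cos(math.pi / m)                                              # Eq. (34): t = tan((xi+alpha)/2)
    ta = math.tan(alpha / 2)
    return H * (2 * t - ta * (1 - t ** 2)) / (1 - t ** 2 + 2 * t * ta)       # Eq. (31)

# Illustrative example: 60-degree principal beam, 40-degree supplementary beams,
# m = 6 supplementary transceivers, platform altitude 20 km.
print(extended_coverage_radius(math.radians(60), math.radians(40), 6, 20e3))
```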
2671
9tFAT4oBgHgl3EQfpx3L/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
BNE0T4oBgHgl3EQfPwB8/content/2301.02183v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f7ea372d91ddd2df6f709be1a83d339164bb0cf4b1927c67b6959ace08c64652
3
+ size 1457905
BdE1T4oBgHgl3EQfpQWt/content/tmp_files/2301.03330v1.pdf.txt ADDED
@@ -0,0 +1,3626 @@
1
+ Noname manuscript No.
2
+ (will be inserted by the editor)
3
+ HyRSM++: Hybrid Relation Guided Temporal Set Matching for
4
+ Few-shot Action Recognition
5
+ Xiang Wang · Shiwei Zhang · Zhiwu Qing · Zhengrong Zuo · Changxin Gao ·
6
+ Rong Jin · Nong Sang
7
+ Received: date / Accepted: date
8
+ Abstract Few-shot action recognition is a challenging but
9
+ practical problem aiming to learn a model that can be eas-
10
+ ily adapted to identify new action categories with only a few
11
+ labeled samples. Recent attempts mainly focus on learning
12
+ deep representations for each video individually under the
13
+ episodic meta-learning regime and then performing tempo-
14
+ ral alignment to match query and support videos. However,
15
+ they still suffer from two drawbacks: (i) learning individ-
16
+ ual features without considering the entire task may result
17
+ in limited representation capability, and (ii) existing align-
18
+ ment strategies are sensitive to noises and misaligned in-
19
+ stances. To handle the two limitations, we propose a novel
20
+ Hybrid Relation guided temporal Set Matching (HyRSM++)
21
+ approach for few-shot action recognition. The core idea of
22
+ HyRSM++ is to integrate all videos within the task to learn
23
+ discriminative representations and involve a robust match-
24
+ ing technique. To be specific, HyRSM++ consists of two key
25
+ components, a hybrid relation module and a temporal set
26
+ matching metric. Given the basic representations from the
27
+ feature extractor, the hybrid relation module is introduced
28
+ to fully exploit associated relations within and cross videos
29
+ in an episodic task and thus can learn task-specific embed-
30
+ dings. Subsequently, in the temporal set matching metric, we
31
+ carry out the distance measure between query and support
32
+ Xiang Wang · Zhiwu Qing · Zhengrong Zuo · Changxin Gao (Corre-
33
+ sponding author) · Nong Sang
34
+ Key Laboratory of Ministry of Education for Image Processing and
35
+ Intelligent Control, School of Artificial Intelligence and Automation,
36
+ Huazhong University of Science and Technology
37
+ E-mail: {wxiang, qzw, zhrzuo, cgao, nsang}@hust.edu.cn
38
+ Shiwei Zhang
39
+ Alibaba Group
40
+ E-mail: zhangjin.zsw@alibaba-inc.com
41
+ Rong Jin
42
+ Twitter
43
+ E-mail: rongjinemail@gmail.com
44
+ videos from a set matching perspective and design a bidi-
45
+ rectional Mean Hausdorff Metric to improve the resilience
46
+ to misaligned instances. In addition, we explicitly exploit
47
+ the temporal coherence in videos to regularize the matching
48
+ process. In this way, HyRSM++ facilitates informative cor-
49
+ relation exchange among videos and enables flexible pre-
50
+ dictions under the data-limited scenario. Furthermore, we
51
+ extend the proposed HyRSM++ to deal with the more chal-
52
+ lenging semi-supervised few-shot action recognition and un-
53
+ supervised few-shot action recognition tasks. Experimental
54
+ results on multiple benchmarks demonstrate that our method
55
+ consistently outperforms existing methods and achieves
56
+ state-of-the-art performance under various few-shot set-
57
+ tings. The source code is available at https://github.
58
+ com/alibaba-mmai-research/HyRSMPlusPlus.
59
+ Keywords Few-shot Action Recognition · Set Match-
60
+ ing · Semi-supervised Few-shot Action Recognition ·
61
+ Unsupervised Few-shot Action Recognition
62
+ 1 Introduction
63
+ Recently, the development of large-scale video bench-
64
+ marks [8, 23, 6, 13, 24] and deep networks [88, 51, 18,
65
+ 89, 65, 52] have significantly boosted the progress of ac-
66
+ tion recognition. To achieve this success, we typically re-
67
+ quire large amounts of manually labeled data. However, ac-
68
+ quiring these labeled examples consumes a lot of manpower
69
+ and time, which actually limits further applications of this
70
+ task. In this case, researchers look to alternatives to achieve
71
+ action classification without extensive costly labeling. Few-
72
+ shot action recognition is a promising direction to reduce
73
+ manual annotations and thus has attracted much attention
74
+ recently [112, 105]. It aims at learning to classify unseen
75
+ action classes with extremely few annotated examples.
76
+ arXiv:2301.03330v1 [cs.CV] 9 Jan 2023
+ Fig. 1 (a) Concept of the proposed hybrid relation module. We adaptively produce task-specific video embeddings by extracting relevant discrim-
+ inative patterns across videos in an episodic task. (b) Example of make coffee: current temporal alignment metrics tend to be strict, resulting in
+ incorrect matches on misaligned videos. In contrast, the proposed temporal set matching metric, which combines a set matching technique with
+ temporal coherence regularization, is more flexible in finding the best correspondences.
119
+ To solve the few-shot data-scarcity problem, popu-
120
+ lar attempts [112, 7, 68, 106] are mainly based on the
121
+ metric-based meta-learning technique [86], in which a com-
122
+ mon embedding space is first learnt via episodic training
123
+ and then an explicit or implicit alignment metric is em-
124
+ ployed to calculate the distances between the query (test)
125
+ videos and support (reference) videos for classification in
126
+ an episodic task. Typically, Ordered Temporal Alignment
127
+ Module (OTAM) [7] adopts a deep feature extractor to con-
128
+ vert an input video into a frame feature sequence indepen-
129
+ dently and explicitly explores the ordered temporal align-
130
+ ment path between support and query videos in this feature
131
+ space. Temporal-Relational CrossTransformer (TRX) [68]
132
+ learns a deep embedding space and tries to exhaustively con-
133
+ struct temporally-corresponding sub-sequences of actions to
134
+ compare. Some recent works [33, 94, 108, 62] propose to
135
+ design multi-level metrics for few-shot action recognition.
136
+ Although these methods have achieved remarkable per-
137
+ formance, there are still two limitations: individual feature
138
+ learning and inflexible matching strategy. First, discrimina-
139
+ tive interactive clues cross videos in an episode are ignored
140
+ when each video is considered independently during repre-
141
+ sentation learning. As a result, these methods actually as-
142
+ sume the learned representations are equally effective on
143
+ different episodic tasks and maintain a fixed set of video fea-
144
+ tures for all test-time tasks, i.e., task-agnostic, which hence
145
+ might overlook the most discriminative dimensions for the
146
+ current task. Existing work also shows that the task-agnostic
147
+ methods tend to suffer inferior generalization in other fields,
148
+ such as image recognition [47, 101], NLP [66, 57], and in-
149
+ formation retrieval [53]. Second, actions are usually com-
150
+ plicated and involve many subactions with different orders
151
+ and offsets, which may cause the failure of existing tempo-
152
+ ral alignment metrics. For example, as shown in Figure 1(b),
153
+ to make coffee, you can pour water before pour coffee pow-
154
+ der, or in a reverse order, hence it is hard for recent temporal
155
+ alignment strategies to find the right correspondences. Thus
156
+ a more flexible metric is required to cope with the misalign-
157
+ ment.
158
+ Inspired by the above observations, we thus solve the
159
+ few-shot action recognition problem by developing a novel
160
+ Hybrid Relation guided temporal Set Matching algorithm,
161
+ dubbed HyRSM++, which is architecturally composed of a
162
+ hybrid relation module and a temporal set matching metric.
163
+ In the hybrid relation module, we argue that the considerable
164
+ relevant relations within and cross videos are beneficial to
165
+ generate a set of customized features that are discriminative
166
+ for a given task. To this end, we first apply an intra-relation
167
+ function to strengthen structural patterns within a video via
168
+ modeling long-range temporal dependencies. Then an inter-
169
+ relation function operates on different videos to extract rich
170
+ semantic information to reinforce the features which are
171
+ more relevant to query predictions, as shown in Figure 1(a).
172
+ By this means, we can learn task-specific embeddings for
173
+ the few-shot task. On top of the hybrid relation module,
174
+ we design a novel temporal set matching metric consist-
175
+ ing of a bidirectional Mean Hausdorff Metric and a tem-
176
+ poral coherence regularization to calculate the distances be-
177
+ tween query and support videos, as shown in Figure 1(b).
178
+ The objective of the bidirectional Mean Hausdorff Metric
179
+ is to measure video distance from the set matching per-
180
+ spective. Concretely, we treat each video as a set of frames
181
+ and alleviate the strictly ordered constraints to acquire bet-
182
+ ter query-support correspondences. Furthermore, to exploit
183
+ long-range temporal order dependencies, we explicitly im-
184
+ pose temporal coherence regularization on the input videos
185
+ for more stable measurement without introducing extra net-
186
+ work parameters. In this way, by combining the hybrid re-
187
+ lation module and temporal set matching metric, the pro-
188
+ posed HyRSM++ can sufficiently integrate semantically re-
189
+ lational representations within the entire task and provide
190
+ flexible video matching in an end-to-end manner. We evalu-
191
+ ate the proposed HyRSM++ on six challenging benchmarks
192
+ and achieve remarkable improvements again current state-
193
+ of-the-art methods.
194
+ Although the intuition of HyRSM++ is straightforward,
195
+ it is elaborately designed for few-shot action recognition.
196
+ Can our HyRSM++ be applied to the more challenging
197
+
198
+ HyRSM++: Hybrid Relation Guided Temporal Set Matching for Few-shot Action Recognition
199
+ 3
200
+ semi-supervised or unsupervised action recognition tasks
201
+ even if the settings are entirely different? To answer this
202
+ question, we extend HyRSM++ to the semi-supervised and
203
+ unsupervised objectives with minor task adaptation modi-
204
+ fications, and experimental results indicate that HyRSM++
205
+ can be well adapted to different scenarios well and achieves
206
+ impressive performance.
207
+ In summary, we make the following four contributions:
208
+ (1) We propose a novel hybrid relation module to cap-
209
+ ture the intra- and inter-relations inside the episodic task,
210
+ yielding task-specific representations for different tasks.
211
+ (2) We reformulate the query-support video pair distance
212
+ metric as a set matching problem and develop a bidirectional
213
+ Mean Hausdorff Metric, which can be robust to complex ac-
214
+ tions. To utilize long-term temporal order cues, we further
215
+ design a new temporal coherence regularization on videos
216
+ without adding network parameters.
217
+ (3) We conduct extensive experiments on six challeng-
218
+ ing datasets to verify that the proposed HyRSM++ achieves
219
+ superior performance over the state-of-the-art methods.
220
+ (4) We show that the proposed HyRSM++ can be di-
221
+ rectly extended to the more challenging semi-supervised
222
+ few-shot action recognition and unsupervised few-shot ac-
223
+ tion recognition task with minor modifications.
224
+ In this paper, we have extended our preliminary CVPR-
225
+ 2022 conference version [91] in the following aspects. i)
226
+ We integrate the temporal coherence regularization and set
227
+ matching strategy into a temporal set matching metric so
228
+ that the proposed metric can explicitly leverage temporal
229
+ order information in videos and match flexibly. Note that
230
+ temporal coherence regularization does not introduce ad-
231
+ ditional parameters and will not increase the burden of in-
232
+ ference. ii) We conduct more comprehensive ablation stud-
233
+ ies to verify the effectiveness and efficiency of the pro-
234
+ posed HyRSM++. iii) We clearly improve the few-shot
235
+ action recognition performance over the previous version.
236
+ Experimental results also manifest that HyRSM++ signifi-
237
+ cantly surpasses existing competitive methods and achieves
238
+ state-of-the-art performance. iv) We show that the proposed
239
+ HyRSM++ can be easily extended to the more challeng-
240
+ ing semi-supervised few-shot recognition and unsupervised
241
+ few-shot action recognition tasks.
242
+ 2 Related Work
243
+ In the literature, there are some techniques related to this
244
+ paper, mainly including few-shot image classification, set
245
+ matching, temporal coherence, semi-supervised few-shot
246
+ learning, unsupervised few-shot learning, and few-shot ac-
247
+ tion recognition. In this section, we will briefly review them
248
+ separately.
249
+ Few-shot Image Classification.
250
+ Recently, the research
251
+ of few-shot learning [17, 55, 56] has proceeded roughly
252
+ along with the following directions: data augmentation,
253
+ optimization-based, and metric-based. Data augmentation is
254
+ an intuitive method to increase the number of training sam-
255
+ ples and improve the diversity of data. Mainstream strategies
256
+ include spatial deformation [70, 67] and semantic feature
257
+ augmentation [9, 100]. Optimization-based methods learn
258
+ a meta-learner model that can quickly adapt to a new task
259
+ given a few training examples. These algorithms include the
260
+ LSTM-based meta-learner [74], learning efficient model ini-
261
+ tialization [19], and learning stochastic gradient descent op-
262
+ timizer [50]. Metric-based methods attempt to address the
263
+ few-shot classification problem by ”learning to compare”.
264
+ This family of approaches aims to learn a feature space and
265
+ compare query and support images through Euclidean dis-
266
+ tance [76, 101, 99], cosine similarity [86, 98], or learnable
267
+ non-linear metric [80, 29, 47]. Our work is more closely re-
268
+ lated to the metric-based methods [47, 101] that share the
269
+ same spirit of learning task-specific features, whereas we fo-
270
+ cus on solving the more challenging few-shot action recog-
271
+ nition task with diverse spatio-temporal dependencies. In
272
+ addition, we will further point out the differences and con-
273
+ duct performance comparisons in the experimental section.
274
+ Set Matching. The objective of set matching is to accu-
275
+ rately measure the similarity of two sets, which have re-
276
+ ceived much attention over the years. Set matching tech-
277
+ niques can be used to efficiently process complex data struc-
278
+ tures [2, 72, 3] and has been applied in many computer vi-
279
+ sion fields, including face recognition [63, 93, 92], object
280
+ matching [73, 107], etc. Among them, Hausdorff distance
281
+ is an important alternative to handle set matching problems.
282
+ Hausdorff distance and its variants have been widely used
283
+ in the field of image matching and achieved remarkable re-
284
+ sults [34, 16, 35, 107, 82, 79]. Inspired by these great suc-
285
+ cesses, we introduce set matching into the few-shot action
286
+ recognition field for the first time.
287
+ Temporal Coherence. Videos naturally involve temporal
288
+ continuity, and there is much effort to effectively explore
289
+ how to leverage this property [11, 22, 27, 58]. Inverse Dif-
290
+ ference Moment (IDM) [11] is a commonly used measure of
291
+ local homogeneity, which assumes that in a sequence, two
292
+ elements are more similar if they are located next to each
293
+ other. The idea of IDM has been widely applied to texture
294
+ feature extraction [60], face recognition [59], and unsuper-
295
+ vised representation learning [22, 27] and achieved remark-
296
+ able performance. In this paper, we focus on constraining
297
+ the few-shot matching process by exploiting temporal co-
298
+ herence.
299
+ Semi-supervised Few-shot Learning. In practical appli-
300
+ cation scenarios, there are usually many unlabeled samples.
301
+ Semi-supervised few-shot learning considers learning new
302
+ concepts in the presence of extra unlabeled data. Ren et
303
+ al. [71] first introduce the challenging semi-supervised few-
304
+ shot learning paradigm and refine the prototypes by adopt-
+ ing a soft k-means on unlabeled data. LST [49] proposes a
309
+ novel recursive-learning-based self-training strategy for ro-
310
+ bust convergence of the inner loop. TransMatch [103] de-
311
+ velops a new transfer learning framework by incorporat-
312
+ ing MixMatch [4] and existing few-shot learning methods.
313
+ PTN [31] employs the Poisson learning model to obtain in-
314
+ formative presentations between the labeled and unlabeled
315
+ data. PLCM [32] and iLPC [44] focus on cleaning predicted
316
+ pseudo-labels and generating accurate confidence estima-
317
+ tion. In the field of semi-supervised few-shot action recog-
318
+ nition, LIM [113] utilizes a label-independent memory to
319
+ preserve a feature bank and produces class prototypes for
320
+ query classification.
321
+ Unsupervised Few-shot Learning. The objective of un-
322
+ supervised few-shot learning is to utilize unlabeled samples
323
+ to construct meta-tasks for few-shot training. CACTUs [30]
324
+ and UFLST [36] construct many tasks by clustering em-
325
+ beddings and optimize the meta-learning process over the
326
+ constructed tasks. UMTRA [38] generates artificial tasks
327
+ by randomly sampling support examples from the training
328
+ set and produces corresponding queries by augmentation.
329
+ ULDA [69] and AAL [1] follow this paradigm to randomly
330
+ group augmented images for meta-learning and point out the
331
+ importance of data augmentation. More recently, MetaU-
332
+ VFS [64] presents the first unsupervised meta-learning al-
333
+ gorithm for few-shot action recognition and adopts a two-
334
+ stream 2D and 3D CNN model to explore spatial and tem-
335
+ poral features via contrastive learning.
336
+ Few-shot Action Recognition.
337
+ The difference between
338
+ few-shot action recognition and the previous few-shot learn-
339
+ ing approaches is that it deals with more complex higher
340
+ dimensional video data instead of two-dimensional images.
341
+ The existing methods mainly focus on metric-based learn-
342
+ ing. OSS-Metric Learning [40] adopts OSS-Metric of video
343
+ pairs to match videos. TARN [5] learns an attention-based
344
+ deep-distance measure from an attribute to a class center
345
+ for zero-shot and few-shot action recognition. CMN [112]
346
+ utilizes a multi-saliency embedding algorithm to encode
347
+ video representations. AMeFu-Net [20] uses depth infor-
348
+ mation to assist learning. Xian et al. [95] propose to learn
349
+ a generative adversarial network and produce video fea-
350
+ tures of novel classes for generalization. Coskun et al. [12]
351
+ leverage object-object interaction, hand grasp, optical flow,
352
+ and hand trajectory to learn an egocentric few-shot classi-
353
+ fier. OTAM [7] preserves the frame ordering in video data
354
+ and estimates distances with ordered temporal alignment.
355
+ ARN [105] introduces a self-supervised permutation invari-
356
+ ant strategy for spatio-temporal modeling. ITANet [106]
357
+ proposes a frame-wise implicit temporal alignment strategy
358
+ to achieve accurate and robust video matching. TRX [68]
359
+ matches actions by matching plentiful tuples of different
360
+ sub-sequences. More recently, STRM [84] makes use of lo-
361
+ cal and global enrichment mechanism for spatio-temporal
362
+ modeling based on TRX [68] and enforces class-separability
363
+ at different phase. Some works [33, 94, 108, 62] propose
364
+ to design multi-level metrics for few-shot action recogni-
365
+ tion. Note that most above methods focus on learning video
366
+ embedding independently. Unlike these previous methods,
367
+ our HyRSM++ improves the transferability of embedding
368
+ by learning intra- and inter-relational patterns that can bet-
369
+ ter generalize to unseen classes.
370
+ 3 Method
371
+ In this section, we first formulate the definition of the
372
+ few-shot action recognition task. Then we present our Hy-
373
+ brid Relation guided temporal Set Matching (HyRSM++)
374
+ method.
375
+ 3.1 Problem formulation
376
+ Few-shot action recognition aims to obtain a model that can
377
+ generalize well to new classes when limited labeled video
378
+ data is available. To make training more faithful to the test
379
+ environment, we adopt the episodic training manner [86] for
380
+ few-shot adaptation as in previous work [86, 7, 68, 106]. In
381
+ each episodic task, there are two sets, i.e., a support set S
382
+ and a query set Q. The support set S contains N × K sam-
383
+ ples from N different action classes, and each class contains
384
+ K support videos, termed the N-way K-shot problem. The
385
+ goal is to classify the query videos in Q into N classes with
386
+ these support videos.
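For concreteness, here is a minimal sketch of how one N-way K-shot episode could be sampled from a labeled video collection; the data structure (a dict mapping class names to lists of video paths) is an assumption for illustration.

```python
import random

def sample_episode(videos_by_class, n_way=5, k_shot=1, n_query=1):
    """Sample one N-way K-shot episode: a support set and a query set."""
    classes = random.sample(list(videos_by_class), n_way)
    support, query = [], []
    for label, cls in enumerate(classes):
        vids = random.sample(videos_by_class[cls], k_shot + n_query)
        support += [(v, label) for v in vids[:k_shot]]
        query += [(v, label) for v in vids[k_shot:]]
    return support, query
```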
387
+ 3.2 HyRSM++
388
+ Pipeline. The overall architecture of HyRSM++ is illus-
389
+ trated in Figure 2. For each input video sequence, we first
390
+ divide it into T segments and extract a snippet from each
391
+ segment, as in previous methods [88, 7]. This way, in an
392
+ episodic task, the support set can be denoted as S = {s_1, s_2, ..., s_{N×K}}, where s_i = {s_i^1, s_i^2, ..., s_i^T}. For sim-
+ plicity and convenience, we discuss the process of the N-way 1-shot problem, i.e., K = 1, and consider that the
+ query set Q contains a single video q. Then we apply an embedding model to extract the feature representations
+ for each video sequence and obtain the support features F_s = {f_{s_1}, f_{s_2}, ..., f_{s_N}} and the query feature f_q, where
+ f_{s_i} = {f_i^1, f_i^2, ..., f_i^T} and f_q = {f_q^1, f_q^2, ..., f_q^T}. After
411
+ that, we input Fs and fq to the hybrid relation module to
412
+ learn task-specific features, resulting in ˜Fs and ˜fq. Finally,
413
+ the enhanced representations ˜Fs and ˜fq are fed into the set
414
+ matching metric to generate matching scores. Based on the
415
+ output scores, we can train or test the total framework.
+ Fig. 2 Schematic illustration of the proposed Hybrid Relation guided temporal Set Matching (HyRSM++) approach on a 3-way 1-shot problem.
452
+ Given an episode of video data, a feature embedding network is first employed to extract their feature vectors. Then, a hybrid relation module is
+ applied to integrate rich information within each video and across videos with intra-relation and inter-relation functions. Finally, the task-specific
454
+ features are fed forward into a temporal set matching metric for matching score prediction. Best viewed in color.
455
+ Hybrid relation module. Given the features Fs and fq
456
+ output by the embedding network, current approaches, e.g.,
457
+ OTAM [7], directly apply a classifier C in this feature space.
458
+ They can be formulated as:
459
+ yi = C(fsi, fq)
460
+ (1)
461
+ where yi is the matching score between fsi and fq. During
462
+ training, yi = 1 if they belong to the same class, otherwise
463
+ yi = 0. In the testing phase, yi can be adopted to predict
464
+ the query label. From the perspective of probability theory,
465
+ it makes decisions based on the priors fsi and fq:
466
+ yi = P((fsi, fq)|fsi, fq)
467
+ (2)
468
+ which is a typical task-agnostic method. However, the task-
469
+ agnostic embedding is often vulnerable to overfit irrelevant
470
+ representations [29, 47] and may fail to transfer to unseen
471
+ classes not yet observed in the training stage.
472
+ Unlike the previous methods, we propose to learn task-
473
+ specific features for each target task. To achieve this goal, we
474
+ introduce a hybrid relation module to generate task-specific
475
+ features by capturing rich information from different videos
476
+ in an episode. Specifically, we elaborately design the hybrid
477
+ relation module H in the following form:
478
+ ˜fi = H(fi, G); fi ∈ [Fs, fq], G = [Fs, fq]
479
+ (3)
480
+ That is, we improve the feature fi by aggregating seman-
481
+ tic information cross video representations, i.e., G, in an
482
+ episodic task, allowing the obtained task-specific feature ˜fi
483
+ to be more discriminative than the isolated feature. For ef-
484
+ ficiency, we further decompose hybrid relation module into
485
+ two parts: intra-relation function Ha and inter-relation func-
486
+ tion He.
487
+ The intra-relation function aims to strengthen structural
488
+ patterns within a video by capturing long-range temporal de-
489
+ pendencies. We express this process as:
490
+     f_i^a = H_a(f_i)   (4)
+ where f_i^a ∈ R^{T×C} is the output of f_i through the intra-relation function and has the same shape as f_i. Note
+ that the intra-relation function has many alternative implementations, including multi-head self-attention (MSA),
+ Transformer [85], Bi-LSTM [25], Bi-GRU [10], etc.; it is highly flexible and can be any one of them.
501
+ Based on the features generated by the intra-relation
502
+ function, an inter-relation function is deployed to semanti-
503
+ cally enhance the features cross different videos:
504
+     f_i^e = H_e(f_i^a, G^a) = Σ_{j=1}^{|G^a|} ( κ( ψ(f_i^a), ψ(f_j^a) ) · ψ(f_j^a) )   (5)
+ where G^a = [F_s^a, f_q^a], ψ(·) is a global average pooling layer, and κ(f_i^a, f_j^a) is a learnable function that
+ calculates the semantic correlation between f_i^a and f_j^a. The underlying logic is that if the correlation score
+ κ(f_i^a, f_j^a) between f_i^a and f_j^a is high, they tend to have the same semantic content, hence we can borrow
+ more information from f_j^a to elevate the representation f_i^a, and vice versa. In the same way, if the score
+ κ(f_i^a, f_i^a) is less than 1, it indicates that some irrelevant information in f_i^a should be suppressed.
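As a rough illustration (not the authors' exact implementation), the intra- and inter-relation functions could be instantiated with attention as sketched below in PyTorch; the layer sizes are assumptions, and the paper's Expand-Concatenate-Convolution aggregation is simplified here to a residual addition.

```python
import torch
import torch.nn as nn

class HybridRelation(nn.Module):
    """Sketch of H_a (within-video) and H_e (cross-video) relation modeling."""

    def __init__(self, dim=2048, heads=8):
        super().__init__()
        self.intra = nn.MultiheadAttention(dim, heads, batch_first=True)  # H_a
        self.inter = nn.MultiheadAttention(dim, heads, batch_first=True)  # H_e

    def forward(self, feats):                                  # feats: [V, T, C], all videos in the episode
        # Intra-relation: temporal self-attention inside each video (Eq. 4).
        fa, _ = self.intra(feats, feats, feats)                # [V, T, C]
        # Inter-relation: attend over the pooled descriptors of all videos (Eq. 5).
        pooled = fa.mean(dim=1, keepdim=True).transpose(0, 1)  # [1, V, C]
        fe, _ = self.inter(pooled, pooled, pooled)             # [1, V, C]
        # Broadcast the episode-level correction back to every frame.
        return fa + fe.transpose(0, 1)                         # [V, T, C]
```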
540
+ In this way, we can improve the feature discrimination
541
+ by taking full advantage of the limited samples in each
542
+ episodic task. The inter-relation function has similar
+ implementations to the intra-relation function but with a dif-
+ ferent target. After the inter-relation function, we employ
+ an Expand-Concatenate-Convolution operation to aggregate
+ information, as shown in Figure 2, where the output feature
550
+ ˜fi has the same shape as f e
551
+ i . In the form of prior, our method
552
+ can be formulated as:
553
+ yi = P(( ˜fsi, ˜fq)|H(fsi, G), H(fq, G)); G = [Fs, fq]
554
+ (6)
555
+ Intuitively, compared with Equation 2, it can be conducive
556
+ to making better decisions because more priors are provided.
557
+ In particular, the hybrid relation module is a plug-and-play
558
+ unit. In the experiment, we will fully explore different con-
559
+ figurations of the hybrid relation module and further inves-
560
+ tigate its insertability.
561
+ Temporal set matching metric. Many prior few-shot
562
+ action recognition algorithms usually impose a strict tempo-
563
+ ral alignment strategy on generated video representations for
564
+ few-shot classification. However, they tend to produce
+ failed matches when encountering misaligned video
566
+ instances. Instead, we develop a flexible metric based on set
567
+ matching that explicitly discovers optimal frame matching
568
+ pairs for the ability to be insensitive to misalignment. Con-
569
+ cretely, the proposed temporal set matching metric contains
570
+ two parts, bidirectional Mean Hausdorff Metric (Bi-MHM)
571
+ and temporal coherence regularization, respectively. We will
572
+ describe them in detail below.
573
+ Given the relation-enhanced features ˜Fs and ˜fq, we
574
+ present a novel metric to enable efficient and flexible match-
575
+ ing. In this metric, we treat each video as a set of T frames
576
+ and reformulate distance measurement between videos as
577
+ a set matching problem, which is robust to complicated
578
+ instances, whether they are aligned or not. Specifically,
579
+ we achieve this goal by modifying the Hausdorff distance,
580
+ which is a typical set matching approach. The standard
581
+ Hausdorff distance D can be formulated as:
582
+     d( ˜f_i, ˜f_q) = max_{ ˜f_i^a ∈ ˜f_i }  min_{ ˜f_q^b ∈ ˜f_q } ‖ ˜f_i^a − ˜f_q^b ‖
+     d( ˜f_q, ˜f_i) = max_{ ˜f_q^b ∈ ˜f_q }  min_{ ˜f_i^a ∈ ˜f_i } ‖ ˜f_q^b − ˜f_i^a ‖
+     D = max( d( ˜f_i, ˜f_q), d( ˜f_q, ˜f_i) )   (7)
+ where ˜f_i ∈ R^{T×C} contains T frame features, and ‖·‖ is a distance measurement function, which is the cosine
+ distance in our method.
612
+ However, the previous methods [102, 21, 111, 16]
613
+ pointed out that Hausdorff distance can be easily affected
614
+ by noisy examples, resulting in inaccurate measurements.
615
+ Hence they employ a directed modified Hausdorff distance that is robust to noise, as follows:
+     d_m( ˜f_i, ˜f_q) = (1/N_i) Σ_{ ˜f_i^a ∈ ˜f_i }  min_{ ˜f_q^b ∈ ˜f_q } ‖ ˜f_i^a − ˜f_q^b ‖   (8)
+ where N_i is the length of ˜f_i and is equal to T in this paper.
633
+ Hausdorff distance and its variants achieve great success in
634
+ image matching [82, 16, 34] and face recognition [21, 79].
635
+ We thus propose to introduce the set matching strategy into
636
+ the few-shot action recognition field and further design a
637
+ novel bidirectional Mean Hausdorff Metric (Bi-MHM):
638
+     D_b = (1/N_i) Σ_{ ˜f_i^a ∈ ˜f_i }  min_{ ˜f_q^b ∈ ˜f_q } ‖ ˜f_i^a − ˜f_q^b ‖  +  (1/N_q) Σ_{ ˜f_q^b ∈ ˜f_q }  min_{ ˜f_i^a ∈ ˜f_i } ‖ ˜f_q^b − ˜f_i^a ‖   (9)
+ where N_i and N_q are the lengths of the support feature ˜f_i and the query feature ˜f_q, respectively.
667
+ The proposed Bi-MHM is a symmetric function, and the
668
+ two items are complementary to each other. From Equa-
669
+ tion 9, we can see that Db automatically finds the best
+ correspondences between two videos, e.g., ˜fi and ˜fq. Note
671
+ that our Bi-MHM is a non-parametric classifier and does not
672
+ involve numerous non-parallel calculations, which helps to
673
+ improve computing efficiency and transfer ability compared
674
+ to the previous complex alignment classifiers [7, 68]. More-
675
+ over, the hybrid relation module and Bi-MHM can mutually
676
+ reinforce each other, consolidating the correlation between
677
+ two videos collectively.
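A minimal sketch of the bidirectional Mean Hausdorff Metric of Eq. (9) with cosine distance, assuming the inputs are the relation-enhanced frame features of one support video and one query video:

```python
import torch
import torch.nn.functional as F

def bi_mhm(support, query):
    """Bidirectional Mean Hausdorff Metric (Eq. 9); support: [T_s, C], query: [T_q, C]."""
    s = F.normalize(support, dim=-1)
    q = F.normalize(query, dim=-1)
    dist = 1.0 - s @ q.t()                    # [T_s, T_q] pairwise cosine distances
    d_sq = dist.min(dim=1).values.mean()      # each support frame -> closest query frame
    d_qs = dist.min(dim=0).values.mean()      # each query frame -> closest support frame
    return d_sq + d_qs
```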
678
+ The Bi-MHM approach described above assumes video
679
+ sequence representations belonging to the same action have
680
+ the same set structure in the feature space and does not
681
+ explicitly utilize temporal order information. However, it
682
+ would be much more general to take the inherent temporal
683
+ information in videos into account. For this reason, we take
684
+ advantage of the temporal coherence that naturally exists in
685
+ sequential video data and construct a temporal coherence
686
+ regularization to further constrain the matching process by
687
+ incorporating temporal order information.
688
+ IDM [11] is a commonly used means that can exploit
689
+ temporal coherence within videos, which can be formulated
690
+ as:
691
+     I( ˜f_i) = Σ_{a=1}^{T} Σ_{b=1}^{T}  [ 1 / ((a − b)^2 + 1) ] · ‖ ˜f_i^a − ˜f_i^b ‖   (10)
705
+ where ˜fi is the input video feature, T is the temporal length
706
+ of the video, and the above loss encourages frames that are
707
+ close in time to be close in the feature space as well. In addi-
708
+ tion, there is another way to use temporal order information
709
+ in the literature [22, 59]:
710
+     I( ˜f_i; ˜f_i^a, ˜f_i^b) = ‖ ˜f_i^a − ˜f_i^b ‖                  if |a − b| = 1
+     I( ˜f_i; ˜f_i^a, ˜f_i^b) = max( 0, m − ‖ ˜f_i^a − ˜f_i^b ‖ )    if |a − b| > 1   (11)
728
+ where m is the size of the margin. Equation 11 utilizes
729
+ the video coherence property by pulling two frame features
730
+ closer if they are adjacent, pushing farther apart by one mar-
731
+ gin m if they are not adjacent. Through observation, we can
732
+
733
735
+ see that in Equation 10, all frames are pulled close regardless
736
+ of time distance. In Equation 11, all frame features are sep-
737
+ arated by a margin m if they are not adjacent to the current
738
+ frame, i.e., all pairs are treated equally. The above two man-
739
+ ners do not fully exploit the smooth and continuous changes
740
+ of the video. To this end, we propose a novel form to mine
741
+ temporal coherence property:
742
+     I( ˜f_i; ˜f_i^a, ˜f_i^b) = [ 1 / ((a − b)^2 + 1) ] · ‖ ˜f_i^a − ˜f_i^b ‖    if |a − b| ≤ δ
+     I( ˜f_i; ˜f_i^a, ˜f_i^b) = max( 0, m_ab − ‖ ˜f_i^a − ˜f_i^b ‖ )             if |a − b| > δ   (12)
+ where δ is a window size and m_ab = 1 − e^{ −(|a−b|−δ)^2 / (2σ^2) }
764
+ for smooth temporal coherence. Compared with the origi-
765
+ nal forms, our proposed temporal coherence regularization
766
+ can better reflect the continuous change of video and thus
767
+ lead to better performance.
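The piecewise regularizer of Eq. (12) can be written compactly as below; the pairwise Euclidean distance and the default values of δ and σ are assumptions for illustration.

```python
import torch

def temporal_coherence_loss(feats, delta=1, sigma=1.0):
    """Temporal coherence regularization of Eq. (12) for one video; feats: [T, C]."""
    T = feats.size(0)
    idx = torch.arange(T, dtype=feats.dtype)
    gap = (idx[:, None] - idx[None, :]).abs()              # |a - b|
    d = torch.cdist(feats, feats, p=2)                     # pairwise frame distances
    near = d / (gap ** 2 + 1)                              # pull frames close in time together
    margin = 1.0 - torch.exp(-((gap - delta) ** 2) / (2 * sigma ** 2))
    far = torch.clamp(margin - d, min=0.0)                 # push distant frames apart up to m_ab
    return torch.where(gap <= delta, near, far).mean()
```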
768
+ In the training phase, we take the negative distance for
769
+ each class as logit. Then we utilize the same cross-entropy
770
+ loss as in [7, 68], the auxiliary semantic loss [46, 54] and the
771
+ temporal coherence regularization to jointly train the model.
772
+ The auxiliary semantic loss refers to the cross-entropy loss
773
+ on the real action classes, which is widely used to improve
774
+ training stability and generalization. During inference, we
775
+ select the support class closest to the query for classification.
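In code, turning the set-matching distances into classification logits is a one-liner; the sketch below assumes a scalar distance function such as the Bi-MHM above and a tensor-valued query label.

```python
import torch
import torch.nn.functional as F

def episode_loss(query_feat, support_feats, query_label, distance_fn):
    """Cross-entropy over negative distances to the N support classes."""
    logits = torch.stack([-distance_fn(s, query_feat) for s in support_feats])  # [N]
    return F.cross_entropy(logits.unsqueeze(0), query_label.view(1))

# Inference: the predicted class is the argmax of the logits,
# i.e., the support class with the smallest distance to the query.
```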
776
+ 3.3 Extended applications of HyRSM++
777
+ 3.3.1 Semi-supervised few-shot action recognition
778
+ The objective of semi-supervised few-shot action recogni-
779
+ tion [113] is to fully explore the auxiliary information from
780
+ unlabeled video data to boost the few-shot classification.
781
+ Compared with the standard supervised few-shot setting, in
782
+ addition to the support set S and query set Q, an extra un-
783
+ labeled set U is also included in a semi-supervised few-shot
784
+ task to alleviate data scarcity. We demonstrate that the pro-
785
+ posed HyRSM++ can build a bridge between labeled and
786
+ unlabeled examples, leading to higher classification perfor-
787
+ mance.
788
+ Given an unlabeled set U, a common practice in semi-
789
+ supervised learning literature [110, 104, 77] is to adopt the
790
+ Pseudo Labeling technique [45], which assumes that the de-
791
+ cision boundary usually lies in low-density areas and data
792
+ samples in a high-density area have the same label. Sim-
793
+ ilarly, traditional semi-supervised few-shot learning meth-
794
+ ods [71, 49] usually produce pseudo labels for unlabeled
795
+ data based on the known support set, and then the gener-
796
+ ated high-confidence pseudo-label data is augmented into
797
+ the support set. In this paper, we follow this paradigm and
798
+ utilize HyRSM++ to leverage unlabeled examples. Since
799
+ Algorithm 1 HyRSM++ for semi-supervised few-shot ac-
800
+ tion recognition
801
+ Require: A labeled support set S, an auxiliary unlabeled set U, and a
802
+ query set Q
803
+ Ensure: Optimized few-shot classifier HyRSM++
804
+ 1: Enter support set S and unlabeled set U into HyRSM++ and obtain
805
+ the category prediction of U based on Equation 9;
806
+ 2: According to the prediction distribution, select the high-confidence
807
+ samples to generate pseudo-labels and update S with the selected
808
+ samples to get the augmented S
809
+ ′;
810
+ 3: Apply the augmented S
811
+ ′ and query set Q for supervised few-shot
812
+ training as described in Section 3.2;
813
+ noisy videos usually have higher losses in training, it is pos-
814
+ sible to leverage the strong HyRSM++ to distinguish be-
815
+ tween clean and noisy videos from the prediction scores.
816
+ Based on this, we choose reliable pseudo-labeled samples
817
+ in the unlabeled set by predictions and augment the support
818
+ set with high-confidence pseudo-label data. Subsequently,
819
+ we take advantage of the augmented support set to classify
820
+ the query videos as in the supervised few-shot task. During
821
+ the training stage, many semi-supervised few-shot tasks are
822
+ sampled to optimize the whole model, as shown in Algo-
823
+ rithm 1. For inference, the evaluation process is also con-
824
+ ducted by sampling 10,000 episodic tasks.
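A sketch of the pseudo-labeling step of Algorithm 1: unlabeled videos are scored against the support set with the same matching metric, and only confident predictions are appended to the support set. The softmax-over-negative-distances confidence and the threshold value are illustrative assumptions.

```python
import torch

def augment_support(support_by_class, unlabeled_feats, distance_fn, thresh=0.8):
    """Add high-confidence pseudo-labeled videos to the support set (Algorithm 1, steps 1-2)."""
    augmented = {c: list(v) for c, v in support_by_class.items()}
    classes = sorted(support_by_class)
    for u in unlabeled_feats:
        # Mean distance of the unlabeled video to each class's support videos.
        d = torch.stack([torch.stack([distance_fn(s, u) for s in support_by_class[c]]).mean()
                         for c in classes])
        conf = torch.softmax(-d, dim=0)
        best = int(conf.argmax())
        if conf[best] >= thresh:
            augmented[classes[best]].append(u)
    return augmented
```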
825
+ 3.3.2 Unsupervised few-shot action recognition
826
+ Unlike the previously described settings involving labelled
827
+ data, unsupervised few-shot action recognition aims to use
828
+ unlabeled data to construct few-shot tasks and learn adap-
829
+ tations to different tasks. We further extend HyRSM++ to
830
+ this unsupervised task and verify its capability of transfer-
831
+ ring prior knowledge to learn to deal with unseen tasks effi-
832
+ ciently.
833
+ To perform unsupervised few-shot learning, construct-
834
+ ing few-shot tasks is the first step. However, there are no
835
+ label annotations that can be directly applied for few-shot
836
+ learning in the challenging unsupervised setting. Following
837
+ prior unsupervised few-shot algorithms [38, 36], we gener-
838
+ ate few-shot tasks by first adopting existing unsupervised
839
+ learning approaches to learn initialized feature embeddings
840
+ of the input videos, and then leveraging deep clustering tech-
841
+ niques to construct pseudo-classes of the videos. According
842
+ to clustering results, we are able to produce few-shot tasks
843
+ by sampling N-way K-shot episodes. We then use the con-
844
+ structed few-shot tasks to train HyRSM++. During the test-
845
+ ing phase, we sample 10,000 episodes from the test set to
846
+ obtain the performance, and the label information is only
847
+ used for evaluation.
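A sketch of the unsupervised episode construction: cluster the unsupervised video embeddings into pseudo-classes and then sample N-way K-shot episodes from them with the usual episodic sampler. The use of scikit-learn's KMeans and the cluster count are assumptions for illustration.

```python
import numpy as np
from sklearn.cluster import KMeans

def build_pseudo_classes(embeddings, n_clusters=64, min_size=2):
    """Cluster unlabeled video embeddings (an [N, D] array) into pseudo-classes."""
    labels = KMeans(n_clusters=n_clusters, n_init=10).fit_predict(embeddings)
    pseudo = {c: np.where(labels == c)[0].tolist() for c in range(n_clusters)}
    # Keep only clusters large enough to provide both support and query samples.
    return {c: idx for c, idx in pseudo.items() if len(idx) >= min_size}
```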
848
+
849
+ 8
850
+ Xiang Wang et al.
851
Table 1 Comparison to recent few-shot action recognition methods on the meta-testing set of SSv2-Full, Kinetics, Epic-kitchens and HMDB51. The experiments are conducted under the 5-way setting, and results are reported as the shot increases from 1 to 5. "-" means the result is not available in published works, and the underline indicates the second best result.

Method | Reference | 1-shot | 2-shot | 3-shot | 4-shot | 5-shot

SSv2-Full:
CMN++ [112] | ECCV'18 | 34.4 | - | - | - | 43.8
TRN++ [109] | ECCV'18 | 38.6 | - | - | - | 48.9
OTAM [7] | CVPR'20 | 42.8 | 49.1 | 51.5 | 52.0 | 52.3
TTAN [48] | ArXiv'21 | 46.3 | 52.5 | 57.3 | 59.3 | 60.4
ITANet [106] | IJCAI'21 | 49.2 | 55.5 | 59.1 | 61.0 | 62.3
TRX (Ω={1}) [68] | CVPR'21 | 38.8 | 49.7 | 54.4 | 58.0 | 60.6
TRX (Ω={2, 3}) [68] | CVPR'21 | 42.0 | 53.1 | 57.6 | 61.1 | 64.6
STRM [84] | CVPR'22 | 43.1 | 53.3 | 59.1 | 61.7 | 68.1
MTFAN [94] | CVPR'22 | 45.7 | - | - | - | 60.4
Nguyen et al. [62] | ECCV'22 | 43.8 | - | - | - | 61.1
Huang et al. [33] | ECCV'22 | 49.3 | - | - | - | 66.7
HCL [108] | ECCV'22 | 47.3 | 54.5 | 59.0 | 62.4 | 64.9
HyRSM | CVPR'22 | 54.3 (+5.0) | 62.2 (+6.7) | 65.1 (+6.0) | 67.9 (+5.5) | 69.0 (+0.9)
HyRSM++ | - | 55.0 (+5.7) | 63.5 (+8.0) | 66.0 (+6.9) | 68.8 (+6.4) | 69.8 (+1.7)

Kinetics:
MatchingNet [86] | NeurIPS'16 | 53.3 | 64.3 | 69.2 | 71.8 | 74.6
MAML [19] | ICML'17 | 54.2 | 65.5 | 70.0 | 72.1 | 75.3
Plain CMN [112] | ECCV'18 | 57.3 | 67.5 | 72.5 | 74.7 | 76.0
CMN-J [113] | TPAMI'20 | 60.5 | 70.0 | 75.6 | 77.3 | 78.9
TARN [5] | BMVC'19 | 64.8 | - | - | - | 78.5
ARN [105] | ECCV'20 | 63.7 | - | - | - | 82.4
OTAM [7] | CVPR'20 | 73.0 | 75.9 | 78.7 | 81.9 | 85.8
ITANet [106] | IJCAI'21 | 73.6 | - | - | - | 84.3
TRX (Ω={1}) [68] | CVPR'21 | 63.6 | 75.4 | 80.1 | 82.4 | 85.2
TRX (Ω={2, 3}) [68] | CVPR'21 | 63.6 | 76.2 | 81.8 | 83.4 | 85.9
STRM [84] | CVPR'22 | 62.9 | 76.4 | 81.1 | 83.8 | 86.7
MTFAN [94] | CVPR'22 | 74.6 | - | - | - | 87.4
Nguyen et al. [62] | ECCV'22 | 74.3 | - | - | - | 87.4
Huang et al. [33] | ECCV'22 | 73.3 | - | - | - | 86.4
HCL [108] | ECCV'22 | 73.7 | 79.1 | 82.4 | 84.0 | 85.8
HyRSM | CVPR'22 | 73.7 (-0.9) | 80.0 (+0.9) | 83.5 (+1.1) | 84.6 (+0.6) | 86.1 (-1.3)
HyRSM++ | - | 74.0 (-0.6) | 80.8 (+1.7) | 83.9 (+1.5) | 85.3 (+1.3) | 86.4 (-1.0)

Epic-kitchens:
OTAM [7] | CVPR'20 | 46.0 | 50.3 | 53.9 | 54.9 | 56.3
TRX [68] | CVPR'21 | 43.4 | 50.6 | 53.5 | 56.8 | 58.9
STRM [84] | CVPR'22 | 42.8 | 50.4 | 54.9 | 58.0 | 59.2
HyRSM | CVPR'22 | 47.4 (+1.4) | 52.9 (+2.3) | 56.4 (+1.5) | 58.8 (+0.8) | 59.8 (+0.6)
HyRSM++ | - | 48.0 (+2.0) | 54.9 (+4.3) | 57.5 (+2.6) | 59.6 (+1.6) | 60.8 (+1.6)

HMDB51:
ARN [105] | ECCV'20 | 45.5 | - | - | - | 60.6
OTAM [7] | CVPR'20 | 54.5 | 63.5 | 65.7 | 67.2 | 68.0
TTAN [48] | ArXiv'21 | 57.1 | - | - | - | 74.0
TRX [68] | CVPR'21 | 53.1 | 62.5 | 66.8 | 70.2 | 75.6
STRM [84] | CVPR'22 | 52.3 | 62.5 | 67.4 | 70.9 | 77.3
MTFAN [94] | CVPR'22 | 59.0 | - | - | - | 74.6
Nguyen et al. [62] | ECCV'22 | 59.6 | - | - | - | 76.9
Huang et al. [33] | ECCV'22 | 60.1 | - | - | - | 77.0
HCL [108] | ECCV'22 | 59.1 | 66.5 | 71.2 | 73.8 | 76.3
HyRSM | CVPR'22 | 60.3 (+0.2) | 68.2 (+1.7) | 71.7 (+0.5) | 75.3 (+1.5) | 76.0 (-1.3)
HyRSM++ | - | 61.5 (+1.4) | 69.0 (+2.5) | 72.7 (+1.5) | 75.4 (+1.6) | 76.4 (-0.9)
4 Experiments

In this section, the following key questions are answered in detail: (1) Is HyRSM++ competitive with other state-of-the-art methods on challenging few-shot benchmarks? (2) Which components play an integral role in making HyRSM++ work well? (3) Can the proposed hybrid relation module be viewed as a simple plug-and-play unit with the same effect for other methods? (4) Does the proposed temporal set matching metric have an advantage over competing measures? (5) Can HyRSM++ maintain stable performance across a variety of video scenarios?

4.1 Datasets and experimental setups

Datasets. We evaluate our HyRSM++ on six standard public few-shot benchmarks. For the Kinetics [8], SSv2-Full [23], and SSv2-Small [23] datasets, we adopt the existing splits proposed by [7, 112, 106, 68].
[Figure 3: 4x4 heatmap of 5-way 1-shot accuracy (%) on SSv2-Full for every pairing of an intra-relation component with an inter-relation component, each drawn from {MSA, Transformer, Bi-LSTM, Bi-GRU}.]
Fig. 3 Comparison between different components in the hybrid relation module on 5-way 1-shot few-shot action classification without temporal coherence regularization. Experiments are conducted on the SSv2-Full dataset.
[Figure 4: 4x4 heatmap of 5-way 1-shot accuracy (%) on SSv2-Full for every pairing of an intra-relation component with an inter-relation component, each drawn from {MSA, Transformer, Bi-LSTM, Bi-GRU}.]
Fig. 4 Comparison between different components in the hybrid relation module on 5-way 1-shot few-shot action classification with temporal coherence regularization. Experiments are conducted on the SSv2-Full dataset.
Each dataset consists of 64 and 24 classes as the meta-training and meta-testing sets, respectively. For UCF101 [78] and HMDB51 [42], we verify our proposed methods by leveraging the existing splits from [105, 68]. In addition, we also utilize the egocentric Epic-kitchens [14, 13] dataset to evaluate HyRSM++.

Implementation details. Following previous works [112, 7, 68, 106], ResNet-50 [28] initialized with ImageNet [15] pre-trained weights is utilized as the feature extractor in our experiments. We sparsely and uniformly sample 8 (i.e., T = 8) frames per video to construct the input frame sequence, in line with previous methods [7, 106]. In the training phase, we also adopt basic data augmentation such as random cropping and color jitter, and use the Adam [39] optimizer to train our model. During the inference stage, we conduct few-shot action recognition evaluation on 10,000 randomly sampled episodes from the meta-testing set and report the mean accuracy. For many-shot classification, e.g., 5-shot, we follow ProtoNet [76]: the mean feature of the support videos in each class serves as the class prototype, and query videos are classified according to their distances to the prototypes.
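As an illustration only, the following PyTorch-style sketch shows this prototype-based evaluation step; the function and variable names (e.g., proto_classify) are ours and are not taken from the released code:

```python
import torch

def proto_classify(support_feats, support_labels, query_feats, n_way):
    """Classify query videos by distance to class prototypes (ProtoNet-style).

    support_feats: [n_way * k_shot, D] pooled video embeddings
    support_labels: [n_way * k_shot] integer labels in [0, n_way)
    query_feats: [Q, D] pooled query embeddings
    """
    prototypes = torch.stack(
        [support_feats[support_labels == c].mean(dim=0) for c in range(n_way)]
    )                                             # [n_way, D]
    dists = torch.cdist(query_feats, prototypes)  # [Q, n_way] Euclidean distances
    return (-dists).argmax(dim=1)                 # nearest prototype wins

# toy usage: one 5-way 5-shot episode with 512-d features
feats = torch.randn(25, 512)
labels = torch.arange(5).repeat_interleave(5)
queries = torch.randn(10, 512)
pred = proto_classify(feats, labels, queries, n_way=5)
```

In the 1-shot case each prototype reduces to the single support embedding of its class.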
4.2 Comparison with state-of-the-art

In this section, we validate the effectiveness of the proposed HyRSM++ by comparing it with state-of-the-art methods under various settings. As indicated in Table 1 and Table 2, the proposed HyRSM++ surpasses other advanced approaches significantly and achieves new state-of-the-art performance. For instance, HyRSM++ improves the state-of-the-art performance from 49.2% to 55.0% under the 1-shot setting on SSv2-Full and consistently outperforms our original conference version [91]. Specifically, compared with current strict temporal alignment techniques [7, 106] and complex fusion methods [48, 68], HyRSM++ produces superior results under most shot settings, which implies that our approach is considerably flexible and efficient. Note that the SSv2-Full and SSv2-Small datasets tend to be motion-based and generally focus on temporal reasoning, while Kinetics and UCF101 are partly appearance-related datasets where scene understanding is usually essential. Besides, Epic-kitchens and HMDB51 are relatively complicated and might involve diverse object interactions. Evaluated extensively on these benchmarks, HyRSM++ provides excellent performance, revealing that it has strong robustness and generalization across different scenes. From Table 2, we observe that HyRSM++ outperforms current state-of-the-art methods on UCF101 and SSv2-Small under the 1-shot and 3-shot settings, which suggests that our HyRSM++ can learn rich and effective representations with extremely limited samples. It is worth noting that under the 5-shot evaluation, HyRSM++ yields 95.9% and 58.0% on UCF101 and SSv2-Small, respectively, slightly behind STRM and HCL. We attribute this to the fact that STRM and HCL are ensemble methods that weight each sample with attention or use multiple metrics for few-shot classification, which makes them more suitable for multiple shots, while our HyRSM++ is a simple and general method without complex ensemble operations. Moreover, we also observe that with the introduction of temporal coherence regularization, HyRSM++ achieves a significant improvement over HyRSM, which verifies the effectiveness of exploiting temporal order information during the set matching process.

4.3 Ablation study

For ease of comparison, we use as a baseline ProtoNet [76], which applies global-average pooling to backbone representations to obtain a prototype for each class. We explore the role and validity of each proposed module in detail below.
Table 2 Results on 1-shot, 3-shot, and 5-shot few-shot classification on the UCF101 and SSv2-Small datasets. "-" means the result is not available in published works, and the underline indicates the second best result.

Method | Reference | UCF101 1-shot | UCF101 3-shot | UCF101 5-shot | SSv2-Small 1-shot | SSv2-Small 3-shot | SSv2-Small 5-shot
MatchingNet [86] | NeurIPS'16 | - | - | - | 31.3 | 39.8 | 45.5
MAML [19] | ICML'17 | - | - | - | 30.9 | 38.6 | 41.9
Plain CMN [112] | ECCV'18 | - | - | - | 33.4 | 42.5 | 46.5
CMN-J [113] | TPAMI'20 | - | - | - | 36.2 | 44.6 | 48.8
ARN [105] | ECCV'20 | 66.3 | - | 83.1 | - | - | -
OTAM [7] | CVPR'20 | 79.9 | 87.0 | 88.9 | 36.4 | 45.9 | 48.0
TTAN [48] | ArXiv'21 | 80.9 | - | 93.2 | - | - | -
ITANet [106] | IJCAI'21 | - | - | - | 39.8 | 49.4 | 53.7
TRX [68] | CVPR'21 | 78.2 | 92.4 | 96.1 | 36.0 | 51.9 | 59.1
STRM [84] | CVPR'22 | 80.5 | 92.7 | 96.9 | 37.1 | 49.2 | 55.3
MTFAN [94] | CVPR'22 | 84.8 | - | 95.1 | - | - | -
Nguyen et al. [62] | ECCV'22 | 84.9 | - | 95.9 | - | - | -
Huang et al. [33] | ECCV'22 | 71.4 | - | 91.0 | 38.9 | - | 61.6
HCL [108] | ECCV'22 | 82.5 | 91.0 | 93.9 | 38.7 | 49.1 | 55.4
HyRSM | CVPR'22 | 83.9 (-1.0) | 93.0 (+0.3) | 94.7 (-2.2) | 40.6 (+0.8) | 52.3 (+0.4) | 56.1 (-5.5)
HyRSM++ | - | 85.8 (+0.9) | 93.5 (+0.8) | 95.9 (-1.0) | 42.8 (+3.0) | 52.4 (+0.5) | 58.0 (-2.6)
Table 3 Ablation study under 5-way 1-shot and 5-way 5-shot settings on the SSv2-Full dataset. "TCR" refers to temporal coherence regularization.

Components enabled | 1-shot | 5-shot
Baseline (ProtoNet) | 35.2 | 45.3
Intra-relation | 41.2 | 55.0
Inter-relation | 43.7 | 55.2
Bi-MHM | 44.6 | 56.0
Bi-MHM + TCR | 45.3 | 57.1
Intra-relation + Bi-MHM | 48.1 | 60.5
Inter-relation + Bi-MHM | 48.3 | 61.2
Intra-relation + Bi-MHM + TCR | 49.2 | 62.8
Intra-relation + Inter-relation | 51.4 | 64.6
Inter-relation + Bi-MHM + TCR | 52.4 | 65.8
Intra-relation + Inter-relation + Bi-MHM (HyRSM) | 54.3 | 69.0
Intra-relation + Inter-relation + Bi-MHM + TCR (HyRSM++) | 55.0 | 69.8
Table 4 Generalization of the hybrid relation module. We conduct experiments on SSv2-Full.

Method | 1-shot | 5-shot
OTAM [7] | 42.8 | 52.3
OTAM [7] + Intra-relation | 48.9 | 60.4
OTAM [7] + Inter-relation | 46.9 | 57.8
OTAM [7] + Intra-relation + Inter-relation | 51.7 | 63.9
Design choices of relation modeling. To systematically investigate the effect of different relation modeling operations in the hybrid relation module, we vary its components to construct several variants and report the results in Figure 3 and Figure 4. The comparison experiments are conducted on the SSv2-Full dataset under the 5-way 1-shot setting. We observe that different combinations have quite distinct properties; e.g., multi-head self-attention (MSA) and Transformer are more effective at modeling intra-class relations than Bi-LSTM and Bi-GRU.
[Figure 5: N-way 1-shot accuracy (%) on Kinetics and SSv2-Full as N increases from 5 to 10, for OTAM, TRX, STRM, HyRSM, and HyRSM++.]
Fig. 5 N-way 1-shot performance trends of our HyRSM++ and other state-of-the-art methods with different N on SSv2-Full and Kinetics. The comparison results prove the superiority of our HyRSM++.
[Figure 6: (a) 5-way 1-shot accuracy (%) on SSv2-Full versus the number of input frames (2 to 10); (b) 1-shot and 5-shot accuracy (%) on SSv2-Full versus the number of attention heads (1, 2, 4, 8, 16, 32).]
Fig. 6 (a) Performance on SSv2-Full using a different number of frames under the 5-way 1-shot setting. (b) The effect of the number of heads on SSv2-Full.
For example, utilizing multi-head self-attention to learn intra-relation yields at least a 2.5% improvement over Bi-LSTM. Nevertheless, compared with other recent algorithms [68, 106], the performance of each combination can still be improved, which strongly suggests the necessity of structural design for learning task-specific features. For simplicity, we choose the same structure to explore intra-relation and inter-relation, and the configuration of multi-head self-attention is adopted in the experiments.
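To make the intra-/inter-relation idea concrete, here is a minimal PyTorch sketch of relation modeling with multi-head self-attention. It is an illustration of the general mechanism only; the module name, residual wiring, and pooling choices are our assumptions and not the exact HyRSM++ architecture:

```python
import torch
import torch.nn as nn

class HybridRelationSketch(nn.Module):
    """Illustrative intra-/inter-relation modeling with multi-head self-attention."""

    def __init__(self, dim=2048, heads=8):
        super().__init__()
        self.intra = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.inter = nn.MultiheadAttention(dim, heads, batch_first=True)

    def forward(self, feats):                      # feats: [N_videos, T, D]
        # intra-relation: attend across the T frames within each video
        intra_out, _ = self.intra(feats, feats, feats)
        feats = feats + intra_out                  # residual connection
        # inter-relation: pool each video to a token and attend across the N videos
        tokens = feats.mean(dim=1).unsqueeze(0)    # [1, N, D]
        inter_out, _ = self.inter(tokens, tokens, tokens)
        task_context = inter_out.squeeze(0).unsqueeze(1)   # [N, 1, D]
        return feats + task_context                # broadcast task-specific context to frames

# toy usage: 6 videos (5 support + 1 query), 8 frames, 2048-d features
x = torch.randn(6, 8, 2048)
y = HybridRelationSketch()(x)
```

Intra-relation attention operates over the frames of one video, while inter-relation attention exchanges information across all support and query videos of the episode to produce task-specific features.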
[Figure 7: 1-shot to 5-shot accuracy (%) of OTAM, TRX, HyRSM, and HyRSM++ on SSv2-Full and Kinetics with ResNet-18 and ResNet-34 backbones.]
Fig. 7 Comparison of backbones with different depths on the SSv2-Full and Kinetics datasets.
Table 5 Comparative experiments on SSv2-Full using the Inception-v3 [81] feature extractor.

Method | 1-shot | 2-shot | 3-shot | 4-shot | 5-shot
OTAM [7] | 42.4 | 46.6 | 48.7 | 49.2 | 52.1
TRX [68] | 37.7 | 50.2 | 55.5 | 57.2 | 60.1
STRM [84] | 42.9 | 53.9 | 58.9 | 62.3 | 63.4
HyRSM++ | 53.3 | 62.7 | 65.3 | 67.8 | 69.3
Table 6 Performance comparison on SSv2-Full with self-supervised initialization weights [97].

Method | 1-shot | 2-shot | 3-shot | 4-shot | 5-shot
OTAM [7] | 41.2 | 45.9 | 48.8 | 50.1 | 51.0
TRX [68] | 37.5 | 43.8 | 49.9 | 51.6 | 52.1
STRM [84] | 38.0 | 46.2 | 49.9 | 53.4 | 54.4
HyRSM++ | 50.9 | 59.1 | 62.6 | 65.5 | 66.4
Table 7 Performance comparison with different relation modeling paradigms on SSv2-Full and Kinetics.

Setting | Method | Dataset | 1-shot | 5-shot
Support-only | HyRSM | SSv2-Full | 52.1 | 67.2
Support-only | HyRSM++ | SSv2-Full | 53.7 | 68.8
Support&Query | HyRSM | SSv2-Full | 54.3 | 69.0
Support&Query | HyRSM++ | SSv2-Full | 55.0 | 69.8
Support-only | HyRSM | Kinetics | 73.4 | 85.5
Support-only | HyRSM++ | Kinetics | 73.5 | 85.7
Support&Query | HyRSM | Kinetics | 73.7 | 86.1
Support&Query | HyRSM++ | Kinetics | 74.0 | 86.4
Analysis of the proposed components. Table 3 summarizes the ablation study of each module in HyRSM++. To evaluate the contribution of the proposed components, ProtoNet [76] is taken as our baseline. From the ablation results, we can conclude that each component is highly effective.
[Figure 8: 5-way 5-shot and 5-way 1-shot accuracy (%) of OTAM, TRX, STRM, and HyRSM++ as the noisy-label ratio grows from 0% to 50%.]
Fig. 8 Robustness comparison experiments in the presence of noisy samples. X% represents the proportion of noisy labels included in the dataset.
Table 8 Comparison with recent temporal alignment methods on the SSv2-Full dataset under the 5-way 1-shot and 5-way 5-shot settings. Diagonal means matching frame by frame.

Metric | Bi-direction | 1-shot | 5-shot
Diagonal | - | 38.3 | 48.7
Plain DTW [61] | - | 39.6 | 49.0
OTAM [7] | no | 39.3 | 47.7
OTAM [7] | yes | 42.8 | 52.3
Bi-MHM | yes | 44.6 | 56.0
Temporal set matching metric | yes | 45.3 | 57.1
Table 9 Comparison of different set matching strategies on the SSv2-Full dataset.

Metric | Bi-direction | 1-shot | 5-shot
Hausdorff distance | no | 32.4 | 38.2
Hausdorff distance | yes | 34.5 | 39.1
Modified Hausdorff distance | no | 44.2 | 50.0
Bi-MHM | yes | 44.6 | 56.0
Temporal set matching metric | yes | 45.3 | 57.1
Table 10 Generalization of temporal coherence regularization. We conduct experiments on SSv2-Full. "Hard margin" represents the method described in Equation 11.

Method | 1-shot | 5-shot
OTAM [7] | 42.8 | 52.3
OTAM [7] + IDM | 43.7 | 55.0
OTAM [7] + Hard margin | 43.2 | 55.3
OTAM [7] + Temporal coherence regularization | 44.1 | 55.8
Bi-MHM | 44.6 | 56.0
Bi-MHM + IDM | 44.7 | 56.3
Bi-MHM + Hard margin | 44.7 | 56.5
Bi-MHM + Temporal coherence regularization | 45.3 | 57.1
In particular, compared to the baseline, intra-relation modeling brings 6.0% and 9.7% performance gains on 1-shot and 5-shot, respectively, and inter-relation modeling boosts the performance by 8.5% and 9.9% on 1-shot and 5-shot. In addition, the proposed set matching metric improves 1-shot and 5-shot classification by 9.4% and 10.7%, respectively, which indicates its ability to find better corresponding frames within a video pair.
Adding temporal coherence regularization to the set matching metric also achieves stable performance improvements. Moreover, stacking the proposed modules further improves performance, indicating the complementarity between components. When all the proposed modules are combined to form HyRSM++, the 1-shot and 5-shot performance is improved to 55.0% and 69.8%, respectively, which strongly supports the importance of learning task-related features and flexible metrics.

Pluggability of hybrid relation module. In Table 4, we experimentally show that the hybrid relation module generalizes well to other methods by inserting it into the recent OTAM [7]. In this study, OTAM with our hybrid relation module benefits from relational information and finally achieves 8.9% and 11.6% gains on 1-shot and 5-shot. This fully evidences that mining the rich information among videos to learn task-specific features is especially valuable.

N-way few-shot classification. In the previous experiments, all comparative evaluations were carried out under the 5-way setting. To further explore the influence of different N, in Figure 5 we compare N-way (N >= 5) 1-shot results on SSv2-Full and Kinetics. Results show that as N increases, the task becomes more difficult and the performance decreases. Nevertheless, the performance of our HyRSM++ remains consistently ahead of the recent state-of-the-art STRM [84], TRX [68] and OTAM [7], which shows the feasibility of boosting performance by introducing rich relations among videos and the power of the set matching metric.

Varying the number of frames. To demonstrate the scalability of HyRSM++, we also explore the impact of the number of video frames on performance. Of note, the previous comparisons are performed with 8 input frames. Results in Figure 6(a) show that performance improves as the number of frames increases, and HyRSM++ gradually saturates beyond 7 frames.

Influence of head number. Previous analyses have shown that multi-head self-attention can focus on different patterns and is critical to capturing diverse features [41]. We investigate the effect of varying the number of heads in multi-head self-attention in Figure 6(b). Experimental results indicate that the effect of multiple heads is remarkable, and the performance starts to saturate beyond a particular point.

Varying depth of the backbone. The proposed HyRSM++ is general and compatible with feature extractors of various capacities. The previous methods all utilize ResNet-50 as the backbone by default for a fair comparison, and the impact of the backbone's depth on performance is still under-explored. As presented in Figure 7, we attempt to answer this question by adopting ResNet-18 and ResNet-34 pre-trained on ImageNet as alternative backbones. Results demonstrate that the deeper network clearly benefits from greater learning capacity and yields better performance.
[Figure 9: query-to-support similarity matrices for 5-way 1-shot episodes, without and with the hybrid relation module; per-episode accuracy improves from 40-80% to 100% after adding the module. (a) Examples from SSv2-Full. (b) Examples from Kinetics.]
Fig. 9 Similarity visualization of how query videos (rows) match to support videos (columns). The boxes of different colors correspond to correct and incorrect matches.
[Figure 10: frame-level matching results between support and query videos. (a) SSv2-Full: "pretending to open something without actually opening it". (b) SSv2-Full: "showing that something is empty". (c) Kinetics: "cutting watermelon".]
Fig. 10 Visualization of matching results with the proposed set matching metric on SSv2-Full and Kinetics.
In addition, we notice that our proposed HyRSM++ consistently outperforms the competitors (i.e., OTAM and TRX), which indicates that HyRSM++ is a generally effective framework.

Influence of different backbones. To verify that our approach is not limited to ResNet-like structures, we further perform experiments on Inception-v3 and report the results in Table 5. From the comparison, we note that HyRSM++ is significantly superior to the other competitive algorithms. Compared with STRM [84], our proposed HyRSM++ yields at least a 5.5% performance gain under the various settings.

Impact of pretraining types. Supervised ImageNet initialization [15] is widely employed in many vision tasks [7, 113, 90] and achieves impressive success. Recently, self-supervised techniques have also received widespread attention and revealed excellent application potential.
In Table 6, we show the performance comparison with self-supervised pretraining weights [97]. Results demonstrate that our HyRSM++ is still powerful and is not limited to specific initialization weights.

Other relation modeling forms. Previous few-shot image classification methods that learn task-specific features have also achieved promising results [101, 47]. However, many of them use complex and fixed operations to learn the dependencies between images, while our method is straightforward and flexible. Moreover, most previous works only use the information within the support set to learn task-specific features, ignoring the correlation with query samples. In our hybrid relation module, we add the query video to the pool of inter-relation modeling to extract relevant information suitable for query classification. As illustrated in Table 7, we try to remove the query video from the pool in HyRSM++, i.e., Support-only, and observe that after removing the query video, the 1-shot and 5-shot performance on SSv2-Full drops by 1.3% and 1.0%, respectively. There are similar conclusions on the Kinetics dataset. This evidences that the proposed hybrid relation module is reasonable and can effectively extract task-related features, thereby promoting query classification accuracy.

Robustness to noisy labels. To demonstrate the robustness of HyRSM++ to noisy samples, we simulate the presence of noisy labels in the dataset in Figure 8. From the results, we observe that performance generally decreases as the proportion of noise rises. However, our HyRSM++ still exhibits higher performance than the other methods, which illustrates the robustness of our method and its adaptability to complex conditions.
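For concreteness, label noise of this kind can be simulated by randomly reassigning a fraction of the labels. The sketch below is our own illustration of such a corruption procedure, not the paper's exact protocol:

```python
import torch

def corrupt_labels(labels, noise_ratio, num_classes, generator=None):
    """Randomly replace a fraction of labels with other classes to simulate label noise."""
    noisy = labels.clone()
    n_noisy = int(noise_ratio * len(labels))
    idx = torch.randperm(len(labels), generator=generator)[:n_noisy]
    offsets = torch.randint(1, num_classes, (n_noisy,), generator=generator)
    noisy[idx] = (noisy[idx] + offsets) % num_classes   # guaranteed to change the class
    return noisy
```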
4.4 Comparison with other matching approaches

Our proposed temporal set matching metric Bi-MHM aims to accurately find the corresponding video frames between video pairs by relaxing the strict temporal ordering constraints.
Table 11 Complexity analysis for 5-way 1-shot SSv2-Full evaluation. The experiments are carried out on one Nvidia V100 GPU.

Method | Backbone | Param | FLOPs | Latency | Acc
HyRSM | ResNet-18 | 13.8M | 3.64G | 36.5ms | 46.6
HyRSM++ | ResNet-18 | 13.8M | 3.64G | 36.5ms | 47.7
HyRSM | ResNet-34 | 23.9M | 7.34G | 67.5ms | 50.0
HyRSM++ | ResNet-34 | 23.9M | 7.34G | 67.5ms | 50.4
OTAM [7] | ResNet-50 | 23.5M | 8.17G | 116.6ms | 42.8
TRX [68] | ResNet-50 | 47.1M | 8.22G | 94.6ms | 42.0
STRM [84] | ResNet-50 | 73.3M | 8.27G | 113.3ms | 43.1
HyRSM | ResNet-50 | 65.6M | 8.36G | 83.5ms | 54.3
HyRSM++ | ResNet-50 | 65.6M | 8.36G | 83.5ms | 55.0
The following comparative experiments in Table 8 are carried out under identical experimental setups, i.e., we replace OTAM directly with our Bi-MHM while keeping the other settings unchanged. Results show that our Bi-MHM performs well and outperforms other temporal alignment methods (e.g., OTAM). We further analyze different set matching approaches in Table 9, and the results indicate that the Hausdorff distance is susceptible to noise interference, resulting in mismatches and relatively poor performance. In contrast, our Bi-MHM is stable under noise and obtains better performance. Furthermore, compared with the single-directional metric, our proposed bidirectional metric reflects the actual distances between videos more comprehensively and achieves better performance on few-shot tasks. In addition, we observe that the proposed temporal set matching metric achieves a clear improvement over Bi-MHM after incorporating temporal coherence. For instance, the temporal set matching metric obtains 0.7% and 1.1% performance gains on 5-way 1-shot and 5-way 5-shot SSv2-Full classification, which indicates the effectiveness of the proposed temporal set matching metric.
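For intuition, here is a minimal sketch of a bidirectional mean Hausdorff-style set distance between two frame-feature sequences; the function and variable names are ours, and the released HyRSM++ implementation may differ in details:

```python
import torch

def bi_mhm(query_frames, support_frames):
    """Bidirectional mean Hausdorff-style distance between two frame sets.

    query_frames: [T_q, D] frame features of the query video
    support_frames: [T_s, D] frame features of a support video
    Returns a scalar distance; smaller means the two videos match better.
    """
    dists = torch.cdist(query_frames, support_frames)  # [T_q, T_s] pairwise distances
    q_to_s = dists.min(dim=1).values.mean()            # each query frame -> closest support frame
    s_to_q = dists.min(dim=0).values.mean()            # each support frame -> closest query frame
    return q_to_s + s_to_q                             # combine both directions

# toy usage: 8 frames per video, 512-d features
q = torch.randn(8, 512)
s = torch.randn(8, 512)
print(bi_mhm(q, s))
```

Because every frame is matched to its closest counterpart in the other set, the metric tolerates misaligned actions while remaining symmetric across the two videos.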
4.5 Comparison of temporal coherence manners

Pioneering work [11, 22, 59] also indicates the important role of temporal coherence and shows remarkable results in face recognition [59] and unsupervised representation learning [22, 27]. However, these schemes also have some limitations, as noted in Section 3.2, and thus the temporal coherence regularization is proposed for smooth video coherence. Table 10 compares the proposed temporal coherence regularization with existing temporal coherence schemes based on OTAM and Bi-MHM. Results show that exploiting temporal coherence helps improve the classification accuracy of the metrics, which confirms our motivation for considering temporal order information during the matching process. In addition, our proposed temporal coherence regularization achieves more significant improvements than the other manners, which we attribute to its smoothness.
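As an illustrative sketch only, a smooth temporal coherence penalty can discourage matches between temporally distant frames; the exact regularizer follows Section 3.2 of the paper, whereas the Gaussian weighting and soft-matching form below are our own assumptions:

```python
import torch

def temporal_coherence_penalty(dists, sigma=1.0):
    """Smooth temporal coherence term over a pairwise frame-distance matrix.

    dists: [T_q, T_s] frame-to-frame distances between query and support.
    Matches between frames that are far apart in (normalized) time receive
    a smoothly increasing penalty.
    """
    t_q, t_s = dists.shape
    iq = torch.arange(t_q).float().unsqueeze(1) / max(t_q - 1, 1)
    js = torch.arange(t_s).float().unsqueeze(0) / max(t_s - 1, 1)
    time_gap = (iq - js).abs()                       # normalized temporal offset in [0, 1]
    weight = 1.0 - torch.exp(-time_gap ** 2 / (2 * sigma ** 2))
    soft_match = torch.softmax(-dists, dim=1)        # soft correspondence of each query frame
    return (soft_match * weight).sum(dim=1).mean()   # penalize temporally distant matches
```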
4.6 Visualization results

To qualitatively show the discriminative capability of the learned task-specific features in our proposed method, we visualize the similarities between query and support videos with and without the hybrid relation module. As depicted in Figure 9, adding the hybrid relation module significantly improves the discrimination of the features, contributing to more accurate predictions. Additionally, the matching results of the set matching metric are visualized in Figure 10, where we observe that our Bi-MHM is considerably flexible in dealing with both alignment and misalignment.
[Figure 11: Grad-CAM activation maps of OTAM and HyRSM++ on support and query videos of "tipping Sth over", "taking Sth out of Sth", and "showing Sth next to Sth" from SSv2-Full, and "riding elephant", "playing trumpet", and "filling eyebrows" from Kinetics.]
Fig. 11 Visualization of activation maps with Grad-CAM [75]. Compared to OTAM [7], HyRSM++ focuses more precisely on classification-related regions.
To further evaluate the proposed HyRSM++ visually, we compare the activation maps of HyRSM++ with those of the competitive OTAM [7]. As shown in Figure 11, the features of OTAM usually contain non-target objects or ignore the most discriminative parts, since it lacks a mechanism for learning task-specific embeddings for feature adaptation. In contrast, our proposed HyRSM++ processes the query and support videos with an adaptive relation modeling operation, which allows it to focus on the different target objects. These qualitative experiments illustrate the rationality of our model design and the necessity of learning task-related features.
4.7 Limitations

To further understand HyRSM++, Table 11 illustrates its differences from OTAM and TRX in terms of parameters, computation, and runtime.
Table 12 Comparison to existing semi-supervised few-shot action recognition methods on the meta-testing set of Kinetics and SSv2-Small. The experiments are conducted under the 5-way setting, and results are reported as the shot increases from 1 to 5. "w/o unlabeled data" indicates that there is no unlabeled set in an episode, i.e., the traditional few-shot action recognition setting, which acts as the lower bound of the semi-supervised counterpart.

Method | Backbone | 1-shot | 2-shot | 3-shot | 4-shot | 5-shot

Kinetics:
OTAM w/o unlabeled data [7] | Inception-v3 | 68.6 | 72.7 | 74.1 | 75.7 | 76.9
DeepCluster CACTUs-MAML [30] | Inception-v3 | 65.1 | 72.8 | 76.5 | 77.9 | 79.5
DeepCluster CACTUs-ProtoNets [30] | Inception-v3 | 66.9 | 73.2 | 77.0 | 78.1 | 79.9
LIM [113] | Inception-v3 | 69.8 | 75.9 | 78.3 | 80.4 | 82.6
HyRSM++ w/o unlabeled data | Inception-v3 | 69.1 | 76.0 | 78.6 | 81.6 | 81.9
HyRSM++ | Inception-v3 | 73.7 | 79.4 | 80.9 | 81.8 | 83.1
CMN w/o unlabeled data [112] | ResNet-50 | 60.5 | 70.0 | 75.6 | 77.3 | 78.9
OTAM w/o unlabeled data [7] | ResNet-50 | 73.0 | 75.9 | 78.7 | 81.9 | 85.8
LIM (ensemble) [113] | ResNet-50, Inception-v3, ResNet-18 | 73.3 | 78.3 | 80.8 | 82.4 | 84.0
HyRSM++ w/o unlabeled data | ResNet-50 | 74.0 | 80.8 | 83.6 | 85.3 | 86.4
HyRSM++ | ResNet-50 | 79.1 | 84.3 | 85.4 | 86.4 | 86.8

SSv2-Small:
OTAM w/o unlabeled data [7] | Inception-v3 | 36.7 | 41.0 | 43.6 | 44.1 | 46.9
DeepCluster CACTUs-MAML [30] | Inception-v3 | 37.9 | 44.5 | 45.9 | 47.8 | 49.9
DeepCluster CACTUs-ProtoNets [30] | Inception-v3 | 38.4 | 44.8 | 46.1 | 48.0 | 50.1
LIM [113] | Inception-v3 | 41.1 | 46.9 | 48.0 | 51.5 | 53.0
HyRSM++ w/o unlabeled data | Inception-v3 | 41.5 | 46.1 | 49.5 | 52.9 | 55.1
HyRSM++ | Inception-v3 | 43.6 | 49.5 | 51.8 | 52.4 | 54.5
CMN w/o unlabeled data [112] | ResNet-50 | 36.2 | 42.1 | 44.6 | 47.0 | 48.8
OTAM w/o unlabeled data [7] | ResNet-50 | 36.4 | 42.9 | 45.9 | 46.8 | 48.0
LIM (ensemble) [113] | ResNet-50, Inception-v3, ResNet-18 | 44.0 | 49.8 | 51.3 | 53.9 | 55.1
HyRSM++ w/o unlabeled data | ResNet-50 | 42.8 | 47.1 | 52.4 | 54.7 | 58.0
HyRSM++ | ResNet-50 | 45.4 | 51.1 | 55.2 | 57.4 | 58.8
[Figure 12: 1-shot to 5-shot accuracy (%) on Kinetics as the number of unlabeled videos per episode grows from 0 to 200.]
Fig. 12 Performance comparison of different amounts of unlabeled data for testing in an episode on Kinetics.
In the inference phase, HyRSM++ adds no computational burden over HyRSM because the temporal coherence regularization is not involved in the calculation. Notably, HyRSM++ introduces extra parameters (i.e., the hybrid relation module), resulting in increased GPU memory and computational consumption. Nevertheless, without complex non-parallel classifier heads, the overall inference speed of HyRSM++ is faster than OTAM and TRX. We will further investigate how to reduce complexity without loss of performance in the future.
[Figure 13: 1-shot to 5-shot accuracy (%) on SSv2-Small as the number of unlabeled videos per episode grows from 0 to 200.]
Fig. 13 Performance comparison of different amounts of unlabeled data for testing in an episode on the SSv2-Small dataset.
5 Extension to Semi-supervised Few-shot Action Recognition

In this section, we demonstrate that the proposed HyRSM++ can be extended to address the more challenging semi-supervised few-shot action recognition problem. Following LIM [113], we utilize two common datasets (Kinetics [8] and SSv2-Small [23]) to perform comparative experiments. These two datasets are subsets of Kinetics-400 [8] and Something-Something-v2 [23], respectively, and the unlabeled examples in our experiments are collected from the remaining videos of the same categories as these subsets.
Table 13 Comparison to state-of-the-art unsupervised few-shot action recognition approaches on UCF101, HMDB51, and Kinetics. ∗ indicates that the algorithm adopts the same 2D ResNet-50 backbone as HyRSM++.

Method | Supervision | UCF101 | HMDB51 | Kinetics
MAML [19] | Supervised | - | - | 54.2
CMN [112] | Supervised | - | - | 60.5
TARN [5] | Supervised | - | - | 66.6
ProtoGAN [43] | Supervised | 57.8 | 34.7 | -
ARN [105] | Supervised | 66.3 | 45.2 | 63.7
3DRotNet [37] | Unsupervised | 39.4 | 32.4 | 27.5
VCOP [96] | Unsupervised | 32.9 | 27.8 | 26.5
IIC [83] | Unsupervised | 56.8 | 34.7 | 37.7
Pace [87] | Unsupervised | 25.6 | 26.2 | 22.4
MemDPC [83] | Unsupervised | 49.3 | 30.3 | 42.0
CoCLR [26] | Unsupervised | 52.0 | 31.3 | 37.6
MetaUVFS∗ [64] | Unsupervised | 66.1 | 40.0 | 50.9
HyRSM++ | Unsupervised | 68.0 | 41.0 | 55.0
[Figure 14: 5-way 1-shot accuracy (%) on UCF101, HMDB51, and Kinetics for cluster numbers from 50 to 200; accuracy peaks around 150 clusters.]
Fig. 14 Ablation study of different cluster numbers under the 5-way 1-shot unsupervised few-shot setting.
To conduct the semi-supervised few-shot evaluation, we follow the mainstream distractor setting [30, 38, 113], where the unlabeled set contains interfering classes in each episodic task. This setting is more realistic and requires the model to be robust to noisy samples from other classes. In our experiments, we fix the number of unlabeled videos in an episodic task to 100.

Table 12 provides the comparison of our HyRSM++ against state-of-the-art methods on the two standard semi-supervised few-shot benchmarks. We find that HyRSM++ substantially surpasses previous approaches such as LIM [113]. Under the semi-supervised 5-way 1-shot scenario, HyRSM++ produces performance gains of 3.8% and 2.5% over LIM on Kinetics and SSv2-Small with the Inception-v3 backbone, respectively. In particular, when using the ResNet-50 backbone, our method is even superior to the multi-modal fusion method (i.e., LIM), which indicates that HyRSM++ produces more accurate pseudo-labels for the unlabeled data and can then expand the support set to boost the classification accuracy of the query videos. In addition, compared to our supervised counterpart (i.e., HyRSM++ w/o unlabeled data), adding unlabeled data helps alleviate the data scarcity problem and promotes few-shot classification accuracy. We observe that when ResNet-50 is adopted as the backbone, the performance of HyRSM++ with unlabeled data improves by 5.1% compared to that without unlabeled data under the 5-way 1-shot Kinetics evaluation.

To further investigate the effect of unlabeled videos in an episode, we conduct comparative experiments with varying numbers of unlabeled videos in Figure 12 and Figure 13. Experimental results show that as the number of unlabeled samples increases, the performance also increases gradually, indicating that the introduction of unlabeled data helps generalize to unseen categories. Furthermore, we notice that the improvement in the 1-shot setting is more significant than that in the 5-shot setting, which shows that under low-sample conditions, unlabeled videos improve the estimation of the distribution of new categories more effectively. Meanwhile, once the amount of unlabeled data increases to a certain level, the performance starts to saturate slowly.
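A minimal sketch of how pseudo-labeled unlabeled videos could be used to expand the support set in this setting is given below; the confidence-thresholding scheme and the names are our assumptions, not the paper's exact procedure:

```python
import torch

def expand_support_with_pseudo_labels(support_feats, support_labels,
                                      unlabeled_feats, n_way, threshold=0.8):
    """Assign confident pseudo-labels to unlabeled videos and append them to the support set."""
    prototypes = torch.stack(
        [support_feats[support_labels == c].mean(dim=0) for c in range(n_way)]
    )
    logits = -torch.cdist(unlabeled_feats, prototypes)   # closer prototype -> higher logit
    probs = logits.softmax(dim=1)
    conf, pseudo = probs.max(dim=1)
    keep = conf > threshold                              # discard distractor-like samples
    new_feats = torch.cat([support_feats, unlabeled_feats[keep]])
    new_labels = torch.cat([support_labels, pseudo[keep]])
    return new_feats, new_labels
```

Low-confidence unlabeled videos, which are likely distractors from other classes, are simply left out of the expanded support set in this sketch.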
6 Extension to Unsupervised Few-shot Action Recognition

We also extend the proposed HyRSM++ to solve the challenging unsupervised few-shot action recognition task, where labels for the training videos are not available. Following previous work [38, 36], we adopt the "clustering first and then meta-learning" paradigm to construct few-shot tasks and exploit unlabeled data for training. Our experiments are based on unsupervised ResNet-50 initialization [97], which is self-supervised pre-trained on Kinetics-400 [8] without accessing any label information. During the clustering process, we utilize the K-means clustering strategy for each dataset to obtain 150 clusters.

As presented in Table 13, we compare HyRSM++ with current state-of-the-art methods on the UCF101, HMDB51 and Kinetics datasets under the 5-way 1-shot setting. Note that HyRSM++ and MetaUVFS [64] use the same ResNet-50 structure as the feature extractor, and our HyRSM++ shows better performance on each dataset. In particular, our method achieves 68.0% on the UCF101 dataset, a 1.9% improvement over MetaUVFS, and even surpasses the fully supervised ARN. The superior performance of HyRSM++ reveals that our approach of leveraging relations within and across videos, together with the flexible metric, performs effectively in the low-shot regime. Moreover, this phenomenon also demonstrates the potential of our method to learn a strongly robust few-shot model using only unlabeled videos, even though HyRSM++ is not specifically designed for the unsupervised few-shot action recognition task.

In these experiments, one parameter involved in applying HyRSM++ to the unsupervised few-shot setting is the number of clusters. In Figure 14, we display the performance comparison under different numbers of clusters. Results show that performance peaks at 150 clusters: if the cluster number is too small, it may lead to under-clustering, and if it is too large, it may cause over-clustering, damaging the performance.
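For intuition, a rough sketch of the "clustering first, then meta-learning" episode construction follows; the cluster count of 150 is taken from the paper, while the use of scikit-learn K-means and all names are our own illustrative assumptions:

```python
import numpy as np
from sklearn.cluster import KMeans

def build_episodes(video_feats, n_clusters=150, n_way=5, k_shot=1,
                   n_query=1, n_episodes=100, seed=0):
    """Cluster unlabeled video features into pseudo-classes, then sample few-shot episodes."""
    rng = np.random.default_rng(seed)
    pseudo_labels = KMeans(n_clusters=n_clusters, random_state=seed).fit_predict(video_feats)
    # only clusters with enough members can serve as a pseudo-class
    valid = [c for c in range(n_clusters) if (pseudo_labels == c).sum() >= k_shot + n_query]
    episodes = []
    for _ in range(n_episodes):
        classes = rng.choice(valid, size=n_way, replace=False)
        support, query = [], []
        for way, c in enumerate(classes):
            idx = rng.choice(np.where(pseudo_labels == c)[0], size=k_shot + n_query, replace=False)
            support += [(i, way) for i in idx[:k_shot]]
            query += [(i, way) for i in idx[k_shot:]]
        episodes.append((support, query))
    return episodes
```

Each cluster is treated as a pseudo-class, so meta-training can proceed exactly as in the supervised setting, only with cluster assignments in place of ground-truth labels.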
7 Conclusion

In this work, we have proposed a hybrid relation guided temporal set matching (HyRSM++) approach for few-shot action recognition. First, we design a hybrid relation module to model the rich semantic relevance within one video and across different videos in an episodic task, generating task-specific features. Second, built upon these representative task-specific features, an efficient set matching metric is proposed that is resilient to misalignment and matches videos accurately. During the matching process, a temporal coherence regularization is further imposed to exploit temporal order information. Furthermore, we extend HyRSM++ to the more challenging semi-supervised few-shot action recognition and unsupervised few-shot action recognition problems. Experimental results demonstrate that our HyRSM++ achieves state-of-the-art performance on multiple standard benchmarks.
Acknowledgements This work is supported by the National Natural Science Foundation of China under grant 61871435, Fundamental Research Funds for the Central Universities no.2019kfyXKJC024, the 111 Project on Computational Intelligence and Intelligent Control under Grant B18024, and Alibaba Group through the Alibaba Research Intern Program.
References

1. Antoniou A, Storkey A (2019) Assume, augment and learn: Unsupervised few-shot meta-learning via random labels and data augmentation. arXiv preprint arXiv:1902.09884
2. Bai Y, Ding H, Sun Y, Wang W (2018) Convolutional set matching for graph similarity. arXiv preprint arXiv:1810.10866
3. Bai Y, Ding H, Gu K, Sun Y, Wang W (2020) Learning-based efficient graph similarity computation via multi-scale convolutional set matching. In: AAAI, vol 34, pp 3219–3226
4. Berthelot D, Carlini N, Goodfellow I, Papernot N, Oliver A, Raffel CA (2019) MixMatch: A holistic approach to semi-supervised learning. In: NeurIPS, vol 32
5. Bishay M, Zoumpourlis G, Patras I (2019) TARN: Temporal attentive relation network for few-shot and zero-shot action recognition. In: BMVC, p 154
6. Caba Heilbron F, Escorcia V, Ghanem B, Carlos Niebles J (2015) ActivityNet: A large-scale video benchmark for human activity understanding. In: CVPR, pp 961–970
7. Cao K, Ji J, Cao Z, Chang CY, Niebles JC (2020) Few-shot video classification via temporal alignment. In: CVPR, pp 10618–10627
8. Carreira J, Zisserman A (2017) Quo vadis, action recognition? A new model and the Kinetics dataset. In: CVPR, pp 6299–6308
9. Chen Z, Fu Y, Zhang Y, Jiang YG, Xue X, Sigal L (2019) Multi-level semantic feature augmentation for one-shot learning. TIP 28(9):4594–4605
10. Cho K, Van Merriënboer B, Gulcehre C, Bahdanau D, Bougares F, Schwenk H, Bengio Y (2014) Learning phrase representations using RNN encoder-decoder for statistical machine translation. arXiv preprint arXiv:1406.1078
11. Conners RW, Harlow CA (1980) A theoretical comparison of texture algorithms. TPAMI (3):204–222
12. Coskun H, Zia MZ, Tekin B, Bogo F, Navab N, Tombari F, Sawhney H (2021) Domain-specific priors and meta learning for few-shot first-person action recognition. TPAMI
13. Damen D, Doughty H, Farinella G, Fidler S, Furnari A, Kazakos E, Moltisanti D, Munro J, Perrett T, Price W, et al. (2020) The EPIC-KITCHENS dataset: Collection, challenges and baselines. TPAMI (01):1–1
14. Damen D, Doughty H, Farinella GM, Furnari A, Kazakos E, Ma J, Moltisanti D, Munro J, Perrett T, Price W, et al. (2020) Rescaling egocentric vision. arXiv preprint arXiv:2006.13256
15. Deng J, Dong W, Socher R, Li LJ, Li K, Fei-Fei L (2009) ImageNet: A large-scale hierarchical image database. In: CVPR, pp 248–255
16. Dubuisson MP, Jain AK (1994) A modified Hausdorff distance for object matching. In: ICPR, IEEE, vol 1, pp 566–568
17. Fei-Fei L, Fergus R, Perona P (2006) One-shot learning of object categories. TPAMI 28(4):594–611
18. Feichtenhofer C, Fan H, Malik J, He K (2019) SlowFast networks for video recognition. In: ICCV, pp 6202–6211
19. Finn C, Abbeel P, Levine S (2017) Model-agnostic meta-learning for fast adaptation of deep networks. In: ICML
20. Fu Y, Zhang L, Wang J, Fu Y, Jiang YG (2020) Depth guided adaptive meta-fusion network for few-shot video recognition. In: ACMMM, pp 1142–1151
21. Gao Y (2003) Efficiently comparing face images using a modified Hausdorff distance. IEE Proceedings - Vision, Image and Signal Processing 150(6):346–350
22. Goroshin R, Bruna J, Tompson J, Eigen D, LeCun Y (2015) Unsupervised learning of spatiotemporally coherent metrics. In: ICCV, pp 4086–4093
23. Goyal R, Ebrahimi Kahou S, Michalski V, Materzynska J, Westphal S, Kim H, Haenel V, Fruend I, Yianilos P, Mueller-Freitag M, et al. (2017) The "something something" video database for learning and evaluating visual common sense. In: ICCV, pp 5842–5850
24. Grauman K, Westbury A, Byrne E, Chavis Z, Furnari A, Girdhar R, Hamburger J, Jiang H, Liu M, Liu X, et al. (2022) Ego4D: Around the world in 3,000 hours of egocentric video. In: CVPR, pp 18995–19012
25. Graves A, Mohamed Ar, Hinton G (2013) Speech recognition with deep recurrent neural networks. In: ICASSP, pp 6645–6649
26. Han T, Xie W, Zisserman A (2020) Self-supervised co-training for video representation learning. In: NeurIPS, vol 33, pp 5679–5690
27. Haresh S, Kumar S, Coskun H, Syed SN, Konin A, Zia Z, Tran QH (2021) Learning by aligning videos in time. In: CVPR, pp 5548–5558
28. He K, Zhang X, Ren S, Sun J (2016) Deep residual learning for image recognition. In: CVPR, pp 770–778
29. Hou R, Chang H, Ma B, Shan S, Chen X (2019) Cross attention network for few-shot classification. In: NeurIPS, pp 4003–4014
30. Hsu K, Levine S, Finn C (2018) Unsupervised learning via meta-learning. In: ICLR
31. Huang H, Zhang J, Zhang J, Wu Q, Xu C (2021) PTN: A Poisson transfer network for semi-supervised few-shot learning. In: AAAI, vol 35, pp 1602–1609
32. Huang K, Geng J, Jiang W, Deng X, Xu Z (2021) Pseudo-loss confidence metric for semi-supervised few-shot learning. In: ICCV, pp 8671–8680
33. Huang Y, Yang L, Sato Y (2022) Compound prototype matching for few-shot action recognition. In: ECCV, Springer, pp 351–368
34. Huttenlocher DP, Klanderman GA, Rucklidge WJ (1993) Comparing images using the Hausdorff distance. TPAMI 15(9):850–863
35. Jesorsky O, Kirchberg KJ, Frischholz RW (2001) Robust face detection using the Hausdorff distance. In: AVBPA, Springer, pp 90–95
36. Ji Z, Zou X, Huang T, Wu S (2019) Unsupervised few-shot learning via self-supervised training. arXiv preprint arXiv:1912.12178
37. Jing L, Yang X, Liu J, Tian Y (2018) Self-supervised spatiotemporal feature learning via video rotation prediction. arXiv preprint arXiv:1811.11387
38. Khodadadeh S, Boloni L, Shah M (2019) Unsupervised meta-learning for few-shot image classification. In: NeurIPS, vol 32
39. Kingma DP, Ba J (2014) Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980
40. Kliper-Gross O, Hassner T, Wolf L (2011) One shot similarity metric learning for action recognition. In: SIMBAD, Springer, pp 31–45
41. Koizumi Y, Yatabe K, Delcroix M, Masuyama Y, Takeuchi D (2020) Speech enhancement using self-adaptation and multi-head self-attention. In: ICASSP, pp 181–185
42. Kuehne H, Serre T, Jhuang H, Garrote E, Poggio T, Serre T (2011) HMDB: A large video database for human motion recognition. In: ICCV, DOI 10.1109/ICCV.2011.6126543
43. Kumar Dwivedi S, Gupta V, Mitra R, Ahmed S, Jain A (2019) ProtoGAN: Towards few shot learning for action recognition. In:
3426
+ ICCVW, pp 0–0 16
3427
+ 44. Lazarou M, Stathaki T, Avrithis Y (2021) Iterative label clean-
3428
+ ing for transductive and semi-supervised few-shot learning. In:
3429
+ ICCV, pp 8751–8760 4
3430
+ 45. Lee DH, et al. (2013) Pseudo-label: The simple and efficient
3431
+ semi-supervised learning method for deep neural networks. In:
3432
+ ICMLW, vol 3, p 896 7
3433
+ 46. Li A, Luo T, Xiang T, Huang W, Wang L (2019) Few-shot learn-
3434
+ ing with global class representations. In: ICCV, pp 9715–9724
3435
+ 7
3436
+ 47. Li H, Eigen D, Dodge S, Zeiler M, Wang X (2019) Finding task-
3437
+ relevant features for few-shot learning by category traversal. In:
3438
+ CVPR, pp 1–10 2, 3, 5, 13
3439
+ 48. Li S, Liu H, Qian R, Li Y, See J, Fei M, Yu X, Lin W (2021)
3440
+ Ttan: Two-stage temporal alignment network for few-shot action
3441
+ recognition. arXiv preprint arXiv:210704782 8, 9, 10
3442
+ 49. Li X, Sun Q, Liu Y, Zhou Q, Zheng S, Chua TS, Schiele B (2019)
3443
+ Learning to self-train for semi-supervised few-shot classification.
3444
+ In: NeurIPS, vol 32 4, 7
3445
+ 50. Li Z, Zhou F, Chen F, Li H (2017) Meta-sgd: Learning to learn
3446
+ quickly for few-shot learning. arXiv preprint arXiv:170709835 3
3447
+ 51. Lin J, Gan C, Wang K, Han S (2020) Tsm: Temporal shift module
3448
+ for efficient and scalable video understanding on edge devices.
3449
+ TPAMI 1
3450
+ 52. Liu L, Shao L, Li X, Lu K (2015) Learning spatio-temporal rep-
3451
+ resentations for action recognition: A genetic programming ap-
3452
+ proach. TCYB 46(1):158–170 1
3453
+ 53. Liu X, Gao J, He X, Deng L, Duh K, Wang Yy (2015) Repre-
3454
+ sentation learning using multi-task deep neural networks for se-
3455
+ mantic classification and information retrieval. In: NAACL, pp
3456
+ 912–921 2
3457
+ 54. Liu Y, Zhang X, Zhang S, He X (2020) Part-aware prototype
3458
+ network for few-shot semantic segmentation. In: ECCV, pp 142–
3459
+ 158 7
3460
+ 55. Lu J, Gong P, Ye J, Zhang C (2020) Learning from very few
3461
+ samples: A survey. arXiv preprint arXiv:200902653 3
3462
+ 56. Lu J, Jin S, Liang J, Zhang C (2020) Robust few-shot learning
3463
+ for user-provided data. TNNLS 32(4):1433–1447 3
3464
+ 57. Lu P, Bai T, Langlais P (2019) Sc-lstm: Learning task-specific
3465
+ representations in multi-task learning for sequence labeling. In:
3466
+ NAACL, pp 2396–2406 2
3467
+ 58. Mitra A, Biswas S, Bhattacharyya C (2016) Bayesian modeling
3468
+ of temporal coherence in videos for entity discovery and summa-
3469
+ rization. TPAMI 39(3):430–443 3
3470
+ 59. Mobahi H, Collobert R, Weston J (2009) Deep learning from
3471
+ temporal coherence in video. In: ICML, pp 737–744 3, 6, 13
3472
+ 60. Mohanaiah P, Sathyanarayana P, GuruKumar L (2013) Image
3473
+ texture feature extraction using glcm approach. IJSRP 3(5):1–5
3474
+ 3
3475
+ 61. M¨uller M (2007) Dynamic time warping. Information Retrieval
3476
+ for Music and Motion pp 69–84 11
3477
+ 62. Nguyen KD, Tran QH, Nguyen K, Hua BS, Nguyen R (2022) In-
3478
+ ductive and transductive few-shot video classification via appear-
3479
+ ance and temporal alignments. In: ECCV, Springer, pp 471–487
3480
+ 2, 4, 8, 10
3481
+ 63. Nishiyama M, Yuasa M, Shibata T, Wakasugi T, Kawahara T,
3482
+ Yamaguchi O (2007) Recognizing faces of moving people by hi-
3483
+ erarchical image-set matching. In: CVPR, pp 1–8 3
3484
+ 64. Patravali J, Mittal G, Yu Y, Li F, Chen M (2021) Unsupervised
3485
+ few-shot action recognition via action-appearance aligned meta-
3486
+ adaptation. In: ICCV, pp 8484–8494 4, 16
3487
+ 65. Peng B, Lei J, Fu H, Zhang C, Chua TS, Li X (2018) Unsuper-
3488
+ vised video action clustering via motion-scene interaction con-
3489
+ straint. TCSVT 30(1):131–144 1
3490
+ 66. Peng M, Zhang Q, Xing X, Gui T, Fu J, Huang X (2019) Learning
3491
+ task-specific representation for novel words in sequence labeling.
3492
+ In: IJCAI 2
3493
+ 67. Perez L, Wang J (2017) The effectiveness of data augmenta-
3494
+ tion in image classification using deep learning. arXiv preprint
3495
+ arXiv:171204621 3
3496
+ 68. Perrett T, Masullo A, Burghardt T, Mirmehdi M, Damen D
3497
+ (2021) Temporal-relational crosstransformers for few-shot action
3498
+ recognition. In: CVPR, pp 475–484 2, 4, 6, 7, 8, 9, 10, 11, 12, 13
3499
+ 69. Qin T, Li W, Shi Y, Gao Y (2020) Diversity helps: Unsupervised
3500
+ few-shot learning via distribution shift-based data augmentation.
3501
+ arXiv preprint arXiv:200405805 4
3502
+ 70. Ratner AJ, Ehrenberg HR, Hussain Z, Dunnmon J, R´e C (2017)
3503
+ Learning to compose domain-specific transformations for data
3504
+ augmentation. In: NeurIPS, NIH Public Access, vol 30, p 3239 3
3505
+ 71. Ren M, Triantafillou E, Ravi S, Snell J, Swersky K, Tenenbaum
3506
+ JB, Larochelle H, Zemel RS (2018) Meta-learning for semi-
3507
+ supervised few-shot classification. In: ICLR 3, 7
3508
+ 72. Rezaei M, Fr¨anti P (2016) Set matching measures for external
3509
+ cluster validity. TKDE 28(8):2173–2186 3
3510
+ 73. Saito Y, Nakamura T, Hachiya H, Fukumizu K (2020) Exchange-
3511
+ able deep neural networks for set-to-set matching and learning.
3512
+ In: ECCV, Springer, pp 626–646 3
3513
+
3514
+ HyRSM++: Hybrid Relation Guided Temporal Set Matching for Few-shot Action Recognition
3515
+ 19
3516
+ 74. Santoro A, Bartunov S, Botvinick M, Wierstra D, Lillicrap T
3517
+ (2016) Meta-learning with memory-augmented neural networks.
3518
+ In: ICML, PMLR, pp 1842–1850 3
3519
+ 75. Selvaraju RR, Cogswell M, Das A, Vedantam R, Parikh D, Batra
3520
+ D (2017) Grad-cam: Visual explanations from deep networks via
3521
+ gradient-based localization. In: ICCV, pp 618–626 14
3522
+ 76. Snell J, Swersky K, Zemel R (2017) Prototypical networks for
3523
+ few-shot learning. In: NeurIPS, vol 30, pp 4077–4087 3, 9, 11
3524
+ 77. Sohn K, Berthelot D, Carlini N, Zhang Z, Zhang H, Raffel CA,
3525
+ Cubuk ED, Kurakin A, Li CL (2020) Fixmatch: Simplifying
3526
+ semi-supervised learning with consistency and confidence. In:
3527
+ NeurIPS, vol 33, pp 596–608 7
3528
+ 78. Soomro K, Zamir AR, Shah M (2012) Ucf101: A dataset of 101
3529
+ human actions classes from videos in the wild. arXiv preprint
3530
+ arXiv:12120402 9
3531
+ 79. Sudha N, et al. (2007) Robust hausdorff distance measure for
3532
+ face recognition. Pattern Recognition 40(2):431–442 3, 6
3533
+ 80. Sung F, Yang Y, Zhang L, Xiang T, Torr PH, Hospedales TM
3534
+ (2018) Learning to compare: Relation network for few-shot
3535
+ learning. In: CVPR, pp 1199–1208 3
3536
+ 81. Szegedy C, Vanhoucke V, Ioffe S, Shlens J, Wojna Z (2016)
3537
+ Rethinking the inception architecture for computer vision. In:
3538
+ CVPR, pp 2818–2826 11
3539
+ 82. Takacs B (1998) Comparing face images using the modified
3540
+ hausdorff distance. Pattern recognition 31(12):1873–1881 3, 6
3541
+ 83. Tao L, Wang X, Yamasaki T (2020) Self-supervised video rep-
3542
+ resentation learning using inter-intra contrastive framework. In:
3543
+ ACMMM, pp 2193–2201 16
3544
+ 84. Thatipelli A, Narayan S, Khan S, Anwer RM, Khan FS, Ghanem
3545
+ B (2022) Spatio-temporal relation modeling for few-shot action
3546
+ recognition. In: CVPR 4, 8, 10, 11, 12, 13
3547
+ 85. Vaswani A, Shazeer N, Parmar N, Uszkoreit J, Jones L, Gomez
3548
+ AN, Kaiser Ł, Polosukhin I (2017) Attention is all you need. In:
3549
+ NeurIPS, pp 5998–6008 5
3550
+ 86. Vinyals O, Blundell C, Lillicrap T, Kavukcuoglu K, Wierstra D
3551
+ (2016) Matching Networks for One Shot Learning. In: NeurIPS,
3552
+ arXiv:1606.04080v2 2, 3, 4, 8, 10
3553
+ 87. Wang J, Jiao J, Liu YH (2020) Self-supervised video representa-
3554
+ tion learning by pace prediction. In: ECCV, Springer, pp 504–521
3555
+ 16
3556
+ 88. Wang L, Xiong Y, Wang Z, Qiao Y, Lin D, Tang X, Van Gool
3557
+ L (2018) Temporal segment networks for action recognition in
3558
+ videos. TPAMI 41(11):2740–2755 1, 4
3559
+ 89. Wang X, Zhang S, Qing Z, Shao Y, Gao C, Sang N (2021) Self-
3560
+ supervised learning for semi-supervised temporal action pro-
3561
+ posal. In: CVPR, pp 1905–1914 1
3562
+ 90. Wang X, Zhang S, Qing Z, Shao Y, Zuo Z, Gao C, Sang N (2021)
3563
+ Oadtr: Online action detection with transformers. ICCV 12
3564
+ 91. Wang X, Zhang S, Qing Z, Tang M, Zuo Z, Gao C, Jin R, Sang N
3565
+ (2022) Hybrid relation guided set matching for few-shot action
3566
+ recognition. In: CVPR 3, 9
3567
+ 92. Weng R, Lu J, Hu J, Yang G, Tan YP (2013) Robust feature set
3568
+ matching for partial face recognition. In: ICCV, pp 601–608 3
3569
+ 93. Weng R, Lu J, Tan YP (2016) Robust point set matching for par-
3570
+ tial face recognition. TIP 25(3):1163–1176 3
3571
+ 94. Wu J, Zhang T, Zhang Z, Wu F, Zhang Y (2022) Motion-
3572
+ modulated temporal fragment alignment network for few-shot
3573
+ action recognition. In: CVPR, pp 9151–9160 2, 4, 8, 10
3574
+ 95. Xian Y, Korbar B, Douze M, Torresani L, Schiele B, Akata Z
3575
+ (2021) Generalized few-shot video classification with video re-
3576
+ trieval and feature generation. TPAMI 4
3577
+ 96. Xu D, Xiao J, Zhao Z, Shao J, Xie D, Zhuang Y (2019) Self-
3578
+ supervised spatiotemporal learning via video clip order predic-
3579
+ tion. In: CVPR, pp 10334–10343 16
3580
+ 97. Xu J, Wang X (2021) Rethinking self-supervised correspondence
3581
+ learning: A video frame-level similarity perspective. In: ICCV,
3582
+ pp 10075–10085 11, 13, 16
3583
+ 98. Ye HJ, Hu H, Zhan DC, Sha F (2020) Few-shot learning via
3584
+ embedding adaptation with set-to-set functions. In: CVPR, pp
3585
+ 8808–8817 3
3586
+ 99. Ye HJ, Ming L, Zhan DC, Chao WL (2022) Few-shot learning
3587
+ with a strong teacher. TPAMI 3
3588
+ 100. Yoo S, Bahng H, Chung S, Lee J, Chang J, Choo J (2019) Col-
3589
+ oring with limited data: Few-shot colorization via memory aug-
3590
+ mented networks. In: CVPR, pp 11283–11292 3
3591
+ 101. Yoon SW, Seo J, Moon J (2019) Tapnet: Neural network aug-
3592
+ mented with task-adaptive projection for few-shot learning. In:
3593
+ ICML, pp 7115–7123 2, 3, 13
3594
+ 102. Yu CB, Qin HF, Cui YZ, Hu XQ (2009) Finger-vein image recog-
3595
+ nition combining modified hausdorff distance with minutiae fea-
3596
+ ture matching. Interdisciplinary Sciences: Computational Life
3597
+ Sciences 1(4):280–289 6
3598
+ 103. Yu Z, Chen L, Cheng Z, Luo J (2020) Transmatch: A transfer-
3599
+ learning scheme for semi-supervised few-shot learning. In:
3600
+ CVPR, pp 12856–12864 4
3601
+ 104. Zhang H, Cisse M, Dauphin YN, Lopez-Paz D (2018) mixup:
3602
+ Beyond empirical risk minimization. In: ICLR 7
3603
+ 105. Zhang H, Zhang L, Qi X, Li H, Torr PH, Koniusz P (2020) Few-
3604
+ shot action recognition with permutation-invariant attention. In:
3605
+ ECCV, Springer, pp 525–542 1, 4, 8, 9, 10, 16
3606
+ 106. Zhang S, Zhou J, He X (2021) Learning implicit temporal align-
3607
+ ment for few-shot video classification. In: IJCAI 2, 4, 8, 9, 10
3608
+ 107. Zhao C, Shi W, Deng Y (2005) A new hausdorff distance for
3609
+ image matching. Pattern Recognition Letters 26(5):581–586 3
3610
+ 108. Zheng S, Chen S, Jin Q (2022) Few-shot action recognition
3611
+ with hierarchical matching and contrastive learning. In: ECCV,
3612
+ Springer, pp 297–313 2, 4, 8, 10
3613
+ 109. Zhou B, Andonian A, Oliva A, Torralba A (2018) Temporal rela-
3614
+ tional reasoning in videos. In: ECCV, pp 803–818 8
3615
+ 110. Zhou ZH, Li M (2005) Tri-training: Exploiting unlabeled data
3616
+ using three classifiers. TKDE 17(11):1529–1541 7
3617
+ 111. Zhou ZQ, Wang B (2009) A modified hausdorff distance using
3618
+ edge gradient for robust object matching. In: IASP, IEEE, pp
3619
+ 250–254 6
3620
+ 112. Zhu L, Yang Y (2018) Compound memory networks for few-shot
3621
+ video classification. In: ECCV, pp 751–766 1, 2, 4, 8, 9, 10, 15,
3622
+ 16
3623
+ 113. Zhu L, Yang Y (2020) Label independent memory for semi-
3624
+ supervised few-shot video classification. TPAMI 44(1):273–285,
3625
+ DOI 10.1109/TPAMI.2020.3007511 4, 7, 8, 10, 12, 15, 16
3626
+
BdE1T4oBgHgl3EQfpQWt/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
CNAzT4oBgHgl3EQfh_3R/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af084831e07578dff35c98200b524f590c6810d12c9a3175afd5d23a55d4807f
3
+ size 1966125
CNE1T4oBgHgl3EQfpgW7/content/2301.03333v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1353a64ed35ae431267642a315b673e92e18118dc75134d102dd687a77ff4fbb
3
+ size 368026
CNE1T4oBgHgl3EQfpgW7/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d7f0084f652a7fbc55ceb6f753972583727d3a06ce7e32fc2fe7c62eeedd065
3
+ size 1638445
CNE1T4oBgHgl3EQfpgW7/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ff245893cd64b861fe5b9e7a2c21222c1f79b14420bc0f96a587b7f01d53b34
3
+ size 69447
ENE1T4oBgHgl3EQf-QbO/content/tmp_files/2301.03567v1.pdf.txt ADDED
@@ -0,0 +1,4048 @@
1
+ Safer Together: Machine Learning Models Trained on Shared Accident
2
+ Datasets Predict Construction Injuries Better than Company-Specific
3
+ Models
4
+ Submitted to Automation in Construction
5
+ Antoine J.-P. Tixier1a, Matthew R. Hallowella,b
6
+ aSafetyAI R&D
7
+ bUniversity of Colorado at Boulder
8
+ Abstract
9
+ Highlights
10
+ • 9 companies from 3 domains (construction, electric T&D, oil & gas) shared their accident datasets.
11
+ • Machine learning models were trained to predict safety outcomes from fundamental attributes.
12
+ • Models trained on all datasets (full generic models) outperformed the company-specific models
13
+ in 82% of the company-domain-outcome combinations, with large gains in F1 score (+4.4 on
14
+ average and up to +15.3).
15
+ • On average, generic models predicted 2.26 categories more than specific models (up to 7), making
16
+ for more useful forecasts in practice.
17
+ • Per-domain generic models were not always better than full generic models.
18
+ • Combining generic and specific models (data quantity and relevance) was often very beneficial.
19
+ • Generic models give companies devoid of accident datasets access to safety predictions.
20
+ • Generic models address safety cross-organizational learning and dissemination in construction.
21
+ In this study, we capitalized on a collective dataset repository of 57k accidents from 9 companies be-
22
+ longing to 3 domains and tested whether models trained on multiple datasets (generic models) predicted
23
+ safety outcomes better than the company-specific models. We experimented with full generic models
24
+ (trained on all data), per-domain generic models (construction, electric T&D, oil & gas), and with en-
25
+ sembles of generic and specific models. Results are very positive, with generic models outperforming the
26
+ company-specific models in most cases while also generating finer-grained, hence more useful, forecasts.
27
+ Successful generic models remove the needs for training company-specific models, saving a lot of time
28
+ and resources, and give small companies, whose accident datasets are too limited to train their own mod-
29
+ els, access to safety outcome predictions. It may still however be advantageous to train specific models
30
+ to get an extra boost in performance through ensembling with the generic models. Overall, by learning
31
+ lessons from a pool of datasets whose accumulated experience far exceeds that of any single company,
32
+ and making these lessons easily accessible in the form of simple forecasts, generic models tackle the
33
+ holy grail of safety cross-organizational learning and dissemination in the construction industry.
34
+ Keywords: construction safety, artificial intelligence, supervised learning, injury prediction,
35
+ transfer learning, data sharing, collective intelligence
36
+ 1antoine.tixier@safetyfunction.com
37
+ arXiv:2301.03567v1 [cs.LG] 9 Jan 2023
38
+
39
+ 1. Introduction
40
+ The SafetyAI council is a community of large organizations from the construction, oil &
41
+ gas, and electric Transmission and Delivery (T&D) domains, that share their safety-related data
42
+ with the SafetyAI Research and Development (R&D) team.
43
+ Before exploiting the data, the R&D team is in charge of standardizing the datasets received
44
+ by each company, which is crucial, as each one features different variables and different category
45
+ names for each variable. Standardization makes sure that all datasets are based on the same
46
+ taxonomy, i.e., speak the same language.
47
+ The SafetyAI community dataset, comprising close to a million events including near misses,
48
+ observations, good catches, etc., is only accessible to the R&D team, a neutral party, which guar-
49
+ antees that it is impossible for companies to see each other’s data, and that the output of all the
50
+ R&D conducted on the collective dataset is made available to the entire community. This is of
51
+ paramount importance, in a very competitive environment.
52
+ In this study, we started by extracting attributes from accident reports. We briefly introduce
53
+ the attribute framework in what follows.
54
+ 1.1. Attribute-based framework
55
+ Attributes are basic descriptors of construction work that are observable before accident
56
+ occurrence, and cover means, methods, and environmental conditions [1, 2]. One advantage of
57
+ the attribute-based framework over modeling at the task or work package level is that attributes
58
+ are fundamental and universal. That is, any situation from any site around the world, in any
59
+ industry sector, can be characterized by a set of attributes. Attributes can be recorded on-the-fly
60
+ on site, or can be extracted offline from various mediums such as photos and text reports. For
61
+ instance, four attributes can be extracted from the narrative worker tripped on a cable
62
+ when carrying a 2x4 to his truck: (1) cable, (2) object on the floor, (3) lumber, and
63
+ (4) light vehicle.
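+ As a rough illustration of how attributes can be pulled out of such free text, the sketch below matches a narrative against a small keyword lexicon. The lexicon entries and the function are toy examples invented here; the actual tool of [3], described next, relies on far larger rule sets and lexicons.

# Minimal lexicon-based attribute extraction (illustrative only; not the tool of [3]).
LEXICON = {
    "cable": ["cable", "cord", "wire"],
    "object on the floor": ["tripped on", "on the floor", "lying on the ground"],
    "lumber": ["2x4", "lumber", "plank"],
    "light vehicle": ["truck", "pickup", "van"],
}

def extract_attributes(narrative: str) -> set:
    """Return the attributes whose keywords appear in the narrative."""
    text = narrative.lower()
    return {attr for attr, keywords in LEXICON.items()
            if any(kw in text for kw in keywords)}

print(extract_attributes("Worker tripped on a cable when carrying a 2x4 to his truck."))
# -> {'cable', 'object on the floor', 'lumber', 'light vehicle'} (set order may vary)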
64
+ Narratives are particularly well-suited if the goal is to use attributes for predictive modeling.
65
+ Indeed, in incident report databases, narratives are often paired with outcomes such as accident
66
+ type, injury severity, body part impacted, etc. Attributes also completely anonymize narratives,
67
+ which is especially desirable when considering a pool of datasets aggregated from different
68
+ companies. For any given event, everything that remains is a set of attributes and a set of
69
+ standardized safety outcomes.
70
+ However, manually extracting attributes from large amounts of text reports is very costly in
71
+ terms of human resources and poses inter-annotator agreement issues. To solve this problem, we
72
+ developed and validated a Natural Language Processing (NLP) tool based on rules and lexicons
73
+ [3]. We later proved that using the attributes extracted by the tool to predict safety outcomes
74
+ was effective and valid [4, 5]. We also used the attributes extracted by the tool for unsupervised
75
+ learning applications, such as clustering and visualization [6], and risk modeling and simulation
76
+ [7].
77
+ 1.2. Differences with our previous research and objective of the current study
78
+ In our original study [4], we provided a proof of concept for predicting safety outcomes
79
+ from attributes, both extracted with the NLP tool. Then, in [5], we showed that attributes were
80
+ still highly predictive when the safety outcomes were given by independent human annotations,
81
+ which definitely validated the approach. We also used a much larger dataset than in the orig-
82
+ inal study, two new supervised learning algorithms, model stacking, a healthier experimental
83
+ setup with more appropriate performance metrics, and we analyzed per-category attribute im-
84
+ portance scores. We also showed that unlike what we had concluded in [4], injury severity was
85
+ predictable from attributes.
86
88
+ In the present research, we addressed a new, completely different problem.
89
+ We had access to a pool of accident datasets coming from 9 companies, and our goal was to:
90
+ “Test whether predictive models trained on a generic dataset (i.e., aggregated from the datasets
91
+ of multiple companies) outperformed the models trained on the specific dataset of each com-
92
+ pany.”
93
+ More precisely, we experimented with two types of generic models:
94
+ • Full generic model: one model trained on the datasets of all companies.
95
+ • Per-domain generic models: one model per industry sector, trained only on the datasets
96
+ of the companies involved in that sector (or the parts thereof, as some companies belong
97
+ to multiple domains).
98
+ The potential advantages of generic models are numerous:
99
+ 1. Usually with machine learning, the more data, the better, so generic models are expected
100
+ to bring improvements in predictive skill compared to the company-specific models. This
101
+ is not guaranteed however, as one important question is whether (1) more data (generic
102
+ datasets) or (2) more relevant data (specific datasets) is better.
103
+ 2. By being trained on larger datasets, the generic models learn to predict a greater variety
104
+ of outcome categories than the specific models, making for more useful forecasts.
105
+ 3. Successful generic models would remove the needs for training specific models for each
106
+ company, saving a lot of time and resources.
107
+ 4. Alternatively, if company-specific models are already available, combining them with the
108
+ generic models may provide an extra boost in performance.
109
+ 5. Last but not least, successful generic models would give small companies -whose accident
110
+ datasets are too limited to train their own specific models- access to high quality safety
111
+ outcome forecasts.
112
+ From a high level, generic models tackle the holy grail of safety cross-organizational learn-
113
+ ing and dissemination in the construction industry. Indeed, generic models (1) learn lessons
114
+ from a pool of datasets whose quantity and diversity2 of accumulated experience far exceeds
115
+ that of any single company, and (2) disseminate these lessons as forecasts, which are clear, di-
116
+ rect, and easily accessible information, via, e.g., a user interface (desktop or mobile) or API
117
+ taking attributes as input and returning probabilities for each category of each outcome.
118
+ Moreover, one should note that in the pool, the individual biases of each dataset, due to
119
+ specific annotators, reporting practices and policies, etc., tend to average out. Consequently, the
120
+ lessons learned by the supervised learning algorithms on the generic datasets are more objective
121
+ and broadly applicable than that learned on the specific datasets.
122
+ 2. Background
123
+ The needs to share standardized incident data at the industry level to enable collaborative
124
+ learning have long been recognized in aviation and transportation [8]. Some examples include
125
+ 2Diversity of situations, means and methods, environmental conditions, geographical areas...
126
128
+ the NASA-managed Aviation Safety Reporting System (ASRS) database, created in 1976 and
129
+ featuring over a million incidents, or the European Coordination Center for Accident and In-
130
+ cident Reporting Systems (ECCAIRS) database, started in 2004. Such collective repositories
131
+ also exist in the chemical industry, with the Major Accident Reporting System (eMARS) of the
132
+ European Commission, launched in 1982, and the Process Safety Incident Database (PSID) of
133
+ the Center for Chemical Process Safety [9].
134
+ However, the construction industry still lacks comparable initiatives. The needs for data
135
+ storage and access infrastructures for construction safety did start to receive some attention
136
+ recently [10, 11], but most efforts placed themselves at the company or project level. Cross-
137
+ organizational safety data collection is still rare in practice [12, 13]. This is a major issue,
138
+ as collaborative machine learning at the industry level is not possible until a common pool of
139
+ standardized datasets has been put together. This provided the motivation for us to create the
140
+ SafetyAI council in 2020.
141
+ One should note that some consortiums already exist, such as the INGAA Foundation, the
142
+ Edison Electric Institute (EEI), the Construction Safety Research Alliance (CSRA), or the Na-
143
+ tional Safety Council (NSC), but their activities do not revolve around systematic large-scale
144
+ accident data collection and analysis. These initiatives rather involve working groups, com-
145
+ munities of practice, qualitative analyses, and conferences, towards building communications,
146
+ policies, best practices, business intelligence, safety culture and leadership, training material,
147
+ etc. In other words, they are based on “soft” methods for knowledge sharing and collabo-
148
+ rative learning at the human level. They do not primarily conduct “hard” scientific research
149
+ and software development, and do not pool accident datasets for AI applications and automatic
150
+ large-scale learning and dissemination.
151
+ 3. Data Description
152
+ As already explained, as part of the SafetyAI initiative, we had access to a pool of safety
153
+ datasets coming from nine large companies from the construction, oil & gas, and electric Trans-
154
+ mission and Delivery (T&D) domains. One company, Company73, also had about 600 corporate
155
+ services (office) events for the severity outcome. We kept these cases as training data for the
156
+ full generic model but did not train a specific model on them.
157
+ Member companies conduct work mostly in North America, and rely on their own teams as
158
+ well as contractors. The collective dataset covers the period 2000 to 2022, with a distribution
159
+ biased towards the last decade and especially more recent years.
160
+ While the entire pool comprises almost a million events including near misses and observa-
161
+ tions, we focused on accident cases only in this effort. As can be seen in Table 1, the sizes of
162
+ the individual datasets ranged from 2k to 20k cases, with an average of 6k per company. There
163
+ were 57262 accident cases in total, recorded over tens of millions of work hours.
164
+ We considered the same outcomes as in [5]: injury severity, body part impacted, injury
165
+ type, and accident type. The columns corresponding to each outcome were selected from the
166
+ company datasets and normalized to use a common, standard set of categories, shown in Table
167
+ 2. Not all outcomes were available for every event of every company. From the narrative of
168
+ each report, we extracted with the NLP tool [3] the original set of 80 attributes [3, 5], plus 11
169
+ new items (see Table A.7). We also used the tool to extract a fifth outcome, energy source, that
170
+ was not available in the company datasets.
171
+ 3Company names have been anonymized.
172
174
+ Company   Domains                Regions          n
+ Comp.1    Constr., elec.         Canada           4481
+ Comp.2    Oilgas                 California       1965
+ Comp.3    Constr., oilgas        NAM              4072
+ Comp.4    Elec.                  NAM              5321
+ Comp.5    Constr.                NAM              7245
+ Comp.6    Constr., elec.         NAM              4310
+ Comp.7    Elec., oilgas, corp.   NAM, Mexico      8345
+ Comp.8    Oilgas                 World⋆           19298
+ Comp.9    Elec.                  Southeast USA    2225
+ Table 1: Company overview. NAM: North America (Canada + USA). Constr.: construction. Elec.: electric T&D.
+ Oilgas: oil & gas. ⋆Including ships and rigs. Corp.: corporate.
222
+ Injury Severity: first aid 38994, report-only 6993, lost time 5319, medical 4913, recordable 1043
+ Body Part: hand 15782, head 10296, leg 6550, arm 5943, trunk 5375, foot 4632, multiple/entire 942
+ Injury Type: cut 14086, strain 10069, contusion 8558, foreign body 3348, pinch 1756, fracture 1681, burn 1454, irritation 1222, pain 1194, exhaustion 1054, bite 710
+ Accident Type: handling 6379, fall 5374, exposure 3986, struck 3834, contact 2269, caught 1758, overexertion 1523, equipment 1449, PPE 949, transitioning 578, error 425
+ Energy Source: motion 33958, gravity 15904, chemical 2411, biological 2044, thermal 1691, mechanical 611, pressure 296, electricity 181, radiation 166
+ Table 2: Outcome category counts, across all companies and domains. PPE: personal protective equipment.
314
+ 4. Experimental Setup
315
+ 4.1. Splits
316
+ Train, validation and test splits were created for each of the 51 company-domain-outcome
317
+ combinations for which at least 2 categories with more than 100 observations each were avail-
318
+ able (shown in Table 4), by randomly sampling without replacement 64%, 16%, and 20% of
319
+ cases, respectively. The counts summed over companies are shown in Table 3. Note that the
320
+ proportions we used in our previous work [5] were 81%, 9% and 10%, but in the present re-
321
+ search, we decided to reserve more observations for the validation and test sets to make them
322
+ more representative of the training sets, in order to increase the stability and validity of hyper-
323
+ parameter tuning and evaluation4.
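+ A minimal sketch of this splitting procedure is given below; the function name and random seed are ours, and the actual pipeline applies it separately to each company-domain-outcome combination.

import numpy as np

def make_splits(n_cases: int, seed: int = 0):
    """Split case indices into 64% train, 16% validation, 20% test, sampling without replacement."""
    idx = np.random.default_rng(seed).permutation(n_cases)
    n_train, n_val = int(0.64 * n_cases), int(0.16 * n_cases)
    return idx[:n_train], idx[n_train:n_train + n_val], idx[n_train + n_val:]

train_idx, val_idx, test_idx = make_splits(7245)  # e.g., one company's construction cases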
324
+ A specific model was trained on each of the 51 company-domain-outcome combinations for
325
+ which sufficient data were available, except for that one combination involving the corporate
326
+ cases, making for a total of 50 specific models.
327
+ For a given domain and a given outcome, the splits of the per-domain generic model were
328
+ obtained by combining, across all companies, the splits corresponding to that domain and that
329
+ outcome. In total, there was one per-domain generic model for each domain and for each
330
+ outcome, hence a total of 3 × 5 = 15 per-domain generic models.
331
+ For a given outcome, the splits of the full generic model were obtained by combining, across
332
+ all companies and across all domains, the splits corresponding to that outcome. In total, there
333
+ was one full generic model for each outcome, hence a total of 5 full generic models.
334
+ For each of the aforementioned cases, we tried 3 different algorithms, as will be explained
335
+ in subsection 4.3. Hence, a total of (15 + 5) × 3 = 60 generic models were trained.
336
+ 4Increasing the sizes of the validation and test sets was a good alternative to k-fold cross-validation, which would
337
+ have taken too much time.
338
340
+ Outcome       Domain          # Companies   Train    Val     Test
+ Severity      Construction    4             9980     2494    3119
+ Severity      Electric T&D    4             6672     1669    2085
+ Severity      Oil & Gas       4             18381    4595    5744
+ Severity      Corporate       1             418      105     131
+ Severity      Full            9             35451    8863    11079
+ Body Part     Construction    4             8209     2052    2565
+ Body Part     Electric T&D    4             6036     1508    1885
+ Body Part     Oil & Gas       3             15788    3947    4933
+ Body Part     Full            9             30033    7507    9383
+ Injury Type   Construction    4             6267     1566    1958
+ Injury Type   Electric T&D    4             4764     1191    1489
+ Injury Type   Oil & Gas       3             14960    3740    4675
+ Injury Type   Full            9             25991    6497    8122
+ Acc. Type     Construction    2             2740     685     856
+ Acc. Type     Electric T&D    2             1600     400     500
+ Acc. Type     Oil & Gas       3             2910     728     910
+ Acc. Type     Full            6             7250     1813    2266
+ En. Source    Construction    4             4875     1218    1524
+ En. Source    Electric T&D    3             2637     660     825
+ En. Source    Oil & Gas       2             2600     650     813
+ En. Source    Full            8             10112    2528    3162
+ Table 3: Split counts for each domain-outcome combination, summed over companies. For # Companies, full ≠
+ total as some companies belong to multiple domains (see Tables 1 and 4).
456
+ Construction
457
+ Electric T&D
458
+ Oil & Gas
459
+ Corp.
460
+ Comp.
461
+ S
462
+ B
463
+ IT
464
+ AT
465
+ E
466
+ S
467
+ B
468
+ IT
469
+ AT
470
+ E
471
+ S
472
+ B
473
+ IT
474
+ AT
475
+ E
476
+ S
477
+ 1
478
+ x
479
+ x
480
+ x
481
+ x
482
+ 2
483
+ x
484
+ x
485
+ x
486
+ 3
487
+ x
488
+ x
489
+ x
490
+ x
491
+ x
492
+ x
493
+ x
494
+ 4
495
+ x
496
+ x
497
+ x
498
+ x
499
+ x
500
+ 5
501
+ x
502
+ x
503
+ x
504
+ x
505
+ x
506
+ 6
507
+ x
508
+ x
509
+ x
510
+ x
511
+ x
512
+ x
513
+ x
514
+ x
515
+ 7
516
+ x
517
+ x
518
+ x
519
+ x
520
+ x
521
+ x
522
+ x
523
+ x
524
+ x
525
+ 8
526
+ x
527
+ x
528
+ x
529
+ x
530
+ x
531
+ 9
532
+ x
533
+ x
534
+ x
535
+ x
536
+ x
537
+ Table 4: The 51 company-domain-outcome combinations associated with at least 2 categories with more than 100
538
+ observations each. S: severity, B: body part, IT: injury type, AT: accident type, E: energy source. Corp.: corporate.
539
+ 4.2. Class imbalance
540
+ To address the problem of class imbalance, weights inversely proportional to category
541
+ counts in the training set were computed with the formula max(counts)/counts, like in
542
+ [5]. During training, these weights forced the models to pay more attention to the cases from
543
+ the minority categories. Per-category counts with training weights can be found in Tables B.8
544
+ and B.9 for the 15 domain-outcome combinations.
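+ A minimal sketch of this weighting scheme is shown below, assuming the training labels sit in a NumPy array; turning the per-category weights into per-observation sample weights is one common way to pass them to the fit methods of the models in subsection 4.3.

import numpy as np

def category_weights(y_train):
    """Weight each category by max(counts)/counts."""
    categories, counts = np.unique(y_train, return_counts=True)
    return dict(zip(categories, counts.max() / counts))

y_train = np.array(["first aid"] * 90 + ["lost time"] * 10)   # toy labels
w = category_weights(y_train)                                  # {'first aid': 1.0, 'lost time': 9.0}
sample_weight = np.array([w[c] for c in y_train])              # usable as fit(..., sample_weight=...)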
545
+ 4.3. Supervised learning algorithms
546
+ Like in [5], we relied on three popular machine learning models: Random Forest (RF) [14],
547
+ eXtreme Gradient Boosting (XGBoost or XGB) [15], and linear Support Vector Machine (SVM)
548
+ [16]. More precisely, we used Python's scikit-learn implementations of Random
549
+ Forest5 and linear SVM6, while, for XGBoost, we used the original Python library7 and in
550
+ 5https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
551
+ 6https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html
552
+ 7https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
553
555
+ particular the GPU-accelerated implementation of the "fast histogram" algorithm (gpu_hist)
556
+ as the tree method8.
557
+ For theoretical details about each algorithm, we refer the reader to our paper [5], publicly
558
+ available9.
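+ For concreteness, the three model families could be instantiated as sketched below; the hyperparameter values and the toy class-weight dictionary are placeholders rather than the settings used here (the actual grids are in Appendix C).

from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from xgboost import XGBClassifier

class_weight = {"first aid": 1.0, "lost time": 9.0}   # toy weights from subsection 4.2

rf = RandomForestClassifier(n_estimators=500, class_weight=class_weight, n_jobs=-1)
svm = LinearSVC(C=1.0, class_weight=class_weight)
xgb = XGBClassifier(n_estimators=500, tree_method="gpu_hist")  # per-sample weights can be passed at fit time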
559
+ 4.4. Hyperparameter optimization
560
+ We tuned the models by performing grid searches on the validation sets. Details about the
561
+ parameters searched are available in Appendix C. The final models were trained on the union
562
+ of the training and validation sets with the best parameter values. Both the specific models and
563
+ the generic models were tested on the test sets of the specific models, to ensure fair comparison.
564
+ As already explained, there were 50 such test sets, one for each company-domain-outcome
565
+ combination.
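+ A sketch of such a validation-set grid search (as opposed to cross-validation) is given below; using macro F1 as the selection criterion is our assumption for illustration, and the parameter grids themselves are those of Appendix C.

import numpy as np
from itertools import product
from sklearn.base import clone
from sklearn.metrics import f1_score

def tune_on_validation(model, grid, X_train, y_train, X_val, y_val):
    """Grid search on a fixed validation set, then refit on train+val with the best parameters."""
    best_score, best_params = -1.0, None
    for values in product(*grid.values()):
        params = dict(zip(grid, values))
        candidate = clone(model).set_params(**params).fit(X_train, y_train)
        score = f1_score(y_val, candidate.predict(X_val), average="macro")
        if score > best_score:
            best_score, best_params = score, params
    X_all, y_all = np.vstack([X_train, X_val]), np.concatenate([y_train, y_val])
    return clone(model).set_params(**best_params).fit(X_all, y_all)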
566
+ 4.5. Transfer learning by stacking generic and specific models
567
+ As was mentioned in the introduction, one important question is the extent to which (1) more
568
+ data (generic datasets) or (2) more relevant data (specific datasets) is better. In what follows,
569
+ we explore a way to move past this binary choice and have a tradeoff between quantity and
570
+ relevance.
571
+ Inspired by transfer learning, which is very successful in computer vision [17] and NLP
572
+ [18, 19, 20, 21], we experimented with combining the predictions of the generic and specific
573
+ models via an ensemble model.
574
+ Very briefly, in AI, transfer learning refers to a two-step process. First, a model is trained
575
+ at solving a general task on large amounts of data. This phase is called the pretraining phase,
576
+ as it allows the model to acquire generic knowledge (e.g., in NLP, reading and writing), that
577
+ is applicable to a great variety of situations downstream. Second, the pretrained model is fine-
578
+ tuned on a specific task of interest, often associated with a much smaller dataset (e.g. in NLP,
579
+ summarization, classification, question answering, paraphrase detection, etc.).
580
+ In our case, the generic and the specific models have to perform the same task, i.e., predict-
581
+ ing a given safety outcome10, and there is no pretraining phase per se, in that the generic and the
582
+ specific models are two different models. However, our approach is similar in spirit to transfer
583
+ learning, as our goal is to capitalize on generic knowledge gained from large amounts of data to
584
+ improve performance on a specific task associated with a smaller dataset.
585
+ More precisely, for each company-domain-outcome combination, we trained a meta-model
586
+ taking as input the weighted elementwise sum of the probabilistic forecasts of the best generic
587
+ and specific models11. We used a simple logistic regression12 as our meta-model, with the C
588
+ parameter fixed and equal to 0.2, like in [5]. We grid searched the validation set to find the best
589
+ values of coefficients a and b where:
590
+ \mathrm{input}_{ensemble} = a \times \mathrm{output}_{generic} + b \times \mathrm{output}_{specific} \qquad (1)
592
+ Besides performance considerations, using tunable weights improves interpretability, by
593
+ providing information regarding which of the generic model or the specific model makes the
594
+ most important contribution to predictive skill.
595
+ 8https://xgboost.readthedocs.io/en/latest/gpu/
596
+ 9https://arxiv.org/pdf/1908.05972.pdf
597
+ 10The generic model has to perform a more difficult version of the task, though (more categories to predict).
598
+ 11The entries of the specific model vector for the categories that it did not predict were set to zero.
599
+ 12https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
600
602
+ We tried values from 0.1 to 1 with 0.1 steps, holding the other parameter equal to 1, and
603
+ conversely. That is, the following 19 pairs: (0.1, 1), (0.2, 1), ... , (1, 1), (1, 0.1), (1, 0.2), ... ,
604
+ (1, 0.9).
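+ The sketch below illustrates this stacking step; exactly which split the meta-model is fit on is not spelled out above, so the code simply exposes the weighted combination, the C = 0.2 logistic regression, and the 19 (a, b) pairs as described.

import numpy as np
from sklearn.linear_model import LogisticRegression

# p_generic and p_specific are (n_samples, n_categories) probability matrices over the same
# category set; categories missing from the specific model are zero-filled (cf. footnote 11).
def fit_meta_model(p_generic, p_specific, y, a, b):
    """Logistic-regression meta-model (C = 0.2) on the weighted sum a*generic + b*specific."""
    return LogisticRegression(C=0.2, max_iter=1000).fit(a * p_generic + b * p_specific, y)

# The 19 (a, b) pairs searched on the validation set:
pairs = [(round(0.1 * k, 1), 1.0) for k in range(1, 11)] + \
        [(1.0, round(0.1 * k, 1)) for k in range(1, 10)]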
605
+ SVM issue. By design, the implementation of the linear SVM model we used, linearSVC,
606
+ only returns discrete predictions, that is, a single label corresponding to the most likely category,
607
+ rather than a probability distribution over all categories. To address this issue, in [5], we tried
608
+ retraining the best SVM using the SVC implementation13 with linear Kernel. However, results
609
+ were not convincing. Therefore, in the present study, we decided simply not to use model
610
+ stacking when one of the two models involved (e.g., best generic or specific model) was a
611
+ SVM.
612
+ 4.6. Performance metrics
613
+ Due to the large class imbalance for all outcomes, measuring classification performance
614
+ with accuracy was inadequate. Rather, we computed precision, recall, and F1-score.
615
+ Precision, respectively recall, for category i, is equal to the number of correct predictions
616
+ for category i (number of hits), divided by the number of predictions made for category i (hits
617
+ and false alarms), respectively by the number of observations in category i (hits and misses).
618
+ \mathrm{precision} = \frac{C_{i,i}}{\sum_{j=1}^{K} C_{j,i}} \qquad \mathrm{recall} = \frac{C_{i,i}}{\sum_{j=1}^{K} C_{i,j}} \qquad (2)
627
+ Where the confusion matrix C is a square matrix of dimension K ×K (K being the number
628
+ of categories) and whose (i, j)th element Ci,j indicates how many of the observations known
629
+ to be in category i were predicted to be in category j. Finally, we computed the F1-score, the
630
+ harmonic mean of precision and recall:
631
+ F_1 = \frac{2 \times \mathrm{precision} \times \mathrm{recall}}{\mathrm{precision} + \mathrm{recall}} \qquad (3)
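+ These per-category metrics follow directly from the confusion matrix, as in the short sketch below (a toy 2x2 matrix is used as input; categories that are never predicted would need a zero-division guard).

import numpy as np

def per_category_prf1(C):
    """Precision, recall and F1 per category from a K x K confusion matrix C,
    where C[i, j] counts observations of category i predicted as category j."""
    hits = np.diag(C).astype(float)
    precision = hits / C.sum(axis=0)                     # hits / (hits + false alarms), Eq. (2)
    recall = hits / C.sum(axis=1)                        # hits / (hits + misses), Eq. (2)
    f1 = 2 * precision * recall / (precision + recall)   # Eq. (3)
    return precision, recall, f1

precision, recall, f1 = per_category_prf1(np.array([[50, 10], [5, 35]]))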
634
+ 4.7. Configuration
635
+ We relied on a single Ubuntu 20.04.4 machine featuring a 4.9 GHz 12-thread CPU, a 12
636
+ GB Nvidia Titan V GPU, 64 GB of RAM, R version 4.1.3 [22], and Python version 3.8.13 with
637
+ scikit-learn version 1.1.1 [23]. Running all experiments took approximately ten days.
638
+ 5. Results
639
+ Each generic model (full and per-domain), as well as ensembles thereof (stacking approach
640
+ described in section 4.5) was tested on the test set of each company-domain-outcome combina-
641
+ tion and compared against the best performing specific model for this combination.
642
+ Results are very positive. As can be seen in Table 5, across all companies, the generic mod-
643
+ els (full or per-domain) outperform the specific models 82% of the time, i.e., for 41 company-
644
+ domain-outcome combinations out of 50. Detailed per-company results can be found in Ap-
645
+ pendix E for the full generic models and Appendix F for the per-domain generic models. At
646
+ the company level, improvements are brought on average for 80.6% of outcomes (across all
647
+ domains), ranging from 33.3% for Company2 to 100% for Company1, Company4, Company6,
648
+ and Company9.
649
+ 13https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
650
652
+ Construction
653
+ Electric T&D
654
+ Oil & Gas
655
+ C
656
+ S
657
+ B
658
+ IT
659
+ AT
660
+ E
661
+ S
662
+ B
663
+ IT
664
+ AT
665
+ E
666
+ S
667
+ B
668
+ IT
669
+ AT
670
+ E
671
+ 1
672
+ +1.26
673
+ +0.2
674
+ +2.55
675
+ +3.07
676
+ 2
677
+ x
678
+ +3.15
679
+ x
680
+ 3
681
+ x
682
+ +6.56
683
+ +0.49
684
+ x
685
+ +12.47
686
+ +0.99
687
+ x
688
+ 4
689
+ +3.49
690
+ +0.47
691
+ +3.06
692
+ +0.98
693
+ +0.63
694
+ 5
695
+ x
696
+ +2.64
697
+ +1.29
698
+ +3.14
699
+ +2.79
700
+ 6
701
+ +12.86
702
+ +4.39
703
+ +1.63
704
+ +7.09
705
+ +12.87
706
+ +5
707
+ +11.19
708
+ +0.59
709
+ 7
710
+ x
711
+ +6.69
712
+ +15.3
713
+ x
714
+ +1.04
715
+ +9.54
716
+ +2.12
717
+ +2.2
718
+ 8
719
+ +1.11
720
+ +0.47
721
+ +2.78
722
+ x
723
+ +3.13
724
+ 9
725
+ +1.56
726
+ +5.38
727
+ +12.16
728
+ +5.01
729
+ +6.16
730
+ Table 5: Company-level max gains. x: no improvement. S: severity, B: body part, IT: injury type, AT: accident type,
731
+ E: energy source. C: company.
732
+ Furthermore, as shown in Fig. 1, gains are high on average (+4.4 in F1 score) and reach im-
733
+ pressive values, e.g., +15.3 for Company7 on electric T&D-injury type, +12.87 for Company6
734
+ on electric T&D-severity, +12.86 for Company6 on construction-severity, +6.56 for Company3
735
+ on construction-body part, etc. And all of that, while predicting more categories.
736
+ [Histogram omitted: "Distribution of Company-level Max Gains"; x-axis: gain in F1 score over specific models, y-axis: counts, with individual gains overlaid as tick marks.]
+ Figure 1: Company-level max gains, across all domains and outcomes. n=41, min=0.2, max=15.3, mean=4.4.
789
+ There are only 9 domain-outcome combinations over 50, across 5 companies, on which
790
+ the generic models do not bring any quantitative improvement. However, since their forecasts
791
+ are more informative (more categories predicted), it may still make sense in practice to use the
792
+ generic models in lieu of the specific models, even on these combinations. For instance, for
793
+ Company3-oil & gas-accident type, the specific model only predicts exposure and struck, but
794
+ the generic model also predicts the categories caught, fall, and overexertion.
795
+ The F1 scores averaged over all companies are shown in Table 6. Overall, the generic mod-
796
+ els bring improvement over the specific models for 73.3 % of the domain-outcome combinations
797
+ (11 out of 15). As shown on the right of Fig. 2, maximum gains range from 0.95 (for electric
798
+ T&D-energy source) to 9.98 (for electric T&D-injury type) with an average of 3.37. Also, not
799
+ only do the 11 best generic models outperform their specific counterparts with a comfortable
800
+ margin, but they also generate finer-grained forecasts, which are much more useful in practice.
801
+ More specifically, generic models predict 2.26 additional categories on average, even up to 7
802
+ for construction-injury type (while still providing a gain of 3.48 in F1 score). This is remarkable,
803
+ considering that the more categories to be predicted, the more difficult the task (see Appendix
804
+ D).
805
+ 9
806
+
807
+ [Figure 2 plot: two histograms, "Distribution of All Gains" (left) and "Distribution of Max Gains" (right); x-axes: gain in F1 score over specific models; y-axes: counts.]
889
+ Figure 2: Gains averaged over companies. Left: n=48, min=0.16, max=9.98, mean=2.22. Right: n=11, min=0.95,
890
+ max=9.98, mean=3.37.
891
+ The construction and oil & gas domains see gains for 3 outcomes out of 5, while on the
+ electric T&D domain, we observe improvement for every outcome. Further, for the body part,
+ injury type, and energy source outcomes, there is at least one generic model that outperforms
+ its specific counterpart on every domain, while the severity and accident type outcomes see
+ improvements only on the electric T&D domain.
+ However, it is important to note that even on those 4 domain-outcome combinations on
+ which the generic models do not offer gains in predictive performance, it can still be desirable
+ to use them in practice over the specific models, as they generate more informative forecasts,
+ with 2 additional categories predicted on average.
+ Overall, more than half of all F1 scores recorded for the generic models (79 out of 150,
+ or 53%) are greater than, or within two points of, those of the specific models, while predicting
+ 1.83 more categories on average. And, as shown on the left of Fig. 2, the 48 generic models that
+ outperform their specific counterparts bring on average an improvement of 2.22 in F1 score.
904
+ 5.1. Body part, injury type, and energy source
905
+ Some of the greatest improvements are observed for injury type, where the best generic
+ models provide large average gains of 3.48, 9.98, and 3.07 on the construction, electric T&D,
+ and oil & gas domains, respectively, while predicting on average 4.25 more categories than
+ the company-specific models. This large boost in performance is remarkable considering the
+ significant increase in task difficulty.
+ Similarly, for energy source, the best generic models provide improvements of 4.48, 0.95,
+ and 1.83 in F1 score, while predicting 1.53 more categories on average; and for body part, the
+ gains are 3.38, 3.42, and 1.37, with 0.17 more categories predicted.
913
+ 5.2. Severity and accident type
914
+ For severity and accident type, the generic models outperform the company-specific ones
915
+ on the electric T&D domain, with gains of 3.09 and 2.03 in F1 score, while predicting 2 and 0.5
916
+ more categories on average.
917
+ On the construction and oil & gas domains, the best generic models are between 2.4 and 6
918
+ points below the company-specific ones. However, they still offer the benefit of predicting more
919
+ categories (+1.6 on average).
920
+ 10
921
+
922
+ Construction
923
+ Electric T&D
924
+ Oil & Gas
925
+ Full
926
+ Dom.
927
+ Full
928
+ Dom.
929
+ Full
930
+ Dom.
931
+ Severity
932
+ F1
933
+ SVM
934
+ gen
935
+ 31.66
936
+ 30.29
937
+ 35.52
938
+ 35.79
939
+ 30.92
940
+ 28.97
941
+ RF
942
+ gen
943
+ 27.26
944
+ 31.04
945
+ 33.37
946
+ 41.28
947
+ 23.48
948
+ 26.08
949
+ ens
950
+ 30.33
951
+ 31.82
952
+ 41.17†
953
+ 43.88†
954
+ 28.98
955
+ 30.76
956
+ XGB
957
+ gen
958
+ 26.98
959
+ 28.74
960
+ 36.4
961
+ 39.69⋆
962
+ 24.25
963
+ 24.19
964
+ ens
965
+ 29.67
966
+ 31.81
967
+ 40.01⋆
968
+ 40.95
969
+ 29.14
970
+ 31.44
971
+ spec
972
+ 35.34†
973
+ 40.79
974
+ 33.86†
975
+ Count
976
+ # categories
977
+ spec
978
+ 3.5
979
+ 3
980
+ 3.5
981
+ gen
982
+ 5
983
+ 5
984
+ 5
985
+ 5
986
+ 5
987
+ 5
988
+ # datasets
989
+ 9
990
+ 4
991
+ 9
992
+ 4
993
+ 9
994
+ 4
995
+ Body Part
996
+ F1
997
+ SVM
998
+ gen
999
+ 25.51
1000
+ 31.32
1001
+ 26.68
1002
+ 25.24
1003
+ 23.65
1004
+ 24.66
1005
+ RF
1006
+ gen
1007
+ 34.41†
1008
+ 31.81
1009
+ 34.94†
1010
+ 33.05
1011
+ 30.22†
1012
+ 28.85†
1013
+ ens
1014
+ 30.33⋆
1015
+ 30.39⋆
1016
+ 34.08
1017
+ 29.25
1018
+ 26.43
1019
+ 27.54⋆
1020
+ XGB
1021
+ gen
1022
+ 32.86
1023
+ 32.23†
1024
+ 33.76
1025
+ 35.21†
1026
+ 29.22
1027
+ 28.59⋆
1028
+ ens
1029
+ 29.05⋆
1030
+ 29.2⋆
1031
+ 32.31
1032
+ 34.71
1033
+ 26.37
1034
+ 27.74⋆
1035
+ spec
1036
+ 31.03
1037
+ 31.79
1038
+ 28.85
1039
+ Count
1040
+ # categories
1041
+ spec
1042
+ 6
1043
+ 5.5
1044
+ 6
1045
+ gen
1046
+ 6
1047
+ 6
1048
+ 6
1049
+ 6
1050
+ 6
1051
+ 6
1052
+ # datasets
1053
+ 9
1054
+ 4
1055
+ 9
1056
+ 4
1057
+ 9
1058
+ 3
1059
+ Injury Type
1060
+ F1
1061
+ SVM
1062
+ gen
1063
+ 38.73
1064
+ 42.78⋆
1065
+ 53.7†
1066
+ 42.91⋆
1067
+ 35.11⋆
1068
+ 33.89
1069
+ RF
1070
+ gen
1071
+ 40.03
1072
+ 42.11⋆
1073
+ 42.44⋆
1074
+ 41.68⋆
1075
+ 32.15
1076
+ 32.82
1077
+ ens
1078
+ 41.75
1079
+ 45.46†
1080
+ 49.42
1081
+ 45.10
1082
+ 38.65†
1083
+ 38.97
1084
+ XGB
1085
+ gen
1086
+ 36.76
1087
+ 42.55⋆
1088
+ 41.03
1089
+ 41.48
1090
+ 31.33
1091
+ 32.34
1092
+ ens
1093
+ 47.4†
1094
+ 45.45
1095
+ 51.44
1096
+ 49.28†
1097
+ 38.05
1098
+ 39.79†
1099
+ spec
1100
+ 43.92
1101
+ 43.72
1102
+ 36.72
1103
+ Count
1104
+ # categories
1105
+ spec
1106
+ 4
1107
+ 5.25
1108
+ 7
1109
+ gen
1110
+ 11
1111
+ 6
1112
+ 11
1113
+ 8
1114
+ 11
1115
+ 11
1116
+ # datasets
1117
+ 9
1118
+ 4
1119
+ 9
1120
+ 4
1121
+ 9
1122
+ 3
1123
+ Accident Type
1124
+ F1
1125
+ SVM
1126
+ gen
1127
+ 42.39
1128
+ 42.20
1129
+ 44.58
1130
+ 44.84
1131
+ 60.69
1132
+ 64.85
1133
+ RF
1134
+ gen
1135
+ 42.46
1136
+ 42.42
1137
+ 48.58†
1138
+ 50.2†
1139
+ 63.14
1140
+ 64.12
1141
+ ens
1142
+ 44.35
1143
+ 40.8
1144
+ 41.29
1145
+ 39.72
1146
+ 66.40
1147
+ 65.74
1148
+ XGB
1149
+ gen
1150
+ 48.27
1151
+ 49.21
1152
+ 47.80⋆
1153
+ 49.58
1154
+ 58.58
1155
+ 63.04
1156
+ ens
1157
+ 42.02
1158
+ 43.40
1159
+ 41.08
1160
+ 43.53
1161
+ 64.56
1162
+ 67.13
1163
+ spec
1164
+ 54.98†
1165
+ 48.17
1166
+ 73.16†
1167
+ Count
1168
+ # categories
1169
+ spec
1170
+ 3.5
1171
+ 4.5
1172
+ 2.67
1173
+ gen
1174
+ 5
1175
+ 5
1176
+ 5
1177
+ 5
1178
+ 5
1179
+ 4
1180
+ # datasets
1181
+ 6
1182
+ 2
1183
+ 6
1184
+ 2
1185
+ 6
1186
+ 3
1187
+ Energy Source
1188
+ F1
1189
+ SVM
1190
+ gen
1191
+ 74.12†
1192
+ 73.78
1193
+ 77.64⋆
1194
+ 78.87†
1195
+ 69.54⋆
1196
+ 55.63
1197
+ RF
1198
+ gen
1199
+ 72.71
1200
+ 73.91†
1201
+ 77.12⋆
1202
+ 78.61
1203
+ 71.12
1204
+ 70.72†
1205
+ ens
1206
+ 69.06⋆
1207
+ 69.64⋆
1208
+ 75.83
1209
+ 76.48⋆
1210
+ 70.58
1211
+ 69.12⋆
1212
+ XGB
1213
+ gen
1214
+ 73.77
1215
+ 71.04
1216
+ 78.83†
1217
+ 75.61
1218
+ 72.22†
1219
+ 70.35⋆
1220
+ ens
1221
+ 69.05⋆
1222
+ 70.23
1223
+ 76.47⋆
1224
+ 75.03
1225
+ 70.98
1226
+ 70.38⋆
1227
+ spec
1228
+ 69.64
1229
+ 77.92
1230
+ 70.39
1231
+ Count
1232
+ # categories
1233
+ spec
1234
+ 2.25
1235
+ 2.67
1236
+ 3
1237
+ gen
1238
+ 5
1239
+ 3
1240
+ 5
1241
+ 3
1242
+ 5
1243
+ 4
1244
+ # datasets
1245
+ 8
1246
+ 4
1247
+ 8
1248
+ 3
1249
+ 8
1250
+ 2
1251
+ Table 6: Results averaged over companies. †: best of their sub-column. Bold/⋆: better than/within 2 pts of spec. Full: full generic
1252
+ model (one per outcome, same across domains). Dom.: per-domain generic model (one per outcome per domain). Gen/spec:
1253
+ generic/specific. Ens: ensemble thereof. # datasets: number of company datasets forming the generic dataset. Note: for the same
1254
+ outcome, # categories and # datasets are the same for Full across domains, we repeat them only to ease comparison.
1255
+ 5.3. Full vs. per-domain
1256
+ In what follows, we refer to the full and per-domain models and their ensemble versions.
+ When considering full generic models, the average improvement in F1 score over the specific
+ models is 2.85 and 2.44 additional categories are predicted (min=0, max=7), while when
+ considering per-domain generic models, the average improvement is 2.57 and 1.38 additional
+ categories are predicted (min=0, max=4). The per-domain models reach a higher max score than
+ the full models on 9 combinations out of 15 (60%), and on 5 out of the 11 (45%) on which the
+ specific models are outperformed. The full and per-domain models outperform the specific
+ models on the same 11 domain-outcome combinations.
+ So, in terms of performance, there is no clear winner. However, since the full generic models
+ predict more categories and are also conceptually simpler (just one model per outcome), full
+ models seem like the way to go. This conclusion will need to be validated when more
+ datasets are available for each domain. One thing to note, however, is that specific models may
+ still be desirable in the context of model stacking, as covered next.
1271
+ 5.4. Generic vs. ensemble (generic + specific)
1272
+ The transfer learning-like stacking approach, i.e., combining the predictions of the generic
+ and specific models, boosts performance over the generic models (both full and per-domain)
+ on all domains for the severity and injury type outcomes, in some cases for accident type, and
+ nowhere for body part and energy source.
+ For severity, the average gain is 3.93, and gains range from 0.78 to an impressive 7.8 (for
+ electric T&D-full-RF). Results are even more impressive for injury type: gains range from 1.72
+ to 10.64 (for construction-full-XGB), with a high average of 6.17.
+ It is interesting to note that for severity and injury type, very few of the generic models
+ outperform the specific models in the first place, and it is only by combining their predictions
+ with those of the specific models that absolute best performance can be reached, on the electric
+ T&D domain for severity, and on all domains for injury type.
+ Conversely, we observe that for body part and energy source, where model stacking
+ does not bring additional skill, the generic models are stronger than the specific models in the
+ first place.
+ All in all, these results may suggest that ensembling only works when the generic models are
+ not already better than the specific models. However, this rule does not hold everywhere (e.g.,
+ construction-accident type-XGB), so additional data, experiments, and results will be necessary
+ to draw any general conclusion here.
1290
+ 5.5. Quantity vs. relevance
1291
+ Regarding whether more data or more relevant data is preferable, Fig. 3 shows the distributions
+ of the best a and b coefficients as determined on the validation sets. It tends to indicate that, on
+ average, the best tradeoff involves anywhere from a little to a lot of the generic model (anywhere
+ in the [0.1,1] range, with peaks towards [0.1,0.2] and [0.9,1]), but almost always a lot of the specific
+ model (between 0.9 and 1). In other words, data relevance always seems important, while
+ the contribution of data quantity fluctuates. However, this is only a general trend. As can be
+ seen in the detailed results per company (Appendix E and Appendix F), in some cases, the
+ contribution of the generic model is more important than that of the specific model, e.g., (1,0.6)
+ for Company6-XGB in the first table of Appendix F.
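+ The sketch below illustrates, in Python, the kind of weighted combination and validation-set grid
+ search implied by the (a, b) coefficients discussed above. It is a minimal illustration rather than the
+ exact implementation: the helper names, the coefficient grid, and the macro-averaged F1 are
+ assumptions, and the generic and specific probability matrices are assumed to have already been
+ aligned on a common list of categories (the generic model typically predicts more of them).
+
+ import numpy as np
+ from itertools import product
+ from sklearn.metrics import f1_score
+
+ def ensemble_proba(p_generic, p_specific, a, b):
+     """Weighted sum of the two class-probability matrices (rows: reports, columns: categories)."""
+     return a * p_generic + b * p_specific
+
+ def search_coefficients(p_gen_val, p_spec_val, y_val, grid=np.arange(0.1, 1.01, 0.1)):
+     """Pick the (a, b) pair maximizing F1 on the validation set."""
+     best_a, best_b, best_f1 = None, None, -1.0
+     for a, b in product(grid, repeat=2):
+         preds = ensemble_proba(p_gen_val, p_spec_val, a, b).argmax(axis=1)
+         score = f1_score(y_val, preds, average="macro")
+         if score > best_f1:
+             best_a, best_b, best_f1 = a, b, score
+     return best_a, best_b, best_f1
+
+ At test time, the selected (a, b) pair would simply be reused to combine the two models' forecasts
+ before taking the argmax over categories.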
1300
+ 5.6. Best model type
1301
+ For the full generic models, the best algorithm is RF (6 domain-outcome combinations out
+ of 15), followed by SVM (5/15) and XGB (4/15). When stacked with the specific model, RF
1303
+ reaches best performance in 10 out of 15 combinations.
1304
+ 12
1305
+
1306
+ [Figure 3 plot: two histograms "Coefficient Distributions" (left: full, right: per-domain); x-axis: coefficient value; y-axis: count; series: generic and specific.]
1339
+ Figure 3: Distributions of the best coefficient values a (generic) and b (specific). Left: full. Right: per-domain.
1340
+ When considering the per-domain generic models, SVM obtains the best score 7 times out
+ of 15, followed by RF (5/15) and XGB (3/15). However, when used in the ensemble, XGB is the
+ best (10/15).
+ RF and XGB are better choices than SVM, as they consistently top the scores and can be
+ used in ensembles. In terms of performance, though, there is no clear winner between the two,
+ and one or the other could be used interchangeably. However, XGBoost is superior in practice as
+ far as deployment is concerned, as the Random Forest models take a lot of disk space, even after
+ applying some compression tricks.
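+ As an illustration of the deployment concern, a fitted scikit-learn forest can be persisted with joblib
+ compression, which is presumably among the "compression tricks" mentioned; the exact settings
+ below are assumptions for illustration only.
+
+ import joblib
+ import numpy as np
+ from sklearn.ensemble import RandomForestClassifier
+
+ # Fit a small forest on dummy data, then persist it with on-disk compression.
+ X = np.random.rand(200, 5)
+ y = np.random.randint(0, 2, size=200)
+ rf = RandomForestClassifier(n_estimators=50).fit(X, y)
+ joblib.dump(rf, "rf_model.joblib", compress=3)  # higher compress levels trade speed for size
+ rf_loaded = joblib.load("rf_model.joblib")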
1348
+ 6. Conclusion
1349
+ We showed that generic models provide consistent and large improvements over company-
1350
+ specific models. Moreover, generic models issue finer-grained forecasts that are more useful in
1351
+ practice, as they predict more categories of each safety outcome.
1352
+ Generic models remove the need to train company-specific models, saving a lot of time
+ and resources, and give small companies, whose accident datasets are too limited to train their
+ own models, access to safety outcome predictions.
1355
+ Per-domain generic models (trained on data from a specific industry sector) are not always
1356
+ better than full generic models (trained on all data). Ensembling generic and specific models is
1357
+ often very beneficial. Therefore, it might still be worth training specific models to combine their
1358
+ predictions with those of the generic models. If specific models are already in use, combining
1359
+ them with the generic models may provide a boost in performance.
1360
+ The forecasts are in essence clear and direct information that can be accessed via a user
1361
+ interface (as a desktop or mobile webpage or application), or via an API for integration into any
1362
+ existing ecosystem. In each case, the only input required is a set of attributes, and the outputs are
+ probabilities for each category of each outcome.
1364
+ By learning lessons from a pool of datasets whose accumulated experience far exceeds that
1365
+ of any single company, and making these lessons easily accessible, generic models tackle the
1366
+ holy grail of safety cross-organizational learning and dissemination in the construction industry.
1367
+ 7. Acknowledgements
1368
+ We thank the Nvidia Corporation for donating the Titan V GPU that was used in this re-
1369
+ search, as part of their GPU grant program.
1370
+ 13
1371
+
1372
+ 8. References
1373
+ References
1374
+ [1] M. Desvignes, Requisite empirical risk data for integration of safety with advanced technologies and intelligent
1375
+ systems, Ph.D. thesis, University of Colorado at Boulder (2014).
1376
+ URL https://scholar.colorado.edu/downloads/0r967398g
1377
+ [2] M. P. Villanova, Attribute-based risk model for assessing risk to industrial construction tasks, Ph.D. thesis,
1378
+ University of Colorado at Boulder (2014).
1379
+ URL https://scholar.colorado.edu/downloads/jd472w76t
1380
+ [3] A. J.-P. Tixier, M. R. Hallowell, B. Rajagopalan, D. Bowman, Automated content analysis for construction
1381
+ safety: a natural language processing system to extract precursors and outcomes from unstructured injury
1382
+ reports, Automation in Construction 62 (2016) 45–56. doi:10.1016/j.autcon.2015.11.001.
1383
+ [4] A. J.-P. Tixier, M. R. Hallowell, B. Rajagopalan, D. Bowman, Application of machine learning to construction
1384
+ injury prediction, Automation in Construction 69 (2016) 102–114. doi:10.1016/j.autcon.2016.05.
1385
+ 016.
1386
+ [5] H. Baker, M. R. Hallowell, A. J.-P. Tixier, Ai-based prediction of independent construction safety outcomes
1387
+ from universal attributes, Automation in Construction 118 (2020) 103146.
1388
+ [6] A. J.-P. Tixier, M. R. Hallowell, B. Rajagopalan, D. Bowman, Construction safety clash detection: identify-
1389
+ ing safety incompatibilities among fundamental attributes using data mining, Automation in Construction 74
1390
+ (2017) 39–54.
1391
+ [7] A. J.-P. Tixier, M. R. Hallowell, B. Rajagopalan, Construction safety risk modeling and simulation, Risk
1392
+ Analysis 37 (10) (2017) 1917–1935. doi:10.1111/risa.12772.
1393
+ [8] L. Tanguy, N. Tulechki, A. Urieli, E. Hermann, C. Raynal, Natural language processing for aviation safety
1394
+ reports: From classification to interactive analysis, Computers in Industry 78 (2016) 80–95.
1395
+ [9] A. L. Sepeda, Lessons learned from process incident databases and the process safety incident database (psid)
1396
+ approach sponsored by the center for chemical process safety, Journal of hazardous materials 130 (1-2) (2006)
1397
+ 9–14.
1398
+ [10] Q. T. Le, D. Y. Lee, C. S. Park, A social network system for sharing construction safety and health knowledge,
1399
+ Automation in Construction 46 (2014) 30–37.
1400
+ [11] A. Pedro, A.-T. Pham-Hang, P. T. Nguyen, H. C. Pham, Data-driven construction safety information sharing
1401
+ system based on linked data, ontologies, and knowledge graph technologies, International journal of environ-
1402
+ mental research and public health 19 (2) (2022) 794.
1403
+ [12] K. W. Edwin, Sharing incident experiences: a roadmap towards collective safety information in the norwegian
1404
+ construction industry, International Journal of Occupational Safety and Ergonomics (2022) 1–11.
1405
+ [13] K. Wasilkiewicz, Information flow and knowledge transfer of accident investigation results in the norwegian
1406
+ construction industry, in: Safety and Reliability–Safe Societies in a Changing World, CRC Press, 2018, pp.
1407
+ 2855–2862.
1408
+ [14] L. Breiman, Random forests, Machine Learning 45 (1) (2001) 5–32.
1409
+ [15] T. Chen, C. Guestrin, Xgboost: A scalable tree boosting system, in: Proceedings of the 22nd ACM SIGKDD
1410
+ International Conference on Knowledge Discovery and Data Mining, ACM, 2016, pp. 785–794.
1411
+ [16] B. E. Boser, I. M. Guyon, V. N. Vapnik, A training algorithm for optimal margin classifiers, in: ACM
1412
+ Proceedings of the Fifth Annual Workshop on Computational learning theory, 1992, pp. 144–152. doi:
1413
+ 10.1145/130385.130401.
1414
+ [17] A. Krizhevsky, I. Sutskever, G. E. Hinton, Imagenet classification with deep convolutional neural networks, in:
1415
+ Advances in neural information processing systems, 2012, pp. 1097–1105.
1416
+ 14
1417
+
1418
+ [18] A. Radford, K. Narasimhan, T. Salimans, I. Sutskever, Improving language understanding by generative
+ pre-training, URL https://s3-us-west-2.amazonaws.com/openai-assets/researchcovers/languageunsupervised/language understanding paper.pdf.
1438
+ [19] J. Devlin, M.-W. Chang, K. Lee, K. Toutanova, Bert: Pre-training of deep bidirectional transformers for lan-
1439
+ guage understanding, arXiv preprint arXiv:1810.04805.
1440
+ [20] M. Lewis, Y. Liu, N. Goyal, M. Ghazvininejad, A. Mohamed, O. Levy, V. Stoyanov, L. Zettlemoyer, Bart:
1441
+ Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension,
1442
+ arXiv preprint arXiv:1910.13461.
1443
+ [21] M. K. Eddine, A. J.-P. Tixier, M. Vazirgiannis, Barthez: a skilled pretrained french sequence-to-sequence
1444
+ model, arXiv preprint arXiv:2010.12321.
1445
+ [22] R Core Team, R: A Language and Environment for Statistical Computing, R Foundation for Statistical Com-
1446
+ puting, Vienna, Austria (2022).
1447
+ URL https://www.R-project.org/
1448
+ [23] F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer,
1449
+ R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, E. Duchesnay, Scikit-
1450
+ learn: Machine learning in Python, Journal of Machine Learning Research 12 (2011) 2825–2830.
1451
+ Appendices
1452
+ Appendix A. Attribute List
1453
+ adverse low temps
1454
+ fuses⋆
1455
+ machinery
1456
+ spark
1457
+ bolt
1458
+ grinding
1459
+ manlift
1460
+ splinter/sliver
1461
+ breaker⋆
1462
+ grout
1463
+ mud
1464
+ spool
1465
+ cable
1466
+ guardrail/handrail
1467
+ nail
1468
+ stairs
1469
+ cable tray
1470
+ hammer
1471
+ no/improper PPE
1472
+ steel/steel sections
1473
+ chipping
1474
+ hand size pieces
1475
+ object at height
1476
+ stripping
1477
+ cleaning
1478
+ hazardous substance
1479
+ object on the floor
1480
+ stud
1481
+ clearance⋆
1482
+ heat source/high temps
1483
+ piping
1484
+ switch/switching⋆
1485
+ concrete
1486
+ heater⋆
1487
+ pole⋆
1488
+ tank
1489
+ concrete liquid
1490
+ heavy material/tool
1491
+ pontoon
1492
+ transformer⋆
1493
+ conduit
1494
+ heavy vehicle
1495
+ poor housekeeping
1496
+ uneven surface
1497
+ confined work space
1498
+ hose
1499
+ poor visibility
1500
+ unpowered tool
1501
+ congested work space
1502
+ improper body position
1503
+ powered tool
1504
+ unpowered transporter
1505
+ crane
1506
+ improper procedure/inattention
1507
+ rebar
1508
+ unstable support/surface
1509
+ door
1510
+ improper security of materials
1511
+ relay⋆
1512
+ valve
1513
+ drill
1514
+ improper security of tools
1515
+ repetitive motion
1516
+ vault⋆
1517
+ dunnage
1518
+ insect/animal
1519
+ scaffold
1520
+ welding
1521
+ electricity
1522
+ job trailer
1523
+ screw
1524
+ wind
1525
+ exiting
1526
+ ladder
1527
+ sharp edge
1528
+ wire
1529
+ fan⋆
1530
+ lifting/pulling/manipulating
1531
+ slag
1532
+ working at height
1533
+ fatigued dizzy
1534
+ light vehicle
1535
+ slippery surface
1536
+ working below elev wksp/mat
1537
+ forklift
1538
+ LOTO/labeling⋆
1539
+ small particle
1540
+ working overhead
1541
+ formwork
1542
+ lumber
1543
+ soffit
1544
+ wrench
1545
+ Table A.7: 92 attributes used in this study. LOTO: lockout-tagout. PPE: personal protective equipment. ⋆: eleven
1546
+ new attributes added since [4, 5].
1547
+ 15
1548
+
1549
+ Appendix B. Detailed split counts
1550
+ Severity
1551
+ Train
1552
+ w
1553
+ Val
1554
+ Test
1555
+ Construction
1556
+ report-only
1557
+ 917
1558
+ 8.2
1559
+ 226
1560
+ 283
1561
+ 1st aid
1562
+ 7486
1563
+ 1.0
1564
+ 1876
1565
+ 2369
1566
+ medical
1567
+ 470
1568
+ 15.9
1569
+ 114
1570
+ 140
1571
+ recordable
1572
+ 147
1573
+ 50.9
1574
+ 28
1575
+ 42
1576
+ lost time
1577
+ 960
1578
+ 7.8
1579
+ 250
1580
+ 285
1581
+ total
1582
+ 9980
1583
+ 2494
1584
+ 3119
1585
+ Electric T&D
1586
+ report-only
1587
+ 2392
1588
+ 1.2
1589
+ 576
1590
+ 712
1591
+ 1st aid
1592
+ 2809
1593
+ 1.0
1594
+ 736
1595
+ 905
1596
+ medical
1597
+ 554
1598
+ 5.1
1599
+ 140
1600
+ 162
1601
+ recordable
1602
+ 310
1603
+ 9.1
1604
+ 74
1605
+ 101
1606
+ lost time
1607
+ 607
1608
+ 4.6
1609
+ 143
1610
+ 205
1611
+ total
1612
+ 6672
1613
+ 1669
1614
+ 2085
1615
+ Oil & Gas
1616
+ report-only
1617
+ 929
1618
+ 14.8
1619
+ 244
1620
+ 279
1621
+ 1st aid
1622
+ 13766
1623
+ 1.0
1624
+ 3405
1625
+ 4279
1626
+ medical
1627
+ 1919
1628
+ 7.2
1629
+ 489
1630
+ 618
1631
+ recordable
1632
+ 152
1633
+ 90.6
1634
+ 42
1635
+ 52
1636
+ lost time
1637
+ 1615
1638
+ 8.5
1639
+ 415
1640
+ 516
1641
+ total
1642
+ 18381
1643
+ 4595
1644
+ 5744
1645
+ Corporate
1646
+ report-only
1647
+ 97
1648
+ 3.3
1649
+ 31
1650
+ 22
1651
+ 1st aid
1652
+ 321
1653
+ 1.0
1654
+ 74
1655
+ 109
1656
+ total
1657
+ 418
1658
+ 105
1659
+ 131
1660
+ Full
1661
+ report-only
1662
+ 4335
1663
+ 5.6
1664
+ 1077
1665
+ 1296
1666
+ 1st aid
1667
+ 24382
1668
+ 1.0
1669
+ 6091
1670
+ 7662
1671
+ medical
1672
+ 2943
1673
+ 8.3
1674
+ 743
1675
+ 920
1676
+ recordable
1677
+ 609
1678
+ 40.0
1679
+ 144
1680
+ 195
1681
+ lost time
1682
+ 3182
1683
+ 7.7
1684
+ 808
1685
+ 1006
1686
+ total
1687
+ 35451
1688
+ 8863
1689
+ 11079
1690
+ Body Part
1691
+ Train
1692
+ w
1693
+ Val
1694
+ Test
1695
+ Construction
1696
+ arm
1697
+ 1059
1698
+ 2.6
1699
+ 285
1700
+ 338
1701
+ foot
1702
+ 694
1703
+ 3.9
1704
+ 167
1705
+ 232
1706
+ hand
1707
+ 2732
1708
+ 1.0
1709
+ 701
1710
+ 864
1711
+ head
1712
+ 1682
1713
+ 1.6
1714
+ 394
1715
+ 494
1716
+ leg
1717
+ 958
1718
+ 2.9
1719
+ 262
1720
+ 307
1721
+ trunk
1722
+ 1084
1723
+ 2.5
1724
+ 243
1725
+ 330
1726
+ total
1727
+ 8209
1728
+ 2052
1729
+ 2565
1730
+ Electric T&D
1731
+ arm
1732
+ 1061
1733
+ 1.4
1734
+ 274
1735
+ 319
1736
+ foot
1737
+ 372
1738
+ 4.0
1739
+ 89
1740
+ 135
1741
+ hand
1742
+ 1473
1743
+ 1.0
1744
+ 368
1745
+ 452
1746
+ head
1747
+ 1246
1748
+ 1.2
1749
+ 318
1750
+ 403
1751
+ leg
1752
+ 1084
1753
+ 1.4
1754
+ 251
1755
+ 307
1756
+ trunk
1757
+ 800
1758
+ 1.8
1759
+ 208
1760
+ 269
1761
+ total
1762
+ 6036
1763
+ 1508
1764
+ 1885
1765
+ Oil & Gas
1766
+ arm
1767
+ 1445
1768
+ 3.9
1769
+ 386
1770
+ 477
1771
+ foot
1772
+ 1741
1773
+ 3.2
1774
+ 421
1775
+ 568
1776
+ hand
1777
+ 5586
1778
+ 1.0
1779
+ 1385
1780
+ 1740
1781
+ head
1782
+ 3514
1783
+ 1.6
1784
+ 887
1785
+ 1088
1786
+ leg
1787
+ 2053
1788
+ 2.7
1789
+ 498
1790
+ 596
1791
+ trunk
1792
+ 1449
1793
+ 3.9
1794
+ 370
1795
+ 464
1796
+ total
1797
+ 15788
1798
+ 3947
1799
+ 4933
1800
+ Full
1801
+ arm
1802
+ 3565
1803
+ 2.7
1804
+ 945
1805
+ 1134
1806
+ foot
1807
+ 2807
1808
+ 3.5
1809
+ 677
1810
+ 935
1811
+ hand
1812
+ 9791
1813
+ 1.0
1814
+ 2454
1815
+ 3056
1816
+ head
1817
+ 6442
1818
+ 1.5
1819
+ 1599
1820
+ 1985
1821
+ leg
1822
+ 4095
1823
+ 2.4
1824
+ 1011
1825
+ 1210
1826
+ trunk
1827
+ 3333
1828
+ 2.9
1829
+ 821
1830
+ 1063
1831
+ total
1832
+ 30033
1833
+ 7507
1834
+ 9383
1835
+ Accident Type
1836
+ Train
1837
+ w
1838
+ Val
1839
+ Test
1840
+ Construction
1841
+ caught
1842
+ 396
1843
+ 2.3
1844
+ 105
1845
+ 137
1846
+ exposure
1847
+ 119
1848
+ 7.8
1849
+ 38
1850
+ 40
1851
+ fall
1852
+ 803
1853
+ 1.2
1854
+ 200
1855
+ 243
1856
+ overexertion
1857
+ 492
1858
+ 1.9
1859
+ 128
1860
+ 160
1861
+ struck
1862
+ 930
1863
+ 1.0
1864
+ 214
1865
+ 276
1866
+ total
1867
+ 2740
1868
+ 685
1869
+ 856
1870
+ Electric T&D
1871
+ caught
1872
+ 207
1873
+ 2.2
1874
+ 55
1875
+ 62
1876
+ exposure
1877
+ 454
1878
+ 1.0
1879
+ 123
1880
+ 142
1881
+ fall
1882
+ 403
1883
+ 1.1
1884
+ 102
1885
+ 143
1886
+ overexertion
1887
+ 288
1888
+ 1.6
1889
+ 51
1890
+ 65
1891
+ struck
1892
+ 248
1893
+ 1.8
1894
+ 69
1895
+ 88
1896
+ total
1897
+ 1600
1898
+ 400
1899
+ 500
1900
+ Oil & Gas
1901
+ caught
1902
+ 198
1903
+ 7.7
1904
+ 43
1905
+ 53
1906
+ exposure
1907
+ 526
1908
+ 2.9
1909
+ 127
1910
+ 184
1911
+ fall
1912
+ 1527
1913
+ 1.0
1914
+ 393
1915
+ 463
1916
+ struck
1917
+ 659
1918
+ 2.3
1919
+ 165
1920
+ 210
1921
+ total
1922
+ 2910
1923
+ 728
1924
+ 910
1925
+ Full
1926
+ caught
1927
+ 801
1928
+ 3.4
1929
+ 203
1930
+ 252
1931
+ exposure
1932
+ 1099
1933
+ 2.5
1934
+ 288
1935
+ 366
1936
+ fall
1937
+ 2733
1938
+ 1.0
1939
+ 695
1940
+ 849
1941
+ overexertion
1942
+ 780
1943
+ 3.5
1944
+ 179
1945
+ 225
1946
+ struck
1947
+ 1837
1948
+ 1.5
1949
+ 448
1950
+ 574
1951
+ total
1952
+ 7250
1953
+ 1813
1954
+ 2266
1955
+ Energy Source
1956
+ Train
1957
+ w
1958
+ Val
1959
+ Test
1960
+ Construction
1961
+ chemical
1962
+ 76
1963
+ 42.7
1964
+ 21
1965
+ 14
1966
+ gravity
1967
+ 1551
1968
+ 2.1
1969
+ 405
1970
+ 479
1971
+ motion
1972
+ 3248
1973
+ 1.0
1974
+ 792
1975
+ 1031
1976
+ total
1977
+ 4875
1978
+ 1218
1979
+ 1524
1980
+ Electric
1981
+ biological
1982
+ 221
1983
+ 7.6
1984
+ 52
1985
+ 88
1986
+ gravity
1987
+ 733
1988
+ 2.3
1989
+ 179
1990
+ 230
1991
+ motion
1992
+ 1683
1993
+ 1.0
1994
+ 429
1995
+ 507
1996
+ total
1997
+ 2637
1998
+ 660
1999
+ 825
2000
+ Oil & Gas
2001
+ chemical
2002
+ 70
2003
+ 21.2
2004
+ 13
2005
+ 21
2006
+ gravity
2007
+ 1485
2008
+ 1.0
2009
+ 361
2010
+ 448
2011
+ motion
2012
+ 914
2013
+ 1.6
2014
+ 246
2015
+ 300
2016
+ thermal
2017
+ 131
2018
+ 11.3
2019
+ 30
2020
+ 44
2021
+ total
2022
+ 2600
2023
+ 650
2024
+ 813
2025
+ Full
2026
+ biological
2027
+ 221
2028
+ 26.4
2029
+ 52
2030
+ 88
2031
+ chemical
2032
+ 146
2033
+ 40.0
2034
+ 34
2035
+ 35
2036
+ gravity
2037
+ 3769
2038
+ 1.6
2039
+ 945
2040
+ 1157
2041
+ motion
2042
+ 5845
2043
+ 1.0
2044
+ 1467
2045
+ 1838
2046
+ thermal
2047
+ 131
2048
+ 44.6
2049
+ 30
2050
+ 44
2051
+ total
2052
+ 10112
2053
+ 2528
2054
+ 3162
2055
+ Table B.8: Split counts (1/2). w: training weights.
2056
+ 16
2057
+
2058
+ Injury Type
2059
+ Train
2060
+ w
2061
+ Val
2062
+ Test
2063
+ Construction
2064
+ contusion
2065
+ 728
2066
+ 3.6
2067
+ 185
2068
+ 229
2069
+ cut
2070
+ 2644
2071
+ 1.0
2072
+ 682
2073
+ 795
2074
+ fob
2075
+ 399
2076
+ 6.6
2077
+ 84
2078
+ 118
2079
+ fracture
2080
+ 100
2081
+ 26.4
2082
+ 24
2083
+ 39
2084
+ pinch
2085
+ 267
2086
+ 9.9
2087
+ 90
2088
+ 97
2089
+ strain
2090
+ 2129
2091
+ 1.2
2092
+ 501
2093
+ 680
2094
+ total
2095
+ 6267
2096
+ 1566
2097
+ 1958
2098
+ Electric T&D
2099
+ bite
2100
+ 129
2101
+ 12.3
2102
+ 35
2103
+ 42
2104
+ burn
2105
+ 75
2106
+ 21.2
2107
+ 14
2108
+ 21
2109
+ contusion
2110
+ 861
2111
+ 1.8
2112
+ 216
2113
+ 277
2114
+ cut
2115
+ 1305
2116
+ 1.2
2117
+ 330
2118
+ 400
2119
+ fob
2120
+ 209
2121
+ 7.6
2122
+ 46
2123
+ 69
2124
+ fracture
2125
+ 176
2126
+ 9.0
2127
+ 39
2128
+ 53
2129
+ irritation
2130
+ 420
2131
+ 3.8
2132
+ 101
2133
+ 141
2134
+ strain
2135
+ 1589
2136
+ 1.0
2137
+ 410
2138
+ 486
2139
+ total
2140
+ 4764
2141
+ 1191
2142
+ 1489
2143
+ Oil & Gas
2144
+ bite
2145
+ 168
2146
+ 27.6
2147
+ 39
2148
+ 52
2149
+ burn
2150
+ 572
2151
+ 8.1
2152
+ 150
2153
+ 179
2154
+ contusion
2155
+ 3587
2156
+ 1.30
2157
+ 848
2158
+ 1091
2159
+ cut
2160
+ 4638
2161
+ 1.0
2162
+ 1160
2163
+ 1509
2164
+ exhaustion
2165
+ 75
2166
+ 61.8
2167
+ 24
2168
+ 25
2169
+ fob
2170
+ 1440
2171
+ 3.2
2172
+ 381
2173
+ 455
2174
+ fracture
2175
+ 622
2176
+ 7.5
2177
+ 160
2178
+ 199
2179
+ irritation
2180
+ 127
2181
+ 36.5
2182
+ 37
2183
+ 42
2184
+ pain
2185
+ 704
2186
+ 6.6
2187
+ 176
2188
+ 215
2189
+ pinch
2190
+ 720
2191
+ 6.4
2192
+ 181
2193
+ 231
2194
+ strain
2195
+ 2307
2196
+ 2.0
2197
+ 584
2198
+ 677
2199
+ total
2200
+ 14960
2201
+ 3740
2202
+ 4675
2203
+ Full
2204
+ bite
2205
+ 297
2206
+ 28.9
2207
+ 74
2208
+ 94
2209
+ burn
2210
+ 647
2211
+ 13.3
2212
+ 164
2213
+ 200
2214
+ contusion
2215
+ 5176
2216
+ 1.7
2217
+ 1249
2218
+ 1597
2219
+ cut
2220
+ 8587
2221
+ 1.0
2222
+ 2172
2223
+ 2704
2224
+ exhaustion
2225
+ 75
2226
+ 114.5
2227
+ 24
2228
+ 25
2229
+ fob
2230
+ 2048
2231
+ 4.2
2232
+ 511
2233
+ 642
2234
+ fracture
2235
+ 898
2236
+ 9.6
2237
+ 223
2238
+ 291
2239
+ irritation
2240
+ 547
2241
+ 15.7
2242
+ 138
2243
+ 183
2244
+ pain
2245
+ 704
2246
+ 12.2
2247
+ 176
2248
+ 215
2249
+ pinch
2250
+ 987
2251
+ 8.7
2252
+ 271
2253
+ 328
2254
+ strain
2255
+ 6025
2256
+ 1.4
2257
+ 1495
2258
+ 1843
2259
+ total
2260
+ 25991
2261
+ 6497
2262
+ 8122
2263
+ Table B.9: Split counts (2/2). w: training weights.
2264
+ Appendix C. Hyperparameter Optimization Details
2265
+ For Random Forest14, we searched the number of trees (ntree parameter, from 100 to
2266
+ 1600 with steps of 100), the number of variables to try when making each split (mtry, from 5
2267
+ to 45 with steps of 5), and the leaf size (nodesize, 1, 2, 5, 10, 25, and 50).
2268
+ For XGBoost15, we searched the maximum depth of a tree in the sequence (max depth,
2269
+ from 3 to 6 with steps of 1), the learning rate (learning rate, 0.01, 0.05, and 0.1), the
2270
+ minimum leaf size (min child weight, 1, 3, 5, and 10), the percentage of training instances
2271
+ to be used in building each tree (subsample, 0.3, 0.5, 0.7, and 1), and the percentage of
2272
+ predictors to be considered in making each split of a given tree (colsample bylevel, 0.3,
2273
+ 0.5, 0.7, and 1). The number of trees in the sequence (ntrees) was set to 2000. The loss was
2274
+ the multinomial one. Finally, for the SVM model, we optimized the C parameter (C, 10^x with
+ x taking 3000 evenly spaced values in [−9, 9]).
2276
+ 14https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html
2277
+ 15https://xgboost.readthedocs.io/en/latest/parameter.html
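+ To make the searched grids concrete, here is a minimal sketch in Python. Mapping the R-style names
+ used above (ntree, mtry, nodesize) onto the scikit-learn parameters of footnote 14 is an assumption,
+ and the grid-search driver itself is omitted.
+
+ import numpy as np
+
+ # Random Forest grid (assumed sklearn.ensemble.RandomForestClassifier naming)
+ rf_grid = {
+     "n_estimators": list(range(100, 1601, 100)),  # ntree: 100 to 1600, step 100
+     "max_features": list(range(5, 46, 5)),        # mtry: 5 to 45, step 5
+     "min_samples_leaf": [1, 2, 5, 10, 25, 50],    # nodesize
+ }
+
+ # XGBoost grid (parameter names from footnote 15); the number of trees is fixed at 2000
+ # and the objective is the multinomial (softmax) one
+ xgb_grid = {
+     "max_depth": [3, 4, 5, 6],
+     "learning_rate": [0.01, 0.05, 0.1],
+     "min_child_weight": [1, 3, 5, 10],
+     "subsample": [0.3, 0.5, 0.7, 1.0],
+     "colsample_bylevel": [0.3, 0.5, 0.7, 1.0],
+ }
+
+ # SVM grid: C = 10^x with x taking 3000 evenly spaced values in [-9, 9]
+ svm_grid = {"C": (10.0 ** np.linspace(-9, 9, 3000)).tolist()}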
2278
+ 17
2279
+
2280
+ Appendix D. Illustration of Task Difficulty vs. Number of Categories
2281
+ To illustrate how the prediction task gets more and more difficult as the number of cate-
2282
+ gories increases, we designed a synthetic example in which 10^5 observations were drawn from
2283
+ an increasing number of categories (2 to 12). Class imbalance was simulated by drawing from
2284
+ the categories with probabilities following the lognormal distribution (mean=0, sd=2). We con-
2285
+ sidered two baselines: a random baseline, that predicts categories uniformly at random, and a
2286
+ most frequent baseline, which always returns the most frequent category. Our proxy for diffi-
2287
+ culty was one minus the F1 score of the baselines. In other words, the less well the baselines
2288
+ are doing, the more difficult the task. We can see in Fig. D.4 that the task difficulty rapidly
2289
+ increases with the number of categories, and that going from 2 to 6 categories almost makes the
2290
+ task twice as hard.
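+ Below is a minimal sketch, assuming numpy and scikit-learn, of how such a synthetic difficulty curve
+ can be reproduced; the macro-averaged F1, the fixed random seed, and the function name are
+ assumptions rather than the authors' exact code.
+
+ import numpy as np
+ from sklearn.metrics import f1_score
+
+ rng = np.random.default_rng(0)
+
+ def baseline_difficulty(n_categories, n_obs=100_000):
+     """Return 1 - F1 of the random and most-frequent baselines for a given number of categories."""
+     # Class imbalance: drawing probabilities follow a lognormal distribution (mean=0, sd=2)
+     probs = rng.lognormal(mean=0.0, sigma=2.0, size=n_categories)
+     probs /= probs.sum()
+     y = rng.choice(n_categories, size=n_obs, p=probs)
+     random_pred = rng.integers(0, n_categories, size=n_obs)       # random baseline
+     most_frequent_pred = np.full(n_obs, np.bincount(y).argmax())  # most frequent baseline
+     return (1 - f1_score(y, random_pred, average="macro"),
+             1 - f1_score(y, most_frequent_pred, average="macro"))
+
+ # Example: the task is considerably harder with 6 categories than with 2
+ print(baseline_difficulty(2), baseline_difficulty(6))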
2291
+ [Figure D.4: line plot "Prediction Task Difficulty vs Number of Categories"; x-axis: number of categories; y-axis: 1 − F1 score; series: Random Baseline and Most Frequent Baseline.]
2308
+ Appendix E. Per-Company Results for the Full Generic Models
2309
+ Note: the ensemble (“ens”) rows are left blank whenever the specific model is an SVM, as
2310
+ we could not use ensembling in this case (the forecast of the SVM is not probabilistic).
2311
+ Appendix E.1. Severity
2312
+ Comp.1
2313
+ Comp.3
2314
+ Comp.5
2315
+ Comp.6
2316
+ Avg
2317
+ spec
2318
+ 29.51
2319
+ 32.62
2320
+ 45.35
2321
+ 33.9
2322
+ 35.34†
2323
+ SVM
2324
+ gen
2325
+ 20.23
2326
+ 25.64
2327
+ 34.01
2328
+ 46.76
2329
+ 31.66
2330
+ gen
2331
+ 25.75
2332
+ 21.54
2333
+ 29.75
2334
+ 31.99
2335
+ 27.26
2336
+ RF
2337
+ ens
2338
+ 28.68
2339
+ 31.62
2340
+ 30.69
2341
+ 30.33
2342
+ coef.
2343
+ (0.4,1)
2344
+ (0.8,1)
2345
+ (0.4,1)
2346
+ gen
2347
+ 27.58
2348
+ 23.26
2349
+ 27.58
2350
+ 29.48
2351
+ 26.98
2352
+ XGB
2353
+ ens
2354
+ 28.85
2355
+ 28.34
2356
+ 31.82
2357
+ 29.67
2358
+ coef.
2359
+ (0.1,1)
2360
+ (0.3,1)
2361
+ (0.5,1)
2362
+ #lev. spec
2363
+ 4
2364
+ 4
2365
+ 3
2366
+ 3
2367
+ 3.5
2368
+ #lev. gen
2369
+ 5
2370
+ 5
2371
+ 5
2372
+ 5
2373
+ 5
2374
+ Table E.10: Severity, construction. †: best model on average.
2375
+ 18
2376
+
2377
+ Comp.4
2378
+ Comp.6
2379
+ Comp.7
2380
+ Comp.9
2381
+ Avg
2382
+ spec
2383
+ 29.48
2384
+ 45.66
2385
+ 57.67
2386
+ 30.34
2387
+ 40.79
2388
+ SVM
2389
+ gen
2390
+ 20.93
2391
+ 42.56
2392
+ 46.67
2393
+ 31.9
2394
+ 35.52
2395
+ gen
2396
+ 27.46
2397
+ 38.61
2398
+ 39.97
2399
+ 27.42
2400
+ 33.37
2401
+ RF
2402
+ ens
2403
+ 28.73
2404
+ 53.62
2405
+ 41.17†
2406
+ coef.
2407
+ (1,0.9)
2408
+ (0.5,1)
2409
+ gen
2410
+ 27.39
2411
+ 53.02
2412
+ 39.24
2413
+ 25.95
2414
+ 36.4
2415
+ XGB
2416
+ ens
2417
+ 28.74
2418
+ 51.27
2419
+ 40.01⋆
2420
+ coef.
2421
+ (0.8,1)
2422
+ (0.2,1)
2423
+ #lev. spec
2424
+ 4
2425
+ 2
2426
+ 2
2427
+ 4
2428
+ 3
2429
+ #lev. gen
2430
+ 5
2431
+ 5
2432
+ 5
2433
+ 5
2434
+ 5
2435
+ Table E.11: Severity, electric T&D. †: best model on average. Bold/⋆: better/within 2pts of the company-specific
2436
+ model.
2437
+ Comp.2
2438
+ Comp.3
2439
+ Comp.8
2440
+ Comp.7
2441
+ Avg
2442
+ spec
2443
+ 42.53
2444
+ 24.74
2445
+ 39.72
2446
+ 28.44
2447
+ 33.86†
2448
+ SVM
2449
+ gen
2450
+ 37.91
2451
+ 22.53
2452
+ 38.85
2453
+ 24.41
2454
+ 30.92
2455
+ gen
2456
+ 17.96
2457
+ 17.12
2458
+ 35.69
2459
+ 23.14
2460
+ 23.48
2461
+ RF
2462
+ ens
2463
+ 27.87
2464
+ 24.05
2465
+ 39.81
2466
+ 24.2
2467
+ 28.98
2468
+ coef.
2469
+ (0.2,1)
2470
+ (0.7,1)
2471
+ (0.7,1)
2472
+ (0.1,1)
2473
+ gen
2474
+ 16.75
2475
+ 23.25
2476
+ 35.27
2477
+ 21.72
2478
+ 24.25
2479
+ XGB
2480
+ ens
2481
+ 27.89
2482
+ 25.36
2483
+ 39.61
2484
+ 23.7
2485
+ 29.14
2486
+ coef.
2487
+ (0.2,1)
2488
+ (1,0.8)
2489
+ (0.3,1)
2490
+ (0.1,1)
2491
+ #lev. spec
2492
+ 3
2493
+ 4
2494
+ 3
2495
+ 4
2496
+ 3.5
2497
+ #lev. gen
2498
+ 5
2499
+ 5
2500
+ 5
2501
+ 5
2502
+ 5
2503
+ Table E.12: Severity, oil & gas. †: best model on average.
2504
+ Appendix E.2. Body Part
2505
+ Comp.1
2506
+ Comp.3
2507
+ Comp.5
2508
+ Comp.6
2509
+ Avg
2510
+ spec
2511
+ 34.14
2512
+ 26.48
2513
+ 32.09
2514
+ 31.39
2515
+ 31.03
2516
+ SVM
2517
+ gen
2518
+ 23.26
2519
+ 25.09
2520
+ 27.02
2521
+ 26.66
2522
+ 25.51
2523
+ gen
2524
+ 34.14
2525
+ 33.04
2526
+ 34.68
2527
+ 35.78
2528
+ 34.41†
2529
+ RF
2530
+ ens
2531
+ 33.49
2532
+ 22.43
2533
+ 32.7
2534
+ 32.7
2535
+ 30.33⋆
2536
+ coef.
2537
+ (0.4,1)
2538
+ (0.1,1)
2539
+ (0.7,1)
2540
+ (0.6,1)
2541
+ gen
2542
+ 31.92
2543
+ 30.57
2544
+ 34.73
2545
+ 34.22
2546
+ 32.86
2547
+ XGB
2548
+ ens
2549
+ 32.44
2550
+ 20.38
2551
+ 32.62
2552
+ 30.77
2553
+ 29.05⋆
2554
+ coef.
2555
+ (0.1,1)
2556
+ (0.2,1)
2557
+ (0.2,1)
2558
+ (0.5,1)
2559
+ #lev. spec
2560
+ 6
2561
+ 6
2562
+ 6
2563
+ 6
2564
+ 6
2565
+ #lev. gen
2566
+ 6
2567
+ 6
2568
+ 6
2569
+ 6
2570
+ 6
2571
+ Table E.13: Body part, construction. †: best model on average. Bold/⋆: better/within 2pts of the company-specific
2572
+ model.
2573
+ 19
2574
+
2575
+ Comp.4
2576
+ Comp.6
2577
+ Comp.7
2578
+ Comp.9
2579
+ Avg
2580
+ spec
2581
+ 29.25
2582
+ 27.7
2583
+ 46.34
2584
+ 23.86
2585
+ 31.79
2586
+ SVM
2587
+ gen
2588
+ 19.21
2589
+ 28.86
2590
+ 38.26
2591
+ 20.4
2592
+ 26.68
2593
+ gen
2594
+ 27.96
2595
+ 32
2596
+ 51.02
2597
+ 28.76
2598
+ 34.94†
2599
+ RF
2600
+ ens
2601
+ 27.94
2602
+ 50.75
2603
+ 23.56
2604
+ 34.08
2605
+ coef.
2606
+ (0.4,1)
2607
+ (0.1,1)
2608
+ (0.4,1)
2609
+ gen
2610
+ 28.24
2611
+ 31.12
2612
+ 46.44
2613
+ 29.24
2614
+ 33.76
2615
+ XGB
2616
+ ens
2617
+ 27.96
2618
+ 41.17
2619
+ 27.81
2620
+ 32.31
2621
+ coef.
2622
+ (0.2,1)
2623
+ (0.1,1)
2624
+ (0.5,1)
2625
+ #lev. spec
2626
+ 6
2627
+ 6
2628
+ 4
2629
+ 6
2630
+ 5.5
2631
+ #lev. gen
2632
+ 6
2633
+ 6
2634
+ 6
2635
+ 6
2636
+ 6
2637
+ Table E.14: Body part, electric T&D. †: best model on average. Bold: better the company-specific model.
2638
+ Comp.2
2639
+ Comp.8
2640
+ Comp.7
2641
+ Avg
2642
+ spec
2643
+ 22.96
2644
+ 32.41
2645
+ 31.17
2646
+ 28.85
2647
+ SVM
2648
+ gen
2649
+ 22.66
2650
+ 26.23
2651
+ 22.06
2652
+ 23.65
2653
+ gen
2654
+ 26.11
2655
+ 32.34
2656
+ 32.21
2657
+ 30.22†
2658
+ RF
2659
+ ens
2660
+ 20.31
2661
+ 32.88
2662
+ 26.09
2663
+ 26.43
2664
+ coef.
2665
+ (0.1,1)
2666
+ (1,0.1)
2667
+ (0.1,1)
2668
+ gen
2669
+ 25.5
2670
+ 32.36
2671
+ 29.81
2672
+ 29.22
2673
+ XGB
2674
+ ens
2675
+ 16.26
2676
+ 32.28
2677
+ 30.56
2678
+ 26.37
2679
+ coef.
2680
+ (0.1,1)
2681
+ (1,0.3)
2682
+ (0.2,1)
2683
+ #lev. spec
2684
+ 6
2685
+ 6
2686
+ 6
2687
+ 6
2688
+ #lev. gen
2689
+ 6
2690
+ 6
2691
+ 6
2692
+ 6
2693
+ Table E.15: Body part, oil & gas. †: best model on average. Bold: better the company-specific model.
2694
+ Appendix E.3. Injury Type
2695
+ Comp.1
2696
+ Comp.3
2697
+ Comp.5
2698
+ Comp.6
2699
+ Avg
2700
+ spec
2701
+ 54
2702
+ 37.7
2703
+ 33.91
2704
+ 50.07
2705
+ 43.92
2706
+ SVM
2707
+ gen
2708
+ 34.67
2709
+ 36.66
2710
+ 34.78
2711
+ 48.81
2712
+ 38.73
2713
+ gen
2714
+ 47.84
2715
+ 33.86
2716
+ 33.11
2717
+ 45.3
2718
+ 40.03
2719
+ RF
2720
+ ens
2721
+ 47.6
2722
+ 31.98
2723
+ 45.67
2724
+ 41.75
2725
+ coef.
2726
+ (0.2,1)
2727
+ (0.1,1)
2728
+ (0.4,1)
2729
+ gen
2730
+ 46.46
2731
+ 23.99
2732
+ 31.9
2733
+ 44.7
2734
+ 36.76
2735
+ XGB
2736
+ ens
2737
+ 56.55
2738
+ 35.2
2739
+ 50.46
2740
+ 47.4†
2741
+ coef.
2742
+ (0.6,1)
2743
+ (0.4,1)
2744
+ (0.2,1)
2745
+ #lev. spec
2746
+ 3
2747
+ 3
2748
+ 6
2749
+ 4
2750
+ 4
2751
+ #lev. gen
2752
+ 11
2753
+ 11
2754
+ 11
2755
+ 11
2756
+ 11
2757
+ Table E.16: Injury type, construction. †: best model on average. Bold: better the company-specific model.
2758
+ Comp.4
2759
+ Comp.6
2760
+ Comp.7
2761
+ Comp.9
2762
+ Avg
2763
+ spec
2764
+ 39.21
2765
+ 43.4
2766
+ 47.28
2767
+ 44.98
2768
+ 43.72
2769
+ SVM
2770
+ gen
2771
+ 42.27
2772
+ 54.59
2773
+ 60.78
2774
+ 57.14
2775
+ 53.7†
2776
+ gen
2777
+ 26.44
2778
+ 42.41
2779
+ 56.74
2780
+ 44.16
2781
+ 42.44⋆
2782
+ RF
2783
+ ens
2784
+ 39.33
2785
+ 59.52
2786
+ 49.42
2787
+ coef.
2788
+ (1,0.5)
2789
+ (1,0.2)
2790
+ gen
2791
+ 28.57
2792
+ 41.6
2793
+ 51.45
2794
+ 42.49
2795
+ 41.03
2796
+ XGB
2797
+ ens
2798
+ 40.31
2799
+ 62.58
2800
+ 51.44
2801
+ coef.
2802
+ (1,0.8)
2803
+ (1,0.1)
2804
+ #lev. spec
2805
+ 5
2806
+ 6
2807
+ 4
2808
+ 6
2809
+ 5.25
2810
+ #lev. gen
2811
+ 11
2812
+ 11
2813
+ 11
2814
+ 11
2815
+ 11
2816
+ Table E.17: Injury type, electric T&D. †: best model on average. Bold/⋆: better/within 2pts of the company-specific
2817
+ model.
2818
+ 20
2819
+
2820
+ Comp.2
2821
+ Comp.8
2822
+ Comp.7
2823
+ Avg
2824
+ spec
2825
+ 35.39
2826
+ 34.04
2827
+ 40.72
2828
+ 36.72
2829
+ SVM
2830
+ gen
2831
+ 27.97
2832
+ 30.67
2833
+ 46.69
2834
+ 35.11⋆
2835
+ gen
2836
+ 23.72
2837
+ 32.22
2838
+ 40.52
2839
+ 32.15
2840
+ RF
2841
+ ens
2842
+ 36.82
2843
+ 40.48
2844
+ 38.65†
2845
+ coef.
2846
+ (0.5,1)
2847
+ (0.7,1)
2848
+ gen
2849
+ 23.69
2850
+ 31.01
2851
+ 39.28
2852
+ 31.33
2853
+ XGB
2854
+ ens
2855
+ 35.09
2856
+ 41
2857
+ 38.05
2858
+ coef.
2859
+ (1,0.7)
2860
+ (1,0.6)
2861
+ #lev. spec
2862
+ 3
2863
+ 10
2864
+ 8
2865
+ 7
2866
+ #lev. gen
2867
+ 11
2868
+ 11
2869
+ 11
2870
+ 11
2871
+ Table E.18: Injury type, oil & gas. †: best model on average. Bold/⋆: better/within 2pts of the company-specific
2872
+ model.
2873
+ Appendix E.4. Accident Type
2874
+ Comp.3
2875
+ Comp.5
2876
+ Avg
2877
+ spec
2878
+ 68.63
2879
+ 41.34
2880
+ 54.98†
2881
+ SVM
2882
+ gen
2883
+ 41.87
2884
+ 42.91
2885
+ 42.39
2886
+ gen
2887
+ 40.44
2888
+ 44.48
2889
+ 42.46
2890
+ RF
2891
+ ens
2892
+ 44.35
2893
+ 44.35
2894
+ coef.
2895
+ (1,0.7)
2896
+ gen
2897
+ 54.04
2898
+ 42.51
2899
+ 48.27
2900
+ XGB
2901
+ ens
2902
+ 42.02
2903
+ 42.02
2904
+ coef.
2905
+ (1,1)
2906
+ #lev. spec
2907
+ 2
2908
+ 5
2909
+ 3.5
2910
+ #lev. gen
2911
+ 5
2912
+ 5
2913
+ 5
2914
+ Table E.19: Accident type, construction. †: best model on average.
2915
+ Comp.4
2916
+ Comp.9
2917
+ Avg
2918
+ spec
2919
+ 43.15
2920
+ 53.2
2921
+ 48.17
2922
+ SVM
2923
+ gen
2924
+ 36.46
2925
+ 52.71
2926
+ 44.58
2927
+ gen
2928
+ 40.05
2929
+ 57.11
2930
+ 48.58†
2931
+ RF
2932
+ ens
2933
+ 41.29
2934
+ 41.29
2935
+ coef.
2936
+ (0.4,1)
2937
+ gen
2938
+ 38.13
2939
+ 57.46
2940
+ 47.8⋆
2941
+ XGB
2942
+ ens
2943
+ 41.08
2944
+ 41.08
2945
+ coef.
2946
+ (0.4,1)
2947
+ #lev. spec
2948
+ 5
2949
+ 4
2950
+ 4.5
2951
+ #lev. gen
2952
+ 5
2953
+ 5
2954
+ 5
2955
+ Table E.20: Accident type, electric T&D. †: best model on average. Bold/⋆: better/within 2pts of the company-
2956
+ specific model.
2957
+ 21
2958
+
2959
+ Comp.3
2960
+ Comp.8
2961
+ Comp.7
2962
+ Avg
2963
+ spec
2964
+ 80.91
2965
+ 85
2966
+ 53.58
2967
+ 73.16†
2968
+ SVM
2969
+ gen
2970
+ 58.06
2971
+ 78.09
2972
+ 45.92
2973
+ 60.69
2974
+ gen
2975
+ 61.67
2976
+ 78.03
2977
+ 49.71
2978
+ 63.14
2979
+ RF
2980
+ ens
2981
+ 78.46
2982
+ 54.35
2983
+ 66.4
2984
+ coef.
2985
+ (0.1,1)
2986
+ (1,0.1)
2987
+ gen
2988
+ 46.65
2989
+ 76.93
2990
+ 52.16
2991
+ 58.58
2992
+ XGB
2993
+ ens
2994
+ 73.8
2995
+ 55.31
2996
+ 64.56
2997
+ coef.
2998
+ (1,0.7)
2999
+ (1,0.7)
3000
+ #lev. spec
3001
+ 2
3002
+ 2
3003
+ 4
3004
+ 2.67
3005
+ #lev. gen
3006
+ 5
3007
+ 5
3008
+ 5
3009
+ 5
3010
+ Table E.21: Accident type, oil & gas. †: best model on average.
3011
+ Appendix E.5. Energy Source
3012
+ Comp.1
3013
+ Comp.3
3014
+ Comp.5
3015
+ Comp.6
3016
+ Avg
3017
+ spec
3018
+ 71.69
3019
+ 70.97
3020
+ 68.07
3021
+ 67.82
3022
+ 69.64
3023
+ SVM
3024
+ gen
3025
+ 74.76
3026
+ 78.16
3027
+ 70.86
3028
+ 72.69
3029
+ 74.12†
3030
+ gen
3031
+ 70.36
3032
+ 76.03
3033
+ 70.14
3034
+ 74.31
3035
+ 72.71
3036
+ RF
3037
+ ens
3038
+ 71.05
3039
+ 68.02
3040
+ 68.1
3041
+ 69.06⋆
3042
+ coef.
3043
+ (0.9,1)
3044
+ (0.2,1)
3045
+ (0.4,1)
3046
+ gen
3047
+ 74.33
3048
+ 83.44
3049
+ 64.62
3050
+ 72.7
3051
+ 73.77
3052
+ XGB
3053
+ ens
3054
+ 71.88
3055
+ 66.81
3056
+ 68.47
3057
+ 69.05⋆
3058
+ coef.
3059
+ (0.4,1)
3060
+ (0.1,1)
3061
+ (0.4,1)
3062
+ #lev. spec
3063
+ 2
3064
+ 2
3065
+ 3
3066
+ 2
3067
+ 2.25
3068
+ #lev. gen
3069
+ 5
3070
+ 5
3071
+ 5
3072
+ 5
3073
+ 5
3074
+ Table E.22: Energy source, construction.
3075
+ †: best model on average. Bold/⋆: better/within 2pts of the company-
3076
+ specific model.
3077
+ Comp.4
3078
+ Comp.6
3079
+ Comp.9
3080
+ Avg
3081
+ spec
3082
+ 79.5
3083
+ 73.22
3084
+ 81.05
3085
+ 77.92
3086
+ SVM
3087
+ gen
3088
+ 76.59
3089
+ 70.61
3090
+ 85.73
3091
+ 77.64⋆
3092
+ gen
3093
+ 74.99
3094
+ 73.06
3095
+ 83.32
3096
+ 77.12⋆
3097
+ RF
3098
+ ens
3099
+ 77.85
3100
+ 73.81
3101
+ 75.83
3102
+ coef.
3103
+ (0.9,1)
3104
+ (0.2,1)
3105
+ gen
3106
+ 76.43
3107
+ 72.85
3108
+ 87.21
3109
+ 78.83†
3110
+ XGB
3111
+ ens
3112
+ 79.41
3113
+ 73.52
3114
+ 76.47⋆
3115
+ coef.
3116
+ (0.2,1)
3117
+ (0.3,1)
3118
+ #lev. spec
3119
+ 3
3120
+ 2
3121
+ 3
3122
+ 2.67
3123
+ #lev. gen
3124
+ 5
3125
+ 5
3126
+ 5
3127
+ 5
3128
+ Table E.23: Energy source, electric T&D. †: best model on average. Bold/⋆: better/within 2pts of the company-
3129
+ specific model.
3130
+ 22
3131
+
3132
+ Comp.8
3133
+ Comp.7
3134
+ Avg
3135
+ spec
3136
+ 68.98
3137
+ 71.8
3138
+ 70.39
3139
+ SVM
3140
+ gen
3141
+ 68.73
3142
+ 70.36
3143
+ 69.54⋆
3144
+ gen
3145
+ 70.43
3146
+ 71.81
3147
+ 71.12
3148
+ RF
3149
+ ens
3150
+ 68.27
3151
+ 72.89
3152
+ 70.58
3153
+ coef.
3154
+ (0.4,1)
3155
+ (1,0.2)
3156
+ gen
3157
+ 70.44
3158
+ 74
3159
+ 72.22†
3160
+ XGB
3161
+ ens
3162
+ 68.72
3163
+ 73.25
3164
+ 70.98
3165
+ coef.
3166
+ (0.1,1)
3167
+ (0.3,1)
3168
+ #lev. spec
3169
+ 4
3170
+ 2
3171
+ 3
3172
+ #lev. gen
3173
+ 5
3174
+ 5
3175
+ 5
3176
+ Table E.24: Energy source, oil & gas. †: best model on average. Bold/⋆: better/within 2pts of the company-specific
3177
+ model.
3178
+ Appendix F. Per-Company Results for the Per-Domain Generic Models
3179
+ Note: the ensemble (‘ens’) rows are left blank whenever the specific model is an SVM, as we
3180
+ could not use ensembling in this case (the forecast of the SVM is not probabilistic).
3181
+ Appendix F.1. Severity
3182
+ Comp.5
3183
+ Comp.3
3184
+ Comp.6
3185
+ Comp.1
3186
+ Avg
3187
+ spec
3188
+ 45.35
3189
+ 32.62
3190
+ 33.9
3191
+ 29.51
3192
+ 35.34†
3193
+ SVM
3194
+ gen
3195
+ 39.86
3196
+ 26.03
3197
+ 32.61
3198
+ 22.66
3199
+ 30.29
3200
+ gen
3201
+ 34.1
3202
+ 27.7
3203
+ 32.62
3204
+ 29.74
3205
+ 31.04
3206
+ RF
3207
+ ens
3208
+ 31.2
3209
+ 33.5
3210
+ 30.77
3211
+ 31.82
3212
+ Coeffs
3213
+ (0.8,1)
3214
+ (0.6,1)
3215
+ (1,0.3)
3216
+ gen
3217
+ 30.84
3218
+ 26.84
3219
+ 28.33
3220
+ 28.95
3221
+ 28.74
3222
+ XGB
3223
+ ens
3224
+ 31.3
3225
+ 34.14
3226
+ 30
3227
+ 31.81
3228
+ Coeffs
3229
+ (0.3,1)
3230
+ (1,0.6)
3231
+ (0.5,1)
3232
+ #categories spec
3233
+ 3
3234
+ 4
3235
+ 3
3236
+ 4
3237
+ 3.5
3238
+ #categories gen
3239
+ 5
3240
+ 5
3241
+ 5
3242
+ 5
3243
+ 5
3244
+ Table F.25: Severity, construction. †: best model on average.
3245
+ Comp.7
3246
+ Comp.4
3247
+ Comp.9
3248
+ Comp.6
3249
+ Avg
3250
+ spec
3251
+ 57.67
3252
+ 29.48
3253
+ 30.34
3254
+ 45.66
3255
+ 40.79
3256
+ SVM
3257
+ gen
3258
+ 36
3259
+ 30.47
3260
+ 24.62
3261
+ 52.06
3262
+ 35.79
3263
+ gen
3264
+ 47.19
3265
+ 30.91
3266
+ 28.5
3267
+ 58.53
3268
+ 41.28
3269
+ RF
3270
+ ens
3271
+ 54.8
3272
+ 32.97
3273
+ 43.88†
3274
+ Coeffs
3275
+ (1,0.6)
3276
+ (1,0.9)
3277
+ gen
3278
+ 44.23
3279
+ 30.59
3280
+ 26.49
3281
+ 57.44
3282
+ 39.69⋆
3283
+ XGB
3284
+ ens
3285
+ 54.95
3286
+ 26.96
3287
+ 40.95
3288
+ Coeffs
3289
+ (0.4,1)
3290
+ (0.1,1)
3291
+ #categories spec
3292
+ 2
3293
+ 4
3294
+ 4
3295
+ 2
3296
+ 3
3297
+ #categories gen
3298
+ 5
3299
+ 5
3300
+ 5
3301
+ 5
3302
+ 5
3303
+ Table F.26: Severity, elec. †: best model on average. Bold/⋆: better/within 2pts of the company-specific model.
3304
+ 23
3305
+
3306
+ Comp.7
3307
+ Comp.3
3308
+ Comp.8
3309
+ Comp.2
3310
+ Avg
3311
+ spec
3312
+ 28.44
3313
+ 24.74
3314
+ 39.72
3315
+ 42.53
3316
+ 33.86†
3317
+ SVM
3318
+ gen
3319
+ 26.7
3320
+ 21.05
3321
+ 40.83
3322
+ 27.31
3323
+ 28.97
3324
+ gen
3325
+ 26.22
3326
+ 19.82
3327
+ 35.7
3328
+ 22.59
3329
+ 26.08
3330
+ RF
3331
+ ens
3332
+ 27.59
3333
+ 22.38
3334
+ 40.22
3335
+ 32.86
3336
+ 30.76
3337
+ Coeffs
3338
+ (0.8,1)
3339
+ (1,0.9)
3340
+ (1,0.8)
3341
+ (0.7,1)
3342
+ gen
3343
+ 23.82
3344
+ 19.7
3345
+ 33.09
3346
+ 20.15
3347
+ 24.19
3348
+ XGB
3349
+ ens
3350
+ 27.97
3351
+ 25.73
3352
+ 40.1
3353
+ 31.96
3354
+ 31.44
3355
+ Coeffs
3356
+ (0.4,1)
3357
+ (1,0.7)
3358
+ (1,0.2)
3359
+ (0.7,1)
3360
+ #categories spec
3361
+ 4
3362
+ 4
3363
+ 3
3364
+ 3
3365
+ 3.5
3366
+ #categories gen
3367
+ 5
3368
+ 5
3369
+ 5
3370
+ 5
3371
+ 5
3372
+ Table F.27: Severity, oil & gas. †: best model on average.
3373
+ Appendix F.2. Body part
3374
+ Comp.5
3375
+ Comp.3
3376
+ Comp.6
3377
+ Comp.1
3378
+ Avg
3379
+ spec
3380
+ 32.09
3381
+ 26.48
3382
+ 31.39
3383
+ 34.14
3384
+ 31.03
3385
+ SVM
3386
+ gen
3387
+ 31.08
3388
+ 28.14
3389
+ 31.92
3390
+ 34.13
3391
+ 31.32
3392
+ gen
3393
+ 32.19
3394
+ 27.06
3395
+ 33.64
3396
+ 34.34
3397
+ 31.81
3398
+ RF
3399
+ ens
3400
+ 31.23
3401
+ 25.77
3402
+ 35.14
3403
+ 29.41
3404
+ 30.39⋆
3405
+ Coeffs
3406
+ (0.1,1)
3407
+ (0.2,1)
3408
+ (0.6,1)
3409
+ (0.1,1)
3410
+ gen
3411
+ 33.6
3412
+ 29.91
3413
+ 32.48
3414
+ 32.92
3415
+ 32.23†
3416
+ XGB
3417
+ ens
3418
+ 32.34
3419
+ 20.72
3420
+ 32.33
3421
+ 31.41
3422
+ 29.2⋆
3423
+ Coeffs
3424
+ (0.1,1)
3425
+ (0.2,1)
3426
+ (0.5,1)
3427
+ (0.1,1)
3428
+ #categories spec
3429
+ 6
3430
+ 6
3431
+ 6
3432
+ 6
3433
+ 6
3434
+ #categories gen
3435
+ 6
3436
+ 6
3437
+ 6
3438
+ 6
3439
+ 6
3440
+ Table F.28: Body part, construction. †: best model on average. Bold/⋆: better/within 2pts of the company-specific
3441
+ model.
3442
+ Comp.7
3443
+ Comp.4
3444
+ Comp.9
3445
+ Comp.6
3446
+ Avg
3447
+ spec
3448
+ 46.34
3449
+ 29.25
3450
+ 23.86
3451
+ 27.7
3452
+ 31.79
3453
+ SVM
3454
+ gen
3455
+ 34.02
3456
+ 18.16
3457
+ 19.6
3458
+ 29.17
3459
+ 25.24
3460
+ gen
3461
+ 48.21
3462
+ 26.31
3463
+ 25.71
3464
+ 31.97
3465
+ 33.05
3466
+ RF
3467
+ ens
3468
+ 39.52
3469
+ 28.63
3470
+ 19.59
3471
+ 29.25
3472
+ Coeffs
3473
+ (0.1,1)
3474
+ (0.1,1)
3475
+ (0.1,1)
3476
+ gen
3477
+ 53.03
3478
+ 28.55
3479
+ 26.56
3480
+ 32.7
3481
+ 35.21†
3482
+ XGB
3483
+ ens
3484
+ 49.41
3485
+ 29.72
3486
+ 25.01
3487
+ 34.71
3488
+ Coeffs
3489
+ (1,1)
3490
+ (0.6,1)
3491
+ (0.2,1)
3492
+ #categories spec
3493
+ 4
3494
+ 6
3495
+ 6
3496
+ 6
3497
+ 5.5
3498
+ #categories gen
3499
+ 6
3500
+ 6
3501
+ 6
3502
+ 6
3503
+ 6
3504
+ Table F.29: Body part, elec. †: best model on average. Bold/⋆: better/within 2pts of the company-specific model.
3505
+ 24
3506
+
3507
+ Comp.7
3508
+ Comp.8
3509
+ Comp.2
3510
+ Avg
3511
+ spec
3512
+ 31.17
3513
+ 32.41
3514
+ 22.96
3515
+ 28.85†
3516
+ SVM
3517
+ gen
3518
+ 27.22
3519
+ 25.91
3520
+ 20.84
3521
+ 24.66
3522
+ gen
3523
+ 29.64
3524
+ 31.8
3525
+ 25.12
3526
+ 28.85†
3527
+ RF
3528
+ ens
3529
+ 30.15
3530
+ 31.66
3531
+ 20.81
3532
+ 27.54⋆
3533
+ Coeffs
3534
+ (0.1,1)
3535
+ (0.1,1)
3536
+ (0.4,1)
3537
+ gen
3538
+ 29.69
3539
+ 32.36
3540
+ 23.72
3541
+ 28.59⋆
3542
+ XGB
3543
+ ens
3544
+ 31.84
3545
+ 32.1
3546
+ 19.28
3547
+ 27.74⋆
3548
+ Coeffs
3549
+ (1,0.5)
3550
+ (1,0.1)
3551
+ (0.2,1)
3552
+ #categories spec
3553
+ 6
3554
+ 6
3555
+ 6
3556
+ 6
3557
+ #categories gen
3558
+ 6
3559
+ 6
3560
+ 6
3561
+ 6
3562
+ Table F.30: Body part, oil & gas.
3563
+ †: best model on average. Bold/⋆: better/within 2pts of the company-specific
3564
+ model.
3565
+ Appendix F.3. Injury type
3566
+ Comp.5
3567
+ Comp.3
3568
+ Comp.6
3569
+ Comp.1
3570
+ Avg
3571
+ spec
3572
+ 33.91
3573
+ 37.7
3574
+ 50.07
3575
+ 54
3576
+ 43.92
3577
+ SVM
3578
+ gen
3579
+ 34.16
3580
+ 36.31
3581
+ 51.7
3582
+ 48.97
3583
+ 42.78⋆
3584
+ gen
3585
+ 33.56
3586
+ 33.91
3587
+ 49.34
3588
+ 51.64
3589
+ 42.11⋆
3590
+ RF
3591
+ ens
3592
+ 33.38
3593
+ 48.57
3594
+ 54.42
3595
+ 45.46†
3596
+ Coeffs
3597
+ (0.1,1)
3598
+ (0.1,1)
3599
+ (1,0.2)
3600
+ gen
3601
+ 33.3
3602
+ 38.19
3603
+ 48.17
3604
+ 50.54
3605
+ 42.55⋆
3606
+ XGB
3607
+ ens
3608
+ 34.74
3609
+ 47.08
3610
+ 54.53
3611
+ 45.45
3612
+ Coeffs
3613
+ (0.3,1)
3614
+ (0.1,1)
3615
+ (0.5,1)
3616
+ #categories spec
3617
+ 6
3618
+ 3
3619
+ 4
3620
+ 3
3621
+ 4
3622
+ #categories gen
3623
+ 6
3624
+ 6
3625
+ 6
3626
+ 6
3627
+ 6
3628
+ Table F.31: Injury type, construction. †: best model on average. Bold/⋆: better/within 2pts of the company-specific
3629
+ model.
3630
+ Comp.7
3631
+ Comp.4
3632
+ Comp.9
3633
+ Comp.6
3634
+ Avg
3635
+ spec
3636
+ 47.28
3637
+ 39.21
3638
+ 44.98
3639
+ 43.4
3640
+ 43.72
3641
+ SVM
3642
+ gen
3643
+ 57.28
3644
+ 28.12
3645
+ 44.54
3646
+ 41.7
3647
+ 42.91⋆
3648
+ gen
3649
+ 53.99
3650
+ 29.2
3651
+ 43.07
3652
+ 40.47
3653
+ 41.68⋆
3654
+ RF
3655
+ ens
3656
+ 51.33
3657
+ 38.87
3658
+ 45.1
3659
+ Coeffs
3660
+ (0.8,1)
3661
+ (0.4,1)
3662
+ gen
3663
+ 49.62
3664
+ 29.63
3665
+ 40.26
3666
+ 46.42
3667
+ 41.48
3668
+ XGB
3669
+ ens
3670
+ 59.48
3671
+ 39.09
3672
+ 49.28†
3673
+ Coeffs
3674
+ (1,0.3)
3675
+ (1,0.3)
3676
+ #categories spec
3677
+ 4
3678
+ 5
3679
+ 6
3680
+ 6
3681
+ 5.25
3682
+ #categories gen
3683
+ 8
3684
+ 8
3685
+ 8
3686
+ 8
3687
+ 8
3688
+ Table F.32: Injury type, elec. †: best model on average. Bold/⋆: better/within 2pts of the company-specific model.
3689
+ 25
3690
+
3691
+ Comp.7
3692
+ Comp.8
3693
+ Comp.2
3694
+ Avg
3695
+ spec
3696
+ 40.72
3697
+ 34.04
3698
+ 35.39
3699
+ 36.72
3700
+ SVM
3701
+ gen
3702
+ 50.26
3703
+ 33.24
3704
+ 18.18
3705
+ 33.89
3706
+ gen
3707
+ 39.57
3708
+ 33.87
3709
+ 25.02
3710
+ 32.82
3711
+ RF
3712
+ ens
3713
+ 41.69
3714
+ 36.25
3715
+ 38.97
3716
+ Coeffs
3717
+ (1,0.7)
3718
+ (0.8,1)
3719
+ gen
3720
+ 38.32
3721
+ 32.64
3722
+ 26.07
3723
+ 32.34
3724
+ XGB
3725
+ ens
3726
+ 42.88
3727
+ 36.7
3728
+ 39.79†
3729
+ Coeffs
3730
+ (1,0.7)
3731
+ (1,0.1)
3732
+ #categories spec
3733
+ 8
3734
+ 10
3735
+ 3
3736
+ 7
3737
+ #categories gen
3738
+ 11
3739
+ 11
3740
+ 11
3741
+ 11
3742
+ Table F.33: Injury type, oil & gas. †: best model on average. Bold/⋆: better/within 2pts of the company-specific
3743
+ model.
3744
+ Appendix F.4. Accident type
3745
+ Comp.5
3746
+ Comp.3
3747
+ Avg
3748
+ spec
3749
+ 41.34
3750
+ 68.63
3751
+ 54.98†
3752
+ SVM
3753
+ gen
3754
+ 44.25
3755
+ 40.15
3756
+ 42.2
3757
+ gen
3758
+ 41.37
3759
+ 43.48
3760
+ 42.42
3761
+ RF
3762
+ ens
3763
+ 40.8
3764
+ 40.8
3765
+ Coeffs
3766
+ (0.1,1)
3767
+ gen
3768
+ 43.21
3769
+ 55.21
3770
+ 49.21
3771
+ XGB
3772
+ ens
3773
+ 43.4
3774
+ 43.4
3775
+ Coeffs
3776
+ (1,0.1)
3777
+ #categories spec
3778
+ 5
3779
+ 2
3780
+ 3.5
3781
+ #categories gen
3782
+ 5
3783
+ 5
3784
+ 5
3785
+ Table F.34: Accident type, construction. †: best model on average.
3786
+ Comp.4
3787
+ Comp.9
3788
+ Avg
3789
+ spec
3790
+ 43.15
3791
+ 53.2
3792
+ 48.17
3793
+ SVM
3794
+ gen
3795
+ 39.45
3796
+ 50.22
3797
+ 44.84
3798
+ gen
3799
+ 44.13
3800
+ 56.28
3801
+ 50.2†
3802
+ RF
3803
+ ens
3804
+ 39.72
3805
+ 39.72
3806
+ Coeffs
3807
+ (0.3,1)
3808
+ gen
3809
+ 40.96
3810
+ 58.21
3811
+ 49.58
3812
+ XGB
3813
+ ens
3814
+ 43.53
3815
+ 43.53
3816
+ Coeffs
3817
+ (0.4,1)
3818
+ #categories spec
3819
+ 5
3820
+ 4
3821
+ 4.5
3822
+ #categories gen
3823
+ 5
3824
+ 5
3825
+ 5
3826
+ Table F.35: Accident type, elec. †: best model on average. Bold/⋆: better/within 2pts of the company-specific model.
3827
+ Comp.7
3828
+ Comp.3
3829
+ Comp.8
3830
+ Avg
3831
+ spec
3832
+ 53.58
3833
+ 80.91
3834
+ 85
3835
+ 73.16†
3836
+ SVM
3837
+ gen
3838
+ 55.04
3839
+ 59.76
3840
+ 79.75
3841
+ 64.85
3842
+ gen
3843
+ 51.77
3844
+ 58.06
3845
+ 82.53
3846
+ 64.12
3847
+ RF
3848
+ ens
3849
+ 53.02
3850
+ 78.46
3851
+ 65.74
3852
+ Coeffs
3853
+ (1,0.1)
3854
+ (0.1,1)
3855
+ gen
3856
+ 49.03
3857
+ 62.16
3858
+ 77.93
3859
+ 63.04
3860
+ XGB
3861
+ ens
3862
+ 55.7
3863
+ 78.56
3864
+ 67.13
3865
+ Coeffs
3866
+ (1,0.9)
3867
+ (0.1,1)
3868
+ #categories spec
3869
+ 4
3870
+ 2
3871
+ 2
3872
+ 2.67
3873
+ #categories gen
3874
+ 4
3875
+ 4
3876
+ 4
3877
+ 4
3878
+ Table F.36: Accident type, oil & gas. †: best model on average.
3879
+ 26
3880
+
3881
+ Appendix F.5. Energy source
3882
+                    Comp.5   Comp.3   Comp.6   Comp.1   Avg
+ spec               68.07    70.97    67.82    71.69    69.64
+ SVM gen            70.3     76.99    73.32    74.5     73.78
+ RF gen             68.17    79.98    74.28    73.21    73.91†
+ RF ens             67.85    69.62    71.45      -      69.64⋆
+ RF Coeffs          (0.1,1)  (0.9,1)  (0.4,1)    -
+ XGB gen            62.88    73.28    74.91    73.09    71.04
+ XGB ens            68.05    70.75    71.9       -      70.23
+ XGB Coeffs         (0.1,1)  (0.7,1)  (0.5,1)    -
+ #categories spec     3        2        2        2       2.25
+ #categories gen      3        3        3        3       3
+ Table F.37: Energy source, construction. †: best model on average. Bold/⋆: better/within 2pts of the company-specific model.
3947
+                    Comp.4   Comp.9   Comp.6   Avg
+ spec               79.5     81.05    73.22    77.92
+ SVM gen            78.31    85.83    72.46    78.87†
+ RF gen             80.13    83.73    71.96    78.61
+ RF ens             79.75    73.22      -      76.48⋆
+ RF Coeffs          (0.5,1)  (0.1,1)    -
+ XGB gen            75.63    82.34    68.86    75.61
+ XGB ens            77.15    72.91      -      75.03
+ XGB Coeffs         (1,0.8)  (0.1,1)    -
+ #categories spec     3        3        2       2.67
+ #categories gen      3        3        3       3
+ Table F.38: Energy source, elec. †: best model on average. Bold/⋆: better/within 2pts of the company-specific model.
4001
+                    Comp.7   Comp.8   Avg
+ spec               71.8     68.98    70.39
+ SVM gen            49.94    61.33    55.63
+ RF gen             69.34    72.11    70.72†
+ RF ens             70.44    67.8     69.12⋆
+ RF Coeffs          (1,0.5)  (0.4,1)
+ XGB gen            72.58    68.12    70.35⋆
+ XGB ens            72.06    68.69    70.38⋆
+ XGB Coeffs         (0.2,1)  (0.1,1)
+ #categories spec     2        4       3
+ #categories gen      4        4       4
+ Table F.39: Energy source, oil & gas. †: best model on average. Bold/⋆: better/within 2pts of the company-specific model.
+
ENE1T4oBgHgl3EQf-QbO/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
EtAzT4oBgHgl3EQfw_5J/content/tmp_files/2301.01730v1.pdf.txt ADDED
@@ -0,0 +1,925 @@
1
+ arXiv:2301.01730v1 [quant-ph] 4 Jan 2023
2
+ Multitime Quantum Communication: Interesting But
3
+ Not Counterfactual
4
+ Robert B. Griffiths∗
5
+ Department of Physics
6
+ Carnegie Mellon University
7
+ Pittsburgh, PA 15213
8
+ Version of 4 January 2023
9
+ Abstract
10
+ A protocol for transmission of information between two parties introduced by Salih
11
+ et al., Phys. Rev. Lett. 110 (2013) 170502 (hereafter SLAZ), involves sending quan-
12
+ tum amplitude back and forth through a quantum channel in a series of steps, rather
13
+ than simply sending a signal in one direction. The authors claimed that their protocol
14
+ was “counterfactual” in the sense that while a quantum channel is needed to connect
15
+ the parties, its actual usage becomes vanishingly small in the asymptotic limit as the
16
+ number of steps tends to infinity. Here we show that this claim is incorrect because it
17
+ uses probabilistic reasoning that is not valid at intermediate times in the presence of
18
+ quantum interference. When ill-defined probabilities are replaced with a well-defined
19
+ measure of channel usage here called “Cost”, equal to the absolute square of the am-
20
+ plitude sent through the channel, the total Cost does not go to zero in the asymptotic
21
+ limit of a large number of steps, but is bounded below by a rigorous inequality. A
22
+ detailed analysis shows that this bound is satisfied in the SLAZ protocol. The analysis
23
+ leading to the bound uses the fact that the Gram matrix formed by inner products of
24
+ a collection of pure quantum states is additive over Hilbert subspaces and invariant
25
+ under unitary time transformations. Its off-diagonal elements, which in general are not
26
+ positive, play a significant role in the formal argument as well as providing a somewhat
27
+ strange way of visualizing the transfer of information.
28
+ Contents
29
+ I Introduction 2
+ II One-Way Protocols 3
+   II.1 Multiple Channels in Parallel 3
+   II.2 One Channel Used Multiple Times 5
+ III Two-way Protocols 6
+   III.1 Gram Matrices 6
+   III.2 Basic Two-Way Protocol 6
+   III.3 Sending One Classical Bit 7
+   III.4 Lower Bound on Costs 10
+ IV The SLAZ Protocol 11
+   IV.1 Description of the Protocol 11
+   IV.2 Calculation of Costs and Overlap 13
+   IV.3 Discussion of Costs and Probabilities 14
+ V Conclusion 15
+ ∗Electronic address: rgrif@cmu.edu
+ 1
+
70
+ I
71
+ Introduction
72
+ The motivation for this paper is a scheme for the transmission of quantum information
73
+ introduced by Salih et al. [1] with the title “Protocol for direct counterfactual quantum com-
74
+ munication”, and referred to hereafter as SLAZ, the initials of the authors. One ordinarily
75
+ thinks of the transmission of information as sending a signal through a channel from sender
76
+ to receiver. However the idea in SLAZ is that information can be sent from Bob to Alice if
77
+ the quantum particle used to carry the information starts off in Alice’s domain, and a part
78
+ of its quantum amplitude is sent to Bob through a quantum channel. Bob modifies this in
79
+ some way before sending (or possibly not sending) it back to Alice, depending on the signal
80
+ he wants to send. Alice then employs what Bob has returned to begin a second round of
81
+ sending amplitude to Bob, who again modifies it before returning it, and so forth. This
82
+ back-and-forth motion can continue for a large number of rounds until the information that
83
+ Bob is sending has arrived in Alice’s domain, where she can carry out a measurement or
84
+ perhaps perform additional processing. A key feature of protocols of this type is that all the
85
+ intermediate steps can be represented by purely unitary time evolution, with intermediate
86
+ time measurements, if any, replaced by unitaries—a process of purification.
87
+ The use of amplitude rather than particle in the previous paragraph is intentional, be-
88
+ cause the state of the photon or other particle is in general a coherent superposition of parts
89
+ associated with different spatial locations: Alice’s domain, Bob’s domain, and the channel
90
+ connecting them. One generally thinks of a particle as something with a spatial location,
91
+ but in quantum mechanics one cannot simultaneously ascribe particle and wave properties
92
+ to the same entity at the same time because of wave-particle duality. In Hilbert-space quan-
93
+ tum mechanics physical properties, such as location in space, are represented by projectors
94
+ (Sec. III.5 of [2]), and when a projector representing a wave, think of |ψ⟩⟨ψ|, does not com-
95
+ mute with a projector specifying a spatial location, ignoring this fact can rapidly lead to
96
+ paradoxes. The double-slit paradox is an example: when a coherent wave passes through
97
+ the slit system one cannot say through which slit the particle passed.
98
+ The term “counterfactual” in the original SLAZ paper has the following significance. A
99
+ quantum channel connecting the communicating parties is essential: this is not a case of
100
+ mysterious nonlocal influences of the sort which are sometimes invoked to explain quantum
101
+ violations of Bell inequalities.
102
+ However, if the number of steps in an SLAZ protocol is
103
+ 2
104
+
105
+ sufficiently large, the magnitude of the amplitude sent through the channel in each step can
106
+ be made very small, and vanishes in the limit as the number of steps tends to infinity.
107
+ A similar claim of counterfactuality has been made in much of the rather substantial
108
+ literature motivated by the original SLAZ publication, which contains various modifications
109
+ and extensions of the original protocol. There have also been criticisms of these counter-
110
+ factual claims, and (of course) replies to criticisms. The Conclusion, Sec. V, of the present
111
+ paper contains a few remarks about how its results apply to some of these publications, but
112
+ a review, much less a detailed discussion, lies far outside its scope. The interested reader is
113
+ referred to the extensive bibliographies found in [3,4].
114
+ The aim of the present paper is to study the use of quantum channels in protocols of the
115
+ SLAZ type, in particular the sense in which this usage is or is not counterfactual. To this end
116
+ the concept of the Cost of using a quantum channel, the absolute square of the amplitude
117
+ passing through it in a particular step in the protocol, is suggested, for reasons discussed
118
+ in Sec. II, as a useful substitute for “probabilities”, which in a quantum context are often
119
+ ill-defined. The example of multiple channels in parallel, which few would want to claim are
120
+ “counterfactual”, serves as an introduction to how information can be sent through a single
121
+ channel in a single direction at multiple times, in a process in which all of the intermediate
122
+ steps are represented by unitary maps.
123
+ The main mathematical results of this paper are in Sec. III: Gram matrices and some of
124
+ their properties are discussed in Sec. III.1, while Sec. III.2 gives the basic structure of simple
125
+ two-way multiple time protocols. Section III.3 considers simple schemes for transmitting one
126
+ classical bit, while the rigorous lower bound that undermines various counterfactual claims
127
+ is the topic of Sec. III.4.
128
+ The original SLAZ protocol is studied in detail in Sec. IV. In particular the total Cost
129
+ of transmitting a classical bit λ = 0, in which Bob reflects the amplitude back to Alice, and
130
+ for transmitting λ = 1, in which he absorbs rather than returns it, are evaluated explicitly.
131
+ It turns out that in the asymptotic limit the λ = 1 Cost is miniscule, but that for λ = 0
132
+ is enormous, while the product of the two remains finite and satisfies the rigorous bound
133
+ in Sec. III.4. The mistaken claim that the SLAZ protocol is counterfactual results from two
134
+ errors: a concept of channel use which would be questionable even for a classical stochastic
135
+ process, and an improper use of probabilities in a way that violates quantum principles.
136
+ The concluding Sec. V has a summary of the main results of this paper, a few comments
137
+ on some parts of the literature related to SLAZ, and some suggestions for future directions
138
+ of research.
139
+ This author believes that protocols of the SLAZ type are quite interesting,
140
+ deserve further exploration, and might contribute to useful ways of studying multipartite
141
+ and multitime transmission of quantum information, as in quantum networks. And that such
142
+ studies would prove more fruitful in the absence of misleading claims of counterfactuality.
143
+ II
144
+ One-Way Protocols
145
+ II.1
146
+ Multiple Channels in Parallel
147
+ Think of quantum information as the information carried by a photon as it passes through
148
+ a quantum channel, such as an optical fiber.
149
+ The information could be encoded in its
150
+ polarization. Rather than using a single channel, one could imagine sending the photon as a
151
+ 3
152
+
153
+ coherent state through a set of N channels in parallel, using a collection of beamsplitters to
154
+ divide up the initial amplitude among the different channels, and a corresponding collection
155
+ to later recombine them. Let us suppose that the normalized |Φ⟩ that represents the photon
156
+ at some intermediate time is a superposition of amplitudes
157
+ |Φ⟩ = Σ_{n=1}^{N} cn|φn⟩        (1)
163
+ associated with the individual channels, labeled by n. Define the Cost qn associated with
164
+ the use of channel n, and the total Cost Q for the channel system as:
165
+ qn := |cn|^2,        Q := Σ_{n=1}^{N} qn.        (2)
172
+ If the |φn⟩ and |Φ⟩ are normalized, Q is equal to 1, so one might identify qn with the prob-
173
+ ability that the photon is in channel n. But what does that mean? In standard (textbook)
174
+ quantum mechanics probability refers to the outcome of a measurement, but a measurement
175
+ carried out at an intermediate time, when the quantum state is a coherent superposition
176
+ over various locations, can alter what occurs later, and hence it is dangerous to associate
177
+ such a probability with a situation in which a measurement does not take place.
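+ As a concrete illustration of (2)—an illustrative sketch, not part of the original argument; the equal split over N = 8 channels is an arbitrary choice—the per-channel Costs and their sum can be computed directly from the amplitudes:
+ import numpy as np
+ N = 8
+ c = np.full(N, 1/np.sqrt(N), dtype=complex)  # amplitudes c_n of a balanced N-way split (assumed)
+ q = np.abs(c)**2                             # Cost q_n of using channel n, eq. (2)
+ Q = q.sum()                                  # total Cost
+ print(q, Q)                                  # each q_n = 1/N and Q = 1 for a normalized state
+ No single qn is large when N is big, yet the total usage Q stays equal to 1; it is this total, not the size of the individual terms, that measures how much the channel system is actually used.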
178
+ Another way of viewing this difficulty is to recall that von Neumann (Sec. III.5 of [2])
179
+ identified quantum physical properties—which in classical physics are associated with sets
180
+ of points in the classical phase space—with projectors, self-adjoint idempotent operators,
181
+ P = P † = P 2, on the quantum Hilbert space. For example, in the case of a spin-half particle
182
+ the projectors
183
+ P = (I − σz)/2
184
+ R = (I + σx)/2,
185
+ (3)
186
+ where I is the identity and σz and σx are Pauli operators, represent the properties Sz =
187
+ −¯h/2 and Sx = +¯h/2, respectively. In general, if two projectors P and R commute their
188
+ product PR = RP represents the property P AND R. But if they do not commute, neither
189
+ PR nor RP is a projector, and so neither represents a quantum property. In some sense
190
+ noncommutation is the very essence of quantum mechanics; it is what distinguishes it from
191
+ classical physics. The use of standard (Kolmogorov) probabilities requires a sample space of
192
+ mutually-exclusive possibilities, one and only one of which occurs in a particular run of an
193
+ experiment. In quantum theory such a sample space is a collection of mutually orthogonal
194
+ projectors that sum to the identity, a projective decomposition of the identity. For example, R
195
+ and I −R in (3) in the case of spin half; see (7) below for the general definition. In quantum
196
+ mechanics there are often many possible sample spaces that one might be interested in,
197
+ and carelessly combining incompatible spaces—some projectors in one do not commute with
198
+ projectors in the other—inevitably leads to paradoxes rather than physical understanding.
199
+ In the present context the dyad |Φ⟩⟨Φ| is a projector that does not commute with any
200
+ of the projectors |φn⟩⟨φn| for which cn is nonzero, and thus it is meaningless to assign a
201
+ probability to the latter in a situation where the coherent state |Φ⟩ will later be transformed
202
+ by the final beamsplitters into the original state that entered the channel system.
203
+ 4
204
+
205
+ II.2
206
+ One Channel Used Multiple Times
207
+ The possible advantages, if any, of using many channels in parallel can also be realized
208
+ by employing a single channel and sending quantum amplitude through it at a succession of
209
+ times; this is what makes protocols of the SLAZ type of some interest. Let us suppose that
210
+ information is being sent from Bob to Alice. One can think of the photon at a particular time
211
+ as being in a coherent superposition of amplitudes in three different physical locations: Alice’s
212
+ domain A, Bob’s domain B, and the channel C connecting them. The same symbols can be
213
+ used for the Hilbert-space projectors associated with these locations, thus operators which are
214
+ self-adjoint and idempotent, A = A† = A2, and mutually orthogonal, AB = BC = AC = 0.
215
+ They sum to the identity
216
+ A + C + B = I
217
+ (4)
218
+ and hence form a projective decomposition of the identity—see the general definition in
219
+ (7) below. A projective decomposition of the identity is the quantum counterpart of the
220
+ sample space of mutually exclusive possibilities essential for using standard (Kolmogorov)
221
+ probability theory in the case of a quantum system. Note that A, B, and C are subspaces of a
222
+ single Hilbert space, not subsystems represented by a tensor product. If the quantum particle
223
+ possesses other degrees of freedom, these projectors are to be understood using the usual
224
+ convention as including the identity operator on these additional degrees of freedom. Thus
225
+ for a photon, A means that it is located in Alice’s domain, whatever may be its polarization.
226
+ Bob can send a particular type of information λ to Alice by starting with a normalized
227
+ reference state |ψ0⟩ = B|ψ0⟩, the particle is somewhere in his domain B, and using a unitary
228
+ transformation Bλ acting on the subspace B + C to place it in a state
229
+ |ψ^λ_1⟩ = C|ψ^λ_1⟩ = B^λ|ψ0⟩,        (5)
233
+ in the channel, at which point it travels through the channel to Alice. As the channel has no
234
+ effect except to transmit the particle from one end to the other, we simplify the discussion
235
+ (here and later) by using the same symbol for the ket that arrives at Alice’s end. She then
236
+ applies a unitary A that does not depend on λ, for she does not know what Bob is sending,
237
+ to empty the channel and arrive at a final state
238
+ |ψ^λ_2⟩ = A|ψ^λ_2⟩ = A|ψ^λ_1⟩,        (6)
243
+ which she can then measure or subject to further processing.
244
+ This single-round transmission process can be carried out in a number of rounds in which
245
+ during the n’th round Bob employs a unitary Bλ
246
+ n acting on the B + C subspace to map an
247
+ amplitude cn|ψ0⟩ into C, which is initially empty, and which travels to Alice, who uses a
248
+ unitary An acting on A + C to remove it from the channel, which is then empty and ready
249
+ for the next round. One way to visualize this is that Bob has a domain B of high dimension,
250
+ and at the outset splits up the initial amplitude |ψ0⟩ into pieces placed in different subspaces
251
+ of B with the help of a suitable set of beamsplitters. At round n the unitary Bλ
252
+ n interchanges
253
+ the appropriate subspace of B with the empty C. Alice’s A is also large, and her An maps
254
+ whatever Bob has sent into an empty subspace reserved for this purpose. When the run is
255
+ completed Alice can then combine the amplitudes in these different subspaces into a smaller
256
+ space—e.g., using beamsplitters—or she can do a similar combination at the end of each
257
+ 5
258
+
259
+ round. Of course Alice’s and Bob’s unitaries cannot be chosen independently; the two must
260
+ work together to design the protocol. What is unknown to Alice is Bob’s choice of λ for a
261
+ particular run; this is the information that she can extract at the end.
262
+ Some multiple-time protocols employ measurements by Alice at intermediate times. In
263
+ cases such as the original SLAZ scheme, discussed below in Sec. IV, it is possible to store the
264
+ amplitude that could have triggered the measuring device in an empty subspace in Alice’s
265
+ domain and put off the measurement until the protocol is finished. Of course, amplitudes
266
+ that correspond to several measurements in succession can be combined, just as in the case
267
+ of simultaneous transmission through several channels in parallel, as discussed earlier.
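+ The same bookkeeping can be made explicit in a few lines of Python (an illustrative sketch; the random choice of the splitting coefficients cn is an assumption made only for the example): however the unit amplitude of |ψ0⟩ is divided over the N rounds, the per-round Costs |cn|^2 add up to 1, so spreading a one-way transmission over many rounds does not by itself reduce the total channel usage.
+ import numpy as np
+ rng = np.random.default_rng(0)
+ N = 20                                        # number of rounds (arbitrary choice)
+ c = rng.normal(size=N) + 1j*rng.normal(size=N)
+ c /= np.linalg.norm(c)                        # normalize so that sum_n |c_n|^2 = 1
+ per_round = np.abs(c)**2                      # Cost of the single channel use in round n
+ print(per_round.max(), per_round.sum())       # individual Costs can be tiny; the total is 1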
268
+ III
269
+ Two-way Protocols
270
+ III.1
271
+ Gram Matrices
272
+ Let {Pj} be a projective decomposition of the Hilbert space identity I:
273
+ I = Σ_j Pj,        Pj = Pj†,        Pj Pk = δ_{jk} Pj,        (7)
281
+ and let {|ψµ⟩}, µ = 0, 1, . . ., be a collection of kets on the same Hilbert space. The Gram
282
+ matrix
283
+ Gµν = ⟨ψµ|ψν⟩ = Σ_j Gµν(Pj) = Σ_j ⟨ψµ|Pj|ψν⟩        (8)
291
+ is additive in that it is a sum over contributions from the different subspaces. In addition,
292
+ Gµν is invariant (or conserved) under a unitary operation U that acts on every ket in the
293
+ collection {|ψµ⟩}. Also, if this unitary acts on only some of the subspaces, say P1 and P2, and
294
+ is the identity operator on the others, then while both Gµν(P1) and Gµν(P2) may change,
295
+ their sum Gµν(P1) + Gµν(P2) remains unchanged. That Gram matrices are additive and
296
+ conserved plays an important role in what follows.
297
+ We shall refer to the diagonal elements Gµµ(Pj), which are non-negative, as weights.
298
+ As these are rather like probabilities, their additivity and conservation is not surprising.
299
+ However, that the same is true of the nondiagonal elements Gµν(Pj) with µ ̸= ν, hereafter
300
+ referred to as overlaps, comes as something of a surprise, especially since |ψµ⟩ and |ψν⟩ may
301
+ refer to two different runs of an experiment, one on Friday and one on Monday. Nonetheless,
302
+ overlaps play a key role in the following analysis, not only as part of the mathematics but
303
+ also in a surprising but useful “intuitive” way of thinking about what is going on. The
304
+ absolute value of an overlap corresponds to a notion of fidelity in quantum information, but
305
+ in general an overlap is a complex number, and the fact that it can be negative as well as
306
+ positive is a key element in what follows.
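+ Both properties are easy to verify numerically. The following sketch (an illustrative check, not part of the original analysis; the dimensions, the two random kets, and the random block unitary are arbitrary choices) confirms additivity over a decomposition into three subspaces and the conservation of Gµν(P1) + Gµν(P2) under a unitary that is the identity outside those two blocks.
+ import numpy as np
+ rng = np.random.default_rng(1)
+ dims = [2, 3, 4]                                    # sizes of the subspaces P1, P2, P3 (assumed)
+ d = sum(dims)
+ blocks = [slice(0, 2), slice(2, 5), slice(5, 9)]
+ psi = [rng.normal(size=d) + 1j*rng.normal(size=d) for _ in range(2)]
+ psi = [v / np.linalg.norm(v) for v in psi]
+ def G(mu, nu, s):                                   # G_{mu nu}(P_j) = <psi_mu|P_j|psi_nu>, cf. (8)
+     return np.vdot(psi[mu][s], psi[nu][s])
+ assert np.isclose(sum(G(0, 1, s) for s in blocks), np.vdot(psi[0], psi[1]))   # additivity
+ U5, _ = np.linalg.qr(rng.normal(size=(5, 5)) + 1j*rng.normal(size=(5, 5)))    # random unitary on P1 + P2
+ U = np.eye(d, dtype=complex); U[:5, :5] = U5
+ before = G(0, 1, blocks[0]) + G(0, 1, blocks[1])
+ psi = [U @ v for v in psi]
+ after = G(0, 1, blocks[0]) + G(0, 1, blocks[1])
+ assert np.isclose(before, after)                    # the sum over the affected blocks is conserved
+ print("additivity and block-wise conservation confirmed")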
307
+ III.2
308
+ Basic Two-Way Protocol
309
+ In the following discussion the projective decomposition of the identity (7) that will
310
+ concern us is {A, C, B}, where A means that the photon or other quantum particle is in
311
+ Alice’s domain, B that it is Bob’s domain, and C in the channel connecting them. At the
312
+ 6
313
+
314
+ beginning of a two-way protocol of the SLAZ type in which Bob is sending information to
315
+ Alice, all of the photon amplitude is in Alice’s domain A. She initiates the run by sending some
316
+ amplitude to Bob through the channel. He then modifies it and returns some or all of it to
317
+ Alice, in a manner that depends on the information λ he wishes to transmit. Alice processes
318
+ what Bob has returned, and begins the second round by again sending amplitude to Bob,
319
+ who again returns it, etc. This can go on for N rounds, following which Alice makes a
320
+ measurement to determine the value of λ.
321
+ In further detail: At the beginning of round n, Alice uses a unitary An1 acting on A + C
322
+ to map some of the amplitude in A into an empty channel C. This amplitude then flows
323
+ through the channel to Bob, where he empties the channel into B, does some processing,
324
+ and then maps some amplitude back into C. This flows to Alice, who empties C into A
325
+ using a unitary An2. We assume that “flow through the channel” does not change anything,
326
+ and hence it is convenient not to think of C as divided into close-to-Alice, close-to-Bob, and
327
+ in-between subspaces, but simply imagine that Alice and then Bob and then Alice are acting
328
+ on a single C subspace. Alice uses unitaries that act on A + C and are independent of λ,
329
+ while Bob uses unitaries Bλ
330
+ n, that depend on the information λ he wants to transmit, which
331
+ act on C + B. Both the Alice and Bob unitaries will in general depend upon the round n,
332
+ but Alice’s do not depend upon λ. In addition we impose the restriction that Bob’s actions
333
+ are passive in the sense that the magnitude of the amplitude he sends back to Alice
334
+ in round n cannot be greater than what he has just received. This last condition clearly
335
+ differentiates these two-way protocols from the one-way protocols of Sec. II.2.
336
+ The requirement that Alice and Bob only employ unitary operations simplifies the anal-
337
+ ysis. It is true that various published protocols of this type, including the original SLAZ
338
+ version to be discussed in Sec. IV, employ nonunitary measurements at intermediate times.
339
+ In the cases of interest to us these measurements can be replaced by unitary operations
340
+ which allow the measurements to be put off until the end of the run, in a manner indicated
341
+ in Sec. II.2 and employed in the discussion in Sec. IV.
342
+ To quantify the channel usage for these protocols we use the notions of Cost, equal to the
343
+ absolute square of the amplitude for a single use of the channel, and total Cost for the sum of
344
+ the Costs involved in a single experimental run, as in Sec. II.1, see (2). An important issue
345
+ connected to claims that these protocols are counterfactual has to do with the difference
346
+ between Cost and probability, as will be discussed later for the SLAZ protocol in Sec. IV—
347
+ the importance of this has already been noted in Sec. II.1. In particular we will be interested
348
+ in identifying protocols that minimize the overall Cost, as in the example discussed next.
349
+ III.3
350
+ Sending One Classical Bit
351
+ In the simplest SLAZ protocol Bob wants to send a single classical bit, λ = 0 or 1, to
352
+ Alice. At the start all of the amplitude is in A for both a λ = 0 and a λ = 1 run, so all four
353
+ of the initial Gram matrix elements Gµν
354
+ 0 (A), µ and ν equal to 0 or 1, are equal to 1. The
355
+ goal is that after N rounds the result will be
356
+ Gµν_N(A) = δµν,        (9)
359
+ so that Alice can determine the value of λ Bob has sent by making a measurement in an
360
+ appropriate basis. Thus the desired change is that during the course of the run the overlaps,
361
+ 7
362
+
363
+ the off diagonal elements G01(A) and G10(A), decrease from 1 to 0, while the weights G00(A)
364
+ and G11(A), remain equal to 1.
365
+ At this point it is worth noting that if both weights are not maintained—for example if
366
+ at the end G00(A) = 1 while G11(A) = G01(A) = 0, Alice can still extract the value of λ by
367
+ measuring whether or not the photon is in the state |ψ0⟩. Let us call this, for want of a better
368
+ term, a partial protocol in contrast to a full protocol that results in (9). A partial protocol
369
+ can be used for one-way transmission, and the obvious advantage is that it costs nothing to
370
+ transmit λ = 1. A possible disadvantage is that when Alice’s measurement reveals nothing
371
+ it could be because of some failure in the channel or in the measuring device. In the present
372
+ discussion we focus on full protocols.
373
+ A very simple way to implement such a protocol is that on the very first step Alice sends
374
+ the entire amplitude to Bob, with a Cost of 1 for this use of the channel. Bob then simply
375
+ modifies this using the unitary Bλ and sends it back to Alice, either in one round or several
376
+ rounds, with Alice sending nothing back. The Cost for using the channel in the Bob-to-Alice
377
+ direction is also 1, see the discussion in Sec. II.2. Hence a total Cost of 2 for the protocol
378
+ as a whole. Notice that since there is no restriction on Bλ this rather trivial protocol can be
379
+ used to send “quantum” information. From the perspective of Cost, two-way protocols of
380
+ the kind under discussion are interesting because a classical bit, λ = 0 or 1, can be sent at
381
+ a total Cost of 1 rather than 2. And as shown below in Sec. III.4, the product of the Costs
382
+ for λ = 0 and 1 cannot be less than 1.
383
+ To discuss the successive steps in protocols that optimize the Cost, we need an appropriate
384
+ notation. We will represent kets as row vectors as in the following example
385
+ |ψ⟩ = |a; c; b⟩ = |a1, a2, a3; c1, c2; b1, b2⟩
386
+ (10)
387
+ where the dimensions of the A, B, and C subspaces are d(A) = 3, d(B) = 2 and d(C) = 2.
388
+ Note that we are dealing with a direct sum of subspaces, A ⊕ B ⊕ C, not a tensor product
389
+ of subsystems. In much of what follows, B is empty or can be ignored, so |a; c⟩ will suffice;
390
+ this and other minor variants in notation should be self-explanatory.
391
+ Let us start with an extremely simple one-round full protocol with d(A) = 2, d(C) = 1.
392
+ It consists of the following steps:
393
+ |a1, a2; c⟩ = |1, 0; 0⟩ → |1/√2, 0; 1/√2⟩ ⇒ |1/√2, 0; (−1)^λ/√2⟩ → |1/√2, (−1)^λ/√2; 0⟩,        (11)
408
+ where 0 means this amplitude is equal to zero; do not confuse it with the label 0 for one of
409
+ the two orthogonal states of a qubit. Here → indicates the action of a unitary on A + C
410
+ carried out by Alice, and ⇒ a λ-dependent unitary on C carried out by Bob. The action by
411
+ Bob could involve intermediate steps requiring the B subspace, but its net effect is only to
412
+ change the contents of C, so there is no need to include B in the discussion.
413
+ In words: At the outset all of the amplitude is in Alice’s A, a1 = 1. She maps half (in
414
+ the sense of the absolute square) of it into C and sends it to Bob, who either sends it back
415
+ unchanged in order to transmit λ = 0, or with the opposite phase to send λ = 1. Alice then
416
+ empties the channel into the a2 position, using a unitary on A + C that is independent of λ,
417
+ as it simply requires interchanging two subspaces. A final measurement by Alice determines
418
+ which of the two orthogonal states is present in A, and thus which bit Bob was sending.
419
+ 8
420
+
421
+ Next consider what is happening to the Gram matrices Gµν(A) and Gµν(C) during the
422
+ successive steps. In particular, the overlap G01(A) is equal to 1 at the outset, and the first
423
+ step reduces it to 1/2 by placing 1/2 in C. Bob’s action changes G01(C) from +1/2 to −1/2,
424
+ and this negative contribution to the overlap moves back into A when Alice empties the
425
+ channel, leading to the desired G01(A) = 0. On the other hand, whereas the weight G00(A)
426
+ is reduced to 1/2 during the first step, Bob’s action does not change the sign of G00(C), so in
427
+ the final step Alice moves this weight back to its initial value of 1, and similarly for G11(A).
428
+ Thus the goals of a full protocol have been achieved.
429
+ The Costs of using the channel are easily evaluated: 1/2 for the Alice-to-Bob step and
430
+ the same for Bob-to-Alice, for a total Cost of Qλ = 1, the same for λ = 0 and 1. These
431
+ satisfy the rigorous lower bound worked out below in Sec. III.4, so this protocol is optimal
432
+ if one uses total Cost as an appropriate measure of channel usage.
433
+ This protocol is easily extended to an equally efficient version involving N rounds, N any
434
+ positive integer. Let
435
+ ǫ = 1/2N,
436
+ (12)
437
+ and for the first, n = 1, round replace (11) with
438
+ |1, 0; 0⟩ → |√(1 − ǫ), 0; √ǫ⟩ ⇒ |√(1 − ǫ), 0; (−1)^λ√ǫ⟩ → |√(1 − ǫ), (−1)^λ√ǫ; 0⟩,        (13)
+ while for round n + 1,
+ |√(1 − nǫ), (−1)^λ√(nǫ); 0⟩ → |√(1 − (n + 1)ǫ), (−1)^λ√(nǫ); √ǫ⟩
+ ⇒ |√(1 − (n + 1)ǫ), (−1)^λ√(nǫ); (−1)^λ√ǫ⟩ → |√(1 − (n + 1)ǫ), (−1)^λ√((n + 1)ǫ); 0⟩,        (14)
459
+ where it is straightforward to show that there exists a λ-independent unitary for the last
460
+ step. The final result at the end of round N is the same as in (11), the case in which N = 1,
461
+ and again the total Cost is Q0 = Q1 = 1, independent of λ. One can also let ǫ depend on n,
462
+ thus ǫn > 0 for round n, subject to the condition
463
+ Σ_n ǫn = 1/2,        (15)
467
+ and the Cost is again equal to 1.
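+ A direct simulation (an illustrative sketch, assuming an ideal lossless channel, real amplitudes, and the uniform choice ǫ = 1/(2N)) shows that the N-round scheme (12)–(15) behaves exactly like the single-round protocol (11): Alice’s two rotations in each round are fixed by the round index alone, hence independent of λ, and for every N the final overlap G01(A) vanishes while Q0 = Q1 = 1, saturating the bound (27) derived below.
+ import numpy as np
+ def run(lam, N):
+     eps = 1.0 / (2 * N)
+     a1, a2, c, cost = 1.0, 0.0, 0.0, 0.0
+     for n in range(N):
+         th = np.arcsin(np.sqrt(eps / (1.0 - n * eps)))      # Alice loads sqrt(eps) into the empty channel
+         a1, c = np.cos(th) * a1, np.sin(th) * a1
+         cost += c * c                                        # Alice -> Bob use of the channel
+         c *= (-1) ** lam                                     # Bob returns c with phase (-1)^lambda
+         cost += c * c                                        # Bob -> Alice use of the channel
+         ph = np.arctan2(1.0, np.sqrt(n))                     # Alice empties the channel into a2
+         a2, c = np.cos(ph) * a2 + np.sin(ph) * c, -np.sin(ph) * a2 + np.cos(ph) * c
+     return a1, a2, cost
+ for N in (1, 10, 1000):
+     a10, a20, Q0 = run(0, N)
+     a11, a21, Q1 = run(1, N)
+     print(N, round(a10 * a11 + a20 * a21, 12), round(Q0, 12), round(Q1, 12))
+     # final overlap G01(A) = 0 and Q0 = Q1 = 1 for every N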
468
+ There are other protocols with larger Costs which may have some practical advantage.
469
+ Thus rather than a scalar amplitude, Alice might use photon polarization, say horizontal
470
+ H, which Bob could return as H to send λ = 0 or rotate to vertical V to send λ = 1.
471
+ In this case the Costs are Q0 = Q1 = 2, so twice that for an optimal one-way protocol.
472
+ However, there is now no need to maintain a particular phase relation between what is in
473
+ Alice’s domain and what is available to Bob during each round. If polarization is easier to
474
+ maintain than phase—one leaves that up to the experts—one could imagine the added Cost
475
+ being worthwhile if Alice has a large apparatus capable of generating single photons, while
476
+ Bob, off on a trip to spy on Eve, needs only something easily carried in a suitcase.
477
+ The protocol used in SLAZ, in which Bob returns the amplitude for λ = 0, but absorbs
478
+ it or feeds it to a measuring apparatus for λ = 1, looks less promising. Because the λ = 1
479
+ weight only moves from Alice to Bob it is difficult to have G11(A) = 1 at the end of the
480
+ protocol. In fact SLAZ, discussed in Sec. IV, employs a clever trick (“Zeno effect”) to get
481
+ around this problem, albeit at the cost of a large number of rounds to keep the probability
482
+ of failure small, and a large channel usage Cost for one of the bits.
483
+ 9
484
+
485
+ III.4
486
+ Lower Bound on Costs
487
+ The additivity and conservation properties of the Gram matrix Gµν introduced in Sec. III.1
488
+ will now be used to obtain lower bounds on the total Cost of two-way protocols of the sort
489
+ exemplified by, but not limited to, the case of 1 classical bit discussed above in Sec. III.3.
490
+ Using the |a; c⟩ notation of (10)—the b entry is not needed in the following discussion—round
491
+ n of an N round protocol consists of the following steps carried out on A + C:
492
+ |aµ; 0⟩n → |¯aµ; cµ⟩n ⇒ |¯aµ; ˆcµ⟩n → |aµ; 0⟩n+1.
493
+ (16)
494
+ Here µ labels the bit which Bob is transmitting during this run. Thus after Alice uses a
495
+ unitary An1 on A + C to move some amplitude, |cµ⟩n into an initially empty channel. Bob
496
+ applies a unitary Bµ
497
+ n to C + B, leading to an amplitude |ˆcµ⟩n—note the hat added to c—in
498
+ the channel. If Bob’s action is passive, as assumed in Sec. III.3 (and in the later discussion
499
+ of SLAZ in Sec. IV), one would have
500
+ ∥ˆcµ∥n ≤ ∥cµ∥n,
501
+ (17)
502
+ but this condition is actually not needed to obtain the general results and inequalities given
503
+ below, which thus apply equally to one-way multi-time transmission. As a final step Alice
504
+ employs a unitary An2 on A + C to empty the channel by placing its amplitude into A. It is
505
+ important that Alice’s unitaries An1 and An2, unlike Bob’s Bµ
506
+ n, do not depend upon µ, which
507
+ can be different in different runs of the experiment.
508
+ The change in the Gram matrix associated with A during round n is given by
509
+ Gµν_{n+1}(A) − Gµν_n(A) = ⟨aµ|aν⟩n+1 − ⟨aµ|aν⟩n = ⟨ĉµ|ĉν⟩n − ⟨cµ|cν⟩n,        (18)
513
+ where ⟨aµ|aν⟩n is the inner product of |aµ⟩n and |aν⟩n. The equality follows from the fact that
514
+ Gµν(A + C) is invariant under An1 and An2, and additive: Gµν(A + C) = Gµν(A) + Gµν(C).
515
+ To discuss the total change during N rounds, n = 1, 2, . . . N, it is convenient to define
516
+ |Cµ⟩ := {|cµ⟩1, |cµ⟩2, . . . |cµ⟩N},
517
+ | ˆCµ⟩ := {|ˆcµ⟩1, |ˆcµ⟩2, . . . |ˆcµ⟩N}
518
+ (19)
519
+ with inner products
520
+ ⟨Cµ|Cν⟩ = Σ_{n=1}^{N} ⟨cµ|cν⟩n,        ⟨Ĉµ|Ĉν⟩ = Σ_{n=1}^{N} ⟨ĉµ|ĉν⟩n.        (20)
531
+ Summing (18) over N rounds yields the following formula
532
+ ∆Gµν(A) = Gµν_N(A) − Gµν_0(A) = ⟨Ĉµ|Ĉν⟩ − ⟨Cµ|Cν⟩,        (21)
536
+ for the total change in the A portion of the Gram matrix during the full protocol. This
537
+ quantity is bounded by
538
+ |∆Gµν(A)| ≤ |⟨ ˆCµ| ˆCν⟩| + |⟨Cµ|Cν⟩| ≤ ∥ ˆCµ∥ · ∥ ˆCν∥ + ∥Cµ∥ · ∥Cν∥
539
+ (22)
540
+ using the norm ⟨Cµ|Cµ⟩ = ∥Cµ∥2.
541
+ 10
542
+
543
+ Next define the total Cost Kµ for Alice-to-Bob and ˆKµ for Bob-to-Alice uses of the
544
+ channel, with Qµ their sum:
545
+ Kµ = ⟨Cµ|Cµ⟩ = ∥Cµ∥^2,        K̂µ = ⟨Ĉµ|Ĉµ⟩ = ∥Ĉµ∥^2,        Qµ = Kµ + K̂µ.        (23)
549
+ Combining (22) and (23) gives
550
+ |∆Gµν(A)| ≤ √(KµKν) + √(K̂µK̂ν) ≤ √(QµQν).        (24)
558
+ This yields an upper bound
559
+ ∆Gµµ(A) ≤ Qµ
560
+ (25)
561
+ for a non-negative diagonal weight, and for the off-diagonal overlap:
562
+ |∆Gµν(A)| ≤ √(QµQν).        (26)
566
+ In the particular case of the 1-bit two-way protocol, Sec. III.3, the aim is to reduce G01(A)
567
+ from its initial value of 1 to 0 after N rounds. Setting µ = 0 and ν = 1 in (26), we see that
568
+ to achieve this result it is necessarily the case that the Costs Q0 and Q1 for sending bits
569
+ λ = 0 and λ = 1 must satisfy the condition
570
+ Q0Q1 ≥ 1.
571
+ (27)
572
+ This is satisfied as an equality with Q0 = Q1 = 1 for the specific protocols discussed in
573
+ Sec. III.3, which shows that they are optimal if total Cost is used as a measure. For more
574
+ general protocols there is no reason to expect that the two Costs will be equal, and in that
575
+ case if, say, the Cost for λ = 1 is made very small, that for λ = 0 must be very large. This
576
+ is in fact the case for the original SLAZ protocol, as discussed below in Sec. IV, which thus
577
+ provides an interesting illustration of such a tradeoff.
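+ Since (22)–(26) rest only on the triangle and Cauchy–Schwarz inequalities, they can be spot-checked on arbitrary data. The sketch below (illustrative only; the channel-amplitude sequences are random complex vectors generated purely for the test, not taken from any particular protocol) confirms the chain |⟨Ĉ0|Ĉ1⟩ − ⟨C0|C1⟩| ≤ √(K0K1) + √(K̂0K̂1) ≤ √(Q0Q1) numerically.
+ import numpy as np
+ rng = np.random.default_rng(3)
+ for _ in range(1000):
+     N = int(rng.integers(1, 20))
+     C0, C1, Ch0, Ch1 = (rng.normal(size=N) + 1j * rng.normal(size=N) for _ in range(4))
+     K0, K1 = np.vdot(C0, C0).real, np.vdot(C1, C1).real        # Alice -> Bob Costs
+     Kh0, Kh1 = np.vdot(Ch0, Ch0).real, np.vdot(Ch1, Ch1).real  # Bob -> Alice Costs
+     lhs = abs(np.vdot(Ch0, Ch1) - np.vdot(C0, C1))              # |Delta G01(A)|, eq. (21)
+     mid = np.sqrt(K0 * K1) + np.sqrt(Kh0 * Kh1)
+     rhs = np.sqrt((K0 + Kh0) * (K1 + Kh1))                      # sqrt(Q0 Q1)
+     assert lhs <= mid + 1e-9 and mid <= rhs + 1e-9
+ print("inequalities (22)-(26) hold for 1000 random amplitude sequences")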
578
+ IV
579
+ The SLAZ Protocol
580
+ IV.1
581
+ Description of the Protocol
582
+ The original SLAZ protocol differs from the simpler situation discussed in Sec. III.3 in
583
+ two respects. First, it has a hierarchical structure: there are a large number M of outer
584
+ rounds or cycles, each of which consists of a large number N of inner rounds or cycles, and
585
+ the protocol will succeed with high probability provided
586
+ 1 ≪ M ≪ N.
587
+ (28)
588
+ Second, while Bob sends a bit λ = 0 by reflecting the amplitude sent by Alice back into
589
+ the channel, for λ = 1 he simply empties the channel, which can be described as a unitary
590
+ operation in which the C amplitude is placed in Bob’s subspace B. In addition, the original
591
+ SLAZ protocol and some of its modifications involve measurements at intermediate times,
592
+ and these will be replaced in the discussion below by unitary operations in the manner
593
+ suggested at the end of Sec. II.2.
594
+ 11
595
+
596
+ We use a notation
597
+ |ψ⟩ = |a1, a2, a3, a4; c; b⟩
598
+ (29)
599
+ of the form introduced in (10), where the aj are scalar amplitudes in Alice’s domain A =
600
+ A1+A2+A3+A4, c is the amplitude the channel C, and b is in Bob’s domain B. Here capital
601
+ letters are used to denote subspaces and the corresponding projectors, while lower case letters
602
+ indicate (in general complex) scalar amplitudes. While A4 and B are one-dimensional, one
603
+ can also make these larger spaces for reasons that will appear during the discussion. An
604
+ abbreviated notation is often convenient: |a2, a3⟩ in the case of a unitary acting on A2 + A3
605
+ while all the other amplitudes remain unchanged.
606
+ Central to the discussion are unitary operators that represent a rotation by an angle θ
607
+ on a 2-dimensional space:
608
+ R(θ)|α, β⟩ = |α cos θ − β sin θ, α sin θ + β cos θ⟩.
609
+ (30)
610
+ In particular, RM and RN, defined in terms of small angles, play a central role:
611
+ RM := R(θM),
612
+ θM := π/(2M),
613
+ RN := R(θN),
614
+ θN := π/(2N).
615
+ (31)
616
+ Note in particular that
617
+ (RM)M = (RN)N = R(π/2);
618
+ R(π/2) |α, β⟩ = | − β, α⟩.
619
+ (32)
620
+ In view of the fact that θN is a small angle, the following approximations turn out to be
621
+ useful:
+ cos θN ≈ exp[−θN^2/2] = exp[−π^2/(8N^2)] ≈ 1 − π^2/(8N^2),        (cos θN)^N ≈ exp[−π^2/(8N)] ≈ 1 − π^2/(8N) ≈ 1,        (33)
626
+ and similarly if N is replaced by M.
627
+ These approximations are useful for understanding the overall structure of the protocol,
628
+ which is the following. At the beginning of outer round m, 1 ≤ m ≤ M, RM is applied to
629
+ A1 + A2 to yield,
630
+ |a1, a2⟩λ = RM|¯a1, ¯a2⟩λ,
631
+ (34)
632
+ where ¯a1 and ¯a2 are the values of these amplitudes at the end of the previous outer round. In
633
+ general they depend upon which bit λ = 0 or 1 is being transmitted, whence the superscript
634
+ label, even though Alice’s operations do not depend upon λ. The very first outer round
635
+ m = 1 begins by applying (34) to the starting state (29) with a1 = 1 and all the other
636
+ amplitudes equal to zero.
637
+ The initial step (34) of outer round m is followed by a sequence of N inner rounds, each
638
+ involving the following steps, here displayed using the type of notation employed in Sec. III.3,
639
+ but now with reference to the subspace A2 + A3 + C.
640
+ |a2, a3; c = 0⟩ → |a2′, a3′; c = 0⟩ → |a2′, 0; a3′⟩ ⇒ |a2′, 0; (1 − λ)a3′⟩ → |a2′, (1 − λ)a3′; 0⟩,        (35)
+ where
+ |a2′, a3′⟩ = RN|a2, a3⟩.        (36)
656
+ 12
657
+
658
+ In words, Alice applies the unitary rotation RN, (31), to A2 +A3, and then maps A3 into the
659
+ empty channel. Next comes Bob’s action, indicated by ⇒, to either reflect the amplitude a′
660
+ 3
661
+ back into C if he is sending λ = 0, or shift it into his domain B, leaving the channel empty
662
+ if sending λ = 1. Alice, who does not know the value of λ, maps whatever is in the channel
663
+ back into A3 by a unitary that simply exchanges the contents of A3 and C, and then begins
664
+ the next inner round. The result of N inner rounds in succession is
665
+ |a2, a3⟩ → { |0, a2⟩ for λ = 0;        |(cos θN)^N a2, 0⟩ ≈ |a2, 0⟩ for λ = 1 }        (37)
670
+ where the λ = 1 approximation is justified when N is very large, see (33).
671
+ Following the N inner rounds Alice completes this outer round by applying a unitary to
672
+ A3 + A4 that empties the contents of A3 into A4. For λ = 1, a3 = 0, (37), so this emptying
673
+ step is trivial, while for λ = 0 it is nontrivial, and plays a significant role in understanding
674
+ the true Costs of the protocol. In the original SLAZ protocol this emptying step is replaced
675
+ by a measurement, but instead of a measurement one can just as well let the amplitudes
676
+ accumulate in A4, which is the perspective used here.
677
+ At the end of the protocol after
678
+ completing M outer rounds the final result is
679
+ λ = 0 : |a1 = 1 − r1, a2 = 0, a3 = 0, a4 = r4, c = 0, b = 0⟩
680
+ λ = 1 : |a1 = s1, a2 = 1 − s2, a3 = 0, a4 = 0, c = 0, b = sb⟩,
681
+ (38)
682
+ where the quantities denoted by rj and sk are small corrections, of order 1/M or M/N.
683
+ If these are ignored, all the amplitude is in A1 for λ = 0 or A2 for λ = 1, and a simple
684
+ measurement allows Alice to determine which bit Bob sent.
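+ Before turning to the analytic estimates, it is instructive to run the protocol numerically. The sketch below (an illustration, not part of the original analysis; M and N are arbitrary values satisfying 1 ≪ M ≪ N, amplitudes are kept real, and the intermediate measurements are replaced by unitaries as described above) follows (a1, a2, a3), accumulates the weights transferred to A4 and to Bob’s domain B, adds |c|^2 to the Cost for every passage through the channel, and records the Alice-to-Bob channel amplitudes needed for the overlap (21).
+ import numpy as np
+ def rot(x, y, th):                                   # the rotation R(theta) of eq. (30)
+     return np.cos(th) * x - np.sin(th) * y, np.sin(th) * x + np.cos(th) * y
+ def slaz(lam, M, N):
+     thM, thN = np.pi / (2 * M), np.pi / (2 * N)
+     a1, a2, a3 = 1.0, 0.0, 0.0
+     wA4 = wB = cost = 0.0
+     sent = []                                        # channel amplitudes, Alice -> Bob
+     for m in range(M):
+         a1, a2 = rot(a1, a2, thM)                    # outer-round step, eq. (34)
+         for n in range(N):
+             a2, a3 = rot(a2, a3, thN)                # inner-round rotation RN, eq. (36)
+             c, a3 = a3, 0.0                          # Alice moves A3 into the empty channel
+             cost += c * c; sent.append(c)            # Alice -> Bob use
+             if lam == 0:
+                 cost += c * c                        # Bob reflects: Bob -> Alice use
+             else:
+                 wB += c * c; c = 0.0                 # Bob absorbs (lam = 1): nothing returned
+             a3, c = c, 0.0                           # Alice empties the channel back into A3
+         wA4 += a3 * a3; a3 = 0.0                     # end of outer round: A3 emptied into A4
+     return a1, a2, wA4, wB, cost, np.array(sent)
+ M, N = 25, 2500                                      # illustrative sizes with 1 << M << N (assumed)
+ a1_0, a2_0, wA4_0, _, Q0, s0 = slaz(0, M, N)
+ a1_1, a2_1, _, wB_1, Q1, s1 = slaz(1, M, N)
+ print(round(a1_0, 3), round(a2_1, 3))                # both close to 1: the two bits end up distinguishable, cf. (38)
+ print(round(Q0, 1), round(Q1, 4), round(Q0 * Q1, 2)) # Q0 large, Q1 tiny, product of order 1
+ print(round(-float(np.dot(s0, s1)), 3))              # Delta G01(A), close to -1 as in (47)
+ The run makes the asymmetry discussed in Sec. IV.3 explicit: the λ = 1 Cost Q1 is of order M/N, the λ = 0 Cost Q0 is of order N/M, and their product respects the bound (27).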
685
+ IV.2
686
+ Calculation of Costs and Overlap
687
+ It is fairly straightforward to work out the Costs for the SLAZ protocol using approxi-
688
+ mations justified by 1 ≪ M ≪ N, and the results are summarized in Sec. IV.3 below. We
689
+ begin with the case λ = 1. If one ignores small quantities, the nonzero components of |ψ⟩m
690
+ at the beginning and at the end of outer round m are
691
+ a1 = cos(mθM),
692
+ a2 = sin(mθM),
693
+ (39)
694
+ and since MθM = π/2, at the end of outer round M the result is the λ = 1 line in (38).
695
+ The probability that the photon arrives in B during outer round m—the probability that
696
+ Bob will detect it if he uses a measuring device—is the sum of the absolute squares of the
697
+ amplitudes in the channel C in the N inner rounds, as this is an incoherent process:
698
+ N (sin(mθM))^2 (sin θN)^2 ≈ (π^2/4)(sin(mθM))^2/N.        (40)
700
+ Summing over m gives the total probability
701
+ K1 = Q1 = (π2/8)(M/N).
702
+ (41)
703
+ that the photon will end up in Bob’s domain by the end of the protocol, which is the same
704
+ as the total Cost for λ = 1.
705
+ 13
706
+
707
+ In the case λ = 0, any amplitude placed by Alice in C is immediately returned by Bob,
708
+ and at the end of each outer round is emptied into a4, so that at the end of outer round m
709
+ the state is
710
+ |ψ⟩m = |a1 = (cos θM)m, a2 = 0, a3 = 0, a4, c = 0, b = 0⟩.
711
+ (42)
712
+ For m = M this is (38) with r1 = (π2/8M). Thus at the end of the protocol a2, a3, c and
713
+ b are strictly zero. The Cost associated with inner round n—note that the channel is used
714
+ twice—is
715
+ 2[sin θM · sin(nπ/2N)]2.
716
+ (43)
717
+ Summing over n gives a total of (π2/4)(N/M2) for each outer round, and hence for M outer
718
+ rounds a total Cost of
719
+ Q0 = (π^2/4)(N/M).        (44)
721
+ To compute the total change in overlap ∆G01(A), note that since for λ = 1 Bob does not
722
+ return an amplitude, only the ⟨Cµ|Cν⟩ term in (21) contributes. The contribution for inner
723
+ round n of outer round m is the product of the factors
724
+ [sin θM sin(nθN)] · [sin(mθM) sin θN]
725
+ (45)
726
+ corresponding to λ = 0 and 1. Summing them yields
727
+ (sin θM sin θN) Σ_{m=1}^{M} Σ_{n=1}^{N} sin(mθM) sin(nθN) = (π^2/4MN)(4MN/π^2) = 1,        (46)
733
+ and hence
734
+ ∆G01(A) = −1,
735
+ (47)
736
+ as expected.
737
+ IV.3
738
+ Discussion of Costs and Probabilities
739
+ To summarize the results of Sec. IV.2: The total Costs Q0 and Q1 for λ = 0 and 1 are:
740
+ Q0 = (π^2/4)(N/M),        Q1 = (π^2/8)(M/N),        Q0Q1 ≈ 3.044,        Q0/Q1 = 2N^2/M^2.        (48)
745
+ Given that M ≪ N, Q1 is miniscule, Q0 is enormous, while their product is of order 1, and
746
+ satisfies the rigorous bound (27). The case λ = 1 is the easiest to understand. Since Bob
747
+ does not return the amplitude put into the channel by Alice, the Bob-to-Alice Cost ˆK1 is
748
+ zero. The Alice-to-Bob Cost is |sb|2 in (38), i.e., the probability that at the very end the
749
+ photon is in Bob’s domain. The physical reason for this is that the process by which the
750
+ amplitude gets there is incoherent, no quantum interference, since no amplitude goes back
751
+ through the channel. Bob could either accumulate these amplitudes until the end of the
752
+ protocol and then measure to see if the photon is in B, or carry out a measurement at the
753
+ end of each inner round; in either case the probability of his detecting the photon is |sb|2 in
754
+ (38). The situation is analogous to the use of intermediate time measurements in a one-way
755
+ protocol as discussed at the end of Sec. II.2.
756
+ The enormous Cost Q0 for λ = 0 comes about because Bob repeatedly returns the
757
+ amplitude sent by Alice in a coherent process. While the amplitude bouncing back and
758
+ 14
759
+
760
+ forth through the channel is relatively small, of order 1/M, multiplying its absolute square
761
+ by 2N, the number of times this amplitude is in the channel during each outer round,
762
+ leads to a Cost of order N/M2 for each outer round, and hence a total of order N/M for the
763
+ complete process.
764
+ Clearly the large value of Q0 means the claim that the protocol is counterfactual cannot
765
+ be maintained if Cost is used as a criterion for channel use, so it is worth discussing how
766
+ the authors of SLAZ reached a different conclusion. In essence their reasoning was based
767
+ on the small value of the amplitude in A3 at the end of an outer round just before it is
768
+ transferred to A4, as per the discussion in Sec. IV.1. The absolute square of this amplitude
769
+ is the probability that the corresponding detector D3 in Fig. 2(b) in the SLAZ paper will
770
+ be triggered. This amplitude was earlier oscillating back and forth inside the subspace with
771
+ projector S = A2+A3+C, and hence it is reasonable to assume that if this detector triggers,
772
+ the photon was earlier in S during all N inner rounds making up this particular outer round1.
773
+ As this probability is of order 1/M2, the probability that one of the D3 detectors triggers
774
+ during the M outer rounds that make up a given run is of order 1/M, and hence small.
775
+ There are two serious objections to using this small probability to justify the claim that
776
+ the protocol is counterfactual: one classical and the other quantum. Let us start with the
777
+ former. During a particular outer round the photon amplitude in a λ = 0 run rattles back
778
+ and forth inside S a total of N times, and in particular it is in C a total of 2N times.
779
+ Consider a stochastic classical protocol for transmitting information in which most of the
780
+ time Alice and Bob exchange no information at all. However, with a small probability ǫ
781
+ Alice sends a little white ball into the channel leading to Bob, who colors it green or red
782
+ and sends it back to Alice to convey one bit of information. She records the color, paints
783
+ the ball white, and returns it to Bob who again colors it to send a second bit, and so forth,
784
+ for a total of N rounds. The average rate of transmitting information is Nǫ bits, and one
785
+ cannot simply throw away the factor of N and claim that this protocol is in some sense
786
+ ‘counterfactual’.
787
+ The quantum difficulty has to do with what can be inferred from the probability that
788
+ the photon was in S = A2 + A3 + C during the inner rounds that make up a particular outer
789
+ round. One may be tempted to use classical reasoning and assume that the probabilities of
790
+ being in each of the mutually exclusive regions, A2, A3, and C, that combine to make up S
791
+ are well-defined and sum to the probability of being in S. But in the presence of quantum
792
+ interference this sort of reasoning is invalid and leads to paradoxes. See the discussion of
793
+ parallel channels in Sec. II.1.
794
+ V
795
+ Conclusion
796
+ The original SLAZ proposal has motivated a large number of papers; see the extensive
797
+ bibliographies in [3, 4]. Merely trying to summarize them, much less provide a detailed re-
798
+ view, lies outside the scope of the present paper. Broadly speaking, this literature consists
799
+ of modifications, extensions, or improvements of the original SLAZ scheme; along with crit-
800
+ icisms of the claim that these protocols are counterfactual and replies to such criticisms. It
801
+ is hoped that the following rather brief comments will provide some orientation.
802
+ 1This assumption can be justified using Consistent Histories; see the discussion of measurements in [5,6]
803
+ 15
804
+
805
+ Significant extensions of the original SLAZ scheme by the last three members of the
806
+ original collaboration include: the use of a phase change rather than absorption to transmit
807
+ the λ = 1 bit [7]; a scheme to transmit quantum states by multiple iterations of the original
808
+ SLAZ scheme [8]; using many photons in place of a single photon to transmit a classical
809
+ bit [4]. These and others are certainly interesting ideas from the perspective of transmitting
810
+ quantum information, and worth further exploration.
811
+ On the other hand, in these and all other extensions or modifications of SLAZ this author
812
+ has examined, the claim that the protocol is “counterfactual,” in the sense that the total use
813
+ of a quantum channel is negligible in the asymptotic limit, is subject to the same objections
814
+ discussed in Sec. IV.3: An improper use of probabilistic reasoning in a situation where
815
+ quantum interference means probabilities cannot be defined, and where even in a classical
816
+ situation Cost would be better than probability as a measure of channel usage. The total
817
+ Cost remains finite in the asymptotic limit of a very large number of steps, which means that
818
+ counterfactual claims should be dropped. Doing so will aid, not hinder, the serious study of
819
+ these interesting quantum schemes for transmitting information.
820
+ Shortly after the original SLAZ publication, Vaidman published a Comment [9] claiming
821
+ that in the λ = 0 case in which Bob reflects the amplitude rather than absorbing it, the
822
+ photon which was later (with high probability) detected by Alice must at an earlier time have
823
+ been in the channel C. In their Reply [10] the SLAZ authors pointed out this way of reasoning
824
+ about events at an intermediate time in the presence of quantum interference was invalid,
825
+ and leads to paradoxes, a position supported by the analysis in Sec. IV.2 above. However,
826
+ they then repeated their original counterfactual claim which itself is based on a defective
827
+ understanding of probabilities at an intermediate time. A later and much more extended
828
+ criticism of counterfactuality claims by Vaidman [11] suffers from the same difficulty as his
829
+ earlier Comment.
830
+ Some years later Aharonov and Vaidman [12] claimed to have found a scheme of the
831
+ general SLAZ type which is genuinely counterfactual.
832
+ However, when measurements or
833
+ absorption of a photon at intermediate times are replaced by unitary processes—mapping
834
+ amplitude into an empty subspace reserved for this purpose, as discussed in Sec. IV.1—the
835
+ inequality in Sec. III.4 applies to this case and undermines the counterfactual claim. The
836
+ fundamental difficulty with such claims is that the Hilbert space projector which identifies the
837
+ position of a particle at some intermediate time does not commute with the one representing
838
+ the quantum state evolving unitarily in time.
839
+ The most significant contributions of the present paper to the analysis of SLAZ-type
840
+ protocols are the use of Cost as a measure of channel usage, and the use of Gram matrices for
841
+ discussing information transfer at intermediate times in the presence of quantum interference.
842
+ In particular, the fact that these Gram matrices are additive over subspaces and invariant
843
+ (“conserved”) under unitary time transformations, plays a key part in the discussions in
844
+ Sec. III. A rather surprising feature is the role of off-diagonal elements, “overlaps”, as a type
845
+ of information measure which, unlike most such measures, is not in general positive. That it
846
+ can be negative plays a very significant part in understanding its intuitive role in information
847
+ transfer. That its total change on Alice’s side must be −1 during the course of a successful
848
+ protocol is confirmed for the SLAZ protocol in Sec. IV.2.
849
+ This use of Gram matrices requires that the intermediate time steps be unitary. In the
850
+ case of SLAZ, measurements at intermediate times can be eliminated by mapping photon
851
+ 16
852
+
853
+ amplitude into empty subspaces, and this can be achieved in certain other cases, e.g., the
854
+ Aharonov and Vaidman protocol [12]. However, it is less clear whether something similar
855
+ could be done in a case in which, for example, Alice uses measurements at intermediate times
856
+ to change later steps in the protocol in hopes of reducing the total Cost. This author believes
857
+ that such an improvement is impossible, because measurements themselves are quantum
858
+ processes whose description simply requires a large enough Hilbert space in Alice’s domain
859
+ [13]. But this has not yet been demonstrated.
860
+ And what is special about classical information? Sending an arbitrary one-qubit quantum
861
+ state from Alice to Bob using the 2-way protocol of Sec. III.3 could be done with a Cost of 2,
862
+ which is to say twice that of simply using a 1-way protocol from Bob to Alice. That this is
863
+ the minimum seems likely, but has not been demonstrated. What about a two-way protocol
864
+ with all the amplitude starting on Alice’s side, with the aim of a perfect transmission of each
865
+ of two specified nonorthogonal states from Bob to Alice—what would be the minimum total
866
+ Cost?
867
+ An interesting feature of the original SLAZ protocol is the enormous ratio 2N²/M², see
868
+ (48), of the Costs to transmit λ = 0 and 1, in contrast to the relatively simple protocols
869
+ discussed in Sec. III.3 for which the ratio is 1. Because the success of SLAZ depends upon N
870
+ being much larger than M, this large ratio presumably has something to do with Bob’s not
871
+ sending anything back through the channel when λ = 1. Might there be some interesting
872
+ physical principles, in addition to the Zeno effect, hiding here and waiting to be explored?
873
+ In conclusion it is hoped that the thinking and tools employed in this paper will be useful
874
+ for studying other problems of quantum information at intermediate times in situations
875
+ where the careless use of ill-defined probabilities generates paradoxes rather than physical
876
+ understanding. In particular, information transfer among three or more parties, of current
877
+ interest in the study of quantum networks, might benefit from the sort of analysis used here.
878
+ Acknowledgements
879
+ The author expresses his appreciation to Carnegie-Mellon University and its Physics
880
+ Department for continuing support of his activities as an emeritus faculty member.
881
+ References
882
+ [1] Hatim Salih, Zheng-Hong Li, M. Al-Amri, and M. Suhail Zubairy. Protocol for di-
883
+ rect counterfactual quantum communication.
884
+ Phys. Rev. Lett., 110:170502, 2013.
885
+ arXiv:1206.2042.
886
+ [2] Johann von Neumann. Mathematische Grundlagen der Quantenmechanik. Springer-
887
+ Verlag, Berlin, 1932. English translation by R. T. Beyer: Mathematical Foundations of
888
+ Quantum Mechanics, Princeton University Press, Princeton, New Jersey (1955).
889
+ [3] Jonte R. Hance, James Ladyman, and John Rarity. How quantum is quantum counter-
890
+ factual communication? Found. Phys., 51:12, 2021. arXiv:1909.07530.
891
+ 17
892
+
893
+ [4] Zheng-Hong Li, Shang-Yue Feng, M. Al-Amri, and M. Suhail Zubairy. Direct counter-
894
+ factual quantum communication protocol beyond a single photon source. Phys. Rev. A,
895
+ 106:032610, 2022. arXiv:2202.03935.
896
+ [5] Robert B. Griffiths. What quantum measurements measure. Phys. Rev. A, 96:032110,
897
+ 2017. arXiv:1704.08725.
898
+ [6] Robert B. Griffiths.
899
+ The Consistent Histories Approach to Quantum Mechanics.
900
+ Stanford Encyclopedia of Philosophy, 2019.
901
+ https://plato.stanford.edu/entries/qm-
902
+ consistent-histories/.
903
+ [7] Zheng-Hong Li, M. Al-Amri, and M. Suhail Zubairy. Direct quantum communication
904
+ with almost invisible photons. Phys. Rev. A, 89:052334, 2014.
905
+ [8] Zheng-Hong Li, M. Al-Amri, and M. Suhail Zubairy. Direct counterfactual transmission
906
+ of a quantum state. Phys. Rev. A, 92:052315, 2015.
907
+ [9] Lev Vaidman. Tracing the past of a quantum particle. Phys. Rev. A, 89:024102, 2014.
908
+ arXiv:1312.7566.
909
+ [10] Hatim Salih, Zheng-Hong Li, M. Al-Amri, and M. Suhail Zubairy. Salih et al. reply.
910
+ Phys. Rev. Lett., 112:208902, 2014. arXiv:1404.5392.
911
+ [11] L. Vaidman.
912
+ Counterfactuality of ‘counterfactual’ communication.
913
+ J. Phys. A,
914
+ 48:465303, 2015. arXiv:1410.2723.
915
+ [12] Yakir Aharonov and Lev Vaidman.
916
+ Modification of counterfactual communication
917
+ protocols that eliminates weak particle traces.
918
+ Phys. Rev. A, 99:010103, 2019.
919
+ arXiv:1805.10634.
920
+ [13] For a consistent quantum-mechanical description of the measuring process, see [5], the
921
+ relevant sections of [6], and Chs. 17 and 18 of [14].
922
+ [14] Robert B. Griffiths. Consistent Quantum Theory. Cambridge University Press, Cam-
923
+ bridge, U.K., 2002. http://quantum.phys.cmu.edu/CQT/.
924
+ 18
925
+
EtAzT4oBgHgl3EQfw_5J/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
F9E4T4oBgHgl3EQfgA1N/content/tmp_files/2301.05112v1.pdf.txt ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.05112v1 [gr-qc] 12 Jan 2023
2
+ GWitchHunters: Machine Learning and citizen science
3
+ to improve the performance of Gravitational Wave
4
+ detector
5
+ Massimiliano Razzanoa,b,1,∗, Francesco Di Renzoa,b, Francesco Fidecaroa,b,
6
+ Gary Hemmingc, Stavros Katsanevasc
7
+ aDepartment of Physics, University of Pisa, Largo B. Pontecorvo 3, Pisa, I-56127
8
+ bINFN Section of Pisa, Largo B. Pontecorvo 3, Pisa, I-56127
9
+ cEuropean Gravitational Observatory (EGO),Via E. Amaldi, 5, Cascina,I-56021
10
+ Abstract
11
+ Gravitational waves have opened a new window on the Universe and
12
+ paved the way to a new era of multimessenger observations of cosmic sources.
13
+ Second-generation ground-based detectors such as Advanced LIGO and Ad-
14
+ vanced Virgo have been extremely successful in detecting gravitational wave
15
+ signals from coalescence of black holes and/or neutron stars. However, in
16
+ order to reach the required sensitivities, the background noise must be inves-
17
+ tigated and removed. In particular, transient noise events called “glitches”
18
+ can affect data quality and mimic real astrophysical signals, and it is there-
19
+ fore of paramount importance to characterize them and find their origin,
20
+ a task that will support the activities of detector characterization of Virgo
21
+ and other interferometers. Machine learning is one of the most promising
22
+ approaches to characterize and remove noise glitches in real time, thus im-
23
+ proving the sensitivity of interferometers. A key input to the preparation of
24
+ a training dataset for these machine learning algorithms can originate from
25
+ citizen science initiatives, where volunteers contribute to classify and analyze
26
+ signals collected by detectors. We will present GWitchHunters, a new citi-
27
+ zen science project focused on the study of gravitational wave noise, that has
28
+ been developed within the REINFORCE project (a ”Science With And For
29
+ Society” project funded under the EU’s H2020 program). We will present
30
+ ∗Corresponding author
31
+ Email address: massimiliano.razzano@unipi.it (Massimiliano Razzano)
32
+ 1on behalf of the REINFORCE Consortium
33
+ Preprint submitted to Nuclear Instruments and Methods in Physics Research A, January 13, 2023
34
+
35
+ the project, its development and the key tasks that citizens are participating
36
+ in, as well as its impact on the study of noise in the Advanced Virgo detector.
37
+ Keywords:
38
+ gravitational waves, machine learning, citizen science
39
+ PACS: 04.20.–q, 04.30.Tv,
40
+ 2000 MSC: 83C35,
41
+ 1. Introduction
42
+ Gravitational wave physics is opening an entirely new window on the Uni-
43
+ verse. Since their discovery in 2015 [1], the Advanced LIGO [2] and Advanced
44
+ Virgo [3] detectors have carried on three observing runs (O1, O2, O3) and
45
+ unveiled 90 signals produced by the coalescence of compact objects, mostly
46
+ binary black holes, with a small fraction of neutron star and/or black hole
47
+ binaries [4].
48
+ Advanced LIGO and Virgo are second-generation laser interferometers with
49
+ Fabry-Perot cavities hosted in kilometer-scale perpendicular arms, which are capable of
50
+ detecting the tiny deformations induced in the fabric of spacetime by the
51
+ passage of gravitational waves. In order to improve the sensitivity of the
52
+ detectors, there is a continuous effort to reduce the background noise due to
53
+ local disturbances. In particular, at low frequencies the noise is dominated by
54
+ seismic and Newtonian noise, while at mid frequencies the main component
55
+ is related to the thermal noise and at high frequencies the noise is mostly
56
+ related to quantum effects.
57
+ The activity of detector characterization and
58
+ noise hunting in gravitational wave detectors is focused on the investigation
59
+ of stationary and non-stationary noise sources. In particular, non-station-
60
+ ary transient noise events called glitches are of particular importance in the
61
+ noise studies. In fact, glitches can affect data quality and stability and mimic
62
+ real astrophysical signals, thus reducing the effective duty cycle of interfer-
63
+ ometers. The classification and characterization of glitches are therefore key
64
+ to understanding the origin of noise in the detector. However, glitches have com-
65
+ plex temporal signatures that make them difficult to classify using standard
66
+ methods. Various works have shown that Machine Learning methods can be
67
+ promising for the classification of glitches [5, 7]. In particular, images built
68
+ from the time-frequency spectrograms of glitches are very effective in show-
69
+ ing the complex morphology of glitches and can be easily given in input to
70
+ machine learning algorithms, including deep convolutional neural networks
71
+ [6]. A possible approach to this problem is based on supervised learning,
72
+ 2
73
+
74
+ which requires a large number of labeled glitch samples that could be produced
75
+ by dedicated citizen science initiatives, where volunteers look at images and
76
+ classify them.
77
+ A successful example of this method is provided by Gravi-
78
+ tySpy2, a citizen science project focused on the classification of glitches in
79
+ LIGO and Virgo[8]. Here we present GWitchHunters3, a new citizen science
80
+ project complementary to GravitySpy and aimed at improving sensitivity of
81
+ gravitational wave detectors by combining citizen science and machine learning.
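+ As a purely illustrative sketch (not code from GWitchHunters or GravitySpy; the sampling rate, window length and the synthetic "glitch" are placeholder choices), a time-frequency image of the kind used as input to such classifiers could be produced along these lines:
+ import numpy as np
+ from scipy.signal import spectrogram
+
+ fs = 4096                                               # placeholder sampling rate [Hz]
+ t = np.arange(0, 2.0, 1.0 / fs)
+ strain = 1e-21 * np.random.randn(t.size)                # toy noise, not real detector data
+ strain += 5e-21 * np.sin(2 * np.pi * 200 * t) * np.exp(-((t - 1.0) / 0.05) ** 2)   # toy glitch
+
+ f, tt, Sxx = spectrogram(strain, fs=fs, nperseg=256, noverlap=192)
+ img = np.log10(Sxx + 1e-46)                             # log power spectrogram
+ img = (img - img.min()) / (img.max() - img.min())       # normalized [0, 1] image for a CNN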
82
+ 2. The REINFORCE Project
83
+ GWitchHunters has been developed within the Research Infrastructures
84
+ FOR Citizens in Europe (REINFORCE) project4. REINFORCE is a Re-
85
+ search & Innovation Project, supported by the EU H2020 SWAFS “Science
86
+ with and for Society” work programme and aimed at creating a series of
87
+ cutting-edge citizen science projects on Frontier Physics research, with the
88
+ goal of engaging >100,000 citizens. REINFORCE is based on four citizen
89
+ science demonstrators focused Gravitational Waves (GWitchHunters), Astro-
90
+ physical neutrinos (Deep Sea Explorers), High Energy Physics (New Particle
91
+ Search at CERN) and muon-based tomography (Cosmic Muon Images). All
92
+ demonstrators are hosted on Zooniverse [9], the world leading platform for
93
+ citizen science projects.
94
+ 3. Overview of GWitchHunters
95
+ GWitchHunters was officially launched on Zooniverse in November
96
+ 2021 after a dedicated review phase and offers citizens a set of different
97
+ tasks of increasing difficulty. Data are presented as spectrograms and come
98
+ from the Virgo O3 run. A Playground task is specifically devoted to learning
99
+ the basics of glitch morphology and its classification. Three other levels offer
100
+ the possibility to (1) classify glitches among a larger set of classes, (2) localize
101
+ the glitches in the time-frequency space, and (3) compare the spectrogram
102
+ in the main channel of Virgo with that produced by auxiliary sensors. This
103
+ last task is particularly innovative, since it offers the possibility of linking the
104
+ glitches observed in the main channel to local disturbances in the detector,
105
+ 2http://https://gravityspy.org/
106
+ 3https://www.zooniverse.org/projects/reinforce/gwitchhunters
107
+ 4https://www.reinforceeu.eu/
108
+ 3
109
+
110
+ Figure 1: Example of a GWitchHunters spectrogram showing two glitches, as well as the
111
+ rectangle drawn by citizens to locate them
112
+ thus suggesting a possible hint to the origin of each glitch. These tasks can
113
+ be carried out both on a personal computer and on mobile devices. The project
114
+ also features a set of tutorials and examples to teach the volunteers how to
115
+ perform the different tasks, as well as a ”Field Guide” containing information
116
+ on the Advanced Virgo detector, the various glitch classes and the auxiliary
117
+ channels used in the project.
118
+ 4. First Results and Conclusions
119
+ Since its official launch in November 2021, ∼2800 volunteers have sub-
120
+ scribed to the project, although a significant number of others have contributed
121
+ without officially registering. This collective effort has produced more than
122
+ ∼400000 classifications of ∼ 40000 data samples so far. In order to pro-
123
+ mote the project and engage citizens, the REINFORCE consortium has or-
124
+ ganized many initiatives, including workshops, press activities, online chal-
125
+ [Figure 1 image: spectrogram of the Virgo strain channel; axes Time [s] vs. Frequency [Hz]; color scale: normalized energy]
+ lenges5 and training school6. Monitoring of the project website has been
+ carried out, showing that these initiatives successfully attracted more volun-
141
+ teers to GWitchHunters, leading to peaks of ∼5000 classifications per day.
142
+ The results of the volunteers' analysis are used to train a machine learning
143
+ algorithm that automatically analyzes the glitch data. In particular, we fo-
144
+ cused on a 2D convolutional neural network architecture, which has also been
145
+ tested on simulations [6, 10] reaching an accuracy greater than 99%. These
146
+ first tests show how the GWitchHunters project could be successfully used
147
+ to join citizen science and machine learning with the goal of contributing to
148
+ increasing the sensitivity of gravitational wave detectors.
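+ The network of [6, 10] is not reproduced here; as a minimal sketch of a 2D convolutional classifier of this general kind (the layer sizes and the number of glitch classes are placeholders, not the architecture actually used), one could write:
+ import torch
+ import torch.nn as nn
+
+ class GlitchCNN(nn.Module):
+     # Small 2D CNN: 1-channel spectrogram image in, glitch-class logits out.
+     def __init__(self, n_classes=10):                   # placeholder number of glitch classes
+         super().__init__()
+         self.features = nn.Sequential(
+             nn.Conv2d(1, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
+             nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2))
+         self.head = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(32, n_classes))
+     def forward(self, x):                               # x: (batch, 1, H, W) spectrograms
+         return self.head(self.features(x))
+
+ logits = GlitchCNN()(torch.randn(4, 1, 64, 64))         # dummy batch of 4 spectrogram images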
149
+ Acknowledgements
150
+ REINFORCE has received funding from the European Union’s Horizon
151
+ 2020 research and innovation program, under Grant Agreement no. 872859.
152
+ References
153
+ [1] Abbott, B. P. et al. 2016, Physical Review Letters, 116, 061102.
154
+ doi:10.1103/PhysRevLett.116.061102
155
+ [2] Aasi, J. et al. 2015, Classical and Quantum Gravity, 32, 074001.
156
+ doi:10.1088/0264-9381/32/7/074001
157
+ [3] Acernese, F. et al. 2015, Classical and Quantum Gravity, 32, 024001.
158
+ doi:10.1088/0264-9381/32/2/024001
159
+ [4] Abbott, B. et al. 2021, arXiv:2111.03606
160
+ [5] George, D., Shen, H., & Huerta, E. A. 2017, arXiv:1711.07468
161
+ [6] Razzano, M. & Cuoco, E. 2018, Classical and Quantum Gravity, 35,
162
+ 095016. doi:10.1088/1361-6382/aab793
163
+ [7] Powell, J. et al. 2015, Classical and Quantum Gravity, 32, 215012.
164
+ doi:10.1088/0264-9381/32/21/215012
165
+ 5e.g. https://www.reinforceeu.eu/winter-challenge-2022
166
+ 6e.g. https://reinforce.ea.gr/international-training-course/
167
+ 5
168
+
169
+ [8] Zevin, M. et al. 2017, Classical and Quantum Gravity, 34, 064003.
170
+ doi:10.1088/1361-6382/aa5cea
171
+ [9] Lintott, C. J. et al. 2008, MNRAS, 389, 1179. doi:10.1111/j.1365-
172
+ 2966.2008.13689.x
173
+ [10] Cuoco E., et al 2021 Mach. Learn.: Sci. Technol. 2 011002
174
+ 6
175
+
F9E4T4oBgHgl3EQfgA1N/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,128 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf,len=127
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
3
+ page_content='05112v1 [gr-qc] 12 Jan 2023 GWitchHunters: Machine Learning and citizen science to improve the performance of Gravitational Wave detector Massimiliano Razzanoa,b,1,∗, Francesco Di Renzoa,b, Francesco Fidecaroa,b, Gary Hemmingc, Stavros Katsanevasc aDepartment of Physics, University of Pisa, Largo B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
4
+ page_content=' Pontecorvo 3, Pisa, I-56127 bINFN Section of Pisa, Largo B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
5
+ page_content=' Pontecorvo 3, Pisa, I-56127 cEuropean Gravitational Observatory (EGO),Via E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
6
+ page_content=' Amaldi, 5, Cascina,I-56021 Abstract The Gravitational waves have opened a new window on the Universe and paved the way to a new era of multimessenger observations of cosmic sources.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
7
+ page_content=' Second-generation ground-based detectors such as Advanced LIGO and Ad- vanced Virgo have been extremely successful in detecting gravitational wave signals from coalescence of black holes and/or neutron stars.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
8
+ page_content=' However, in order to reach the required sensitivities, the background noise must be inves- tigated and removed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
9
+ page_content=' In particular, transient noise events called “glitches” can affect data quality and mimic real astrophysical signals, and it is there- fore of paramount importance to characterize them and find their origin, a task that will support the activities of detector characterization of Virgo and other interferometers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
10
+ page_content=' Machine learning is one of the most promising approaches to characterize and remove noise glitches in real time, thus im- proving the sensitivity of interferometers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
11
+ page_content=' A key input to the preparation of a training dataset for these machine learning algorithms can originate from citizen science initiatives, where volunteers contribute to classify and analyze signals collected by detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
12
+ page_content=' We will present GWitchHunters, a new citi- zen science project focused on the study of gravitational wave noise, that has been developed within the REINFORCE project (a ”Science With And For Society” project funded under the EU’s H2020 program).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
13
+ page_content=' We will present ∗Corresponding author Email address: massimiliano.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
14
+ page_content='razzano@unipi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
15
+ page_content='it (Massimiliano Razzano) 1on behalf of the REINFORCE Consortium Preprint submitted to Nuclear Instruments and Methods in Physics Research AJanuary 13, 2023 the project, its development and the key tasks that citizens are participating in, as well as its impact on the study of noise in the Advanced Virgo detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
16
+ page_content=' Keywords: gravitational waves, machine learning, citizen science PACS: 04.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
17
+ page_content='20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
18
+ page_content='–q, 04.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
19
+ page_content='30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
20
+ page_content='Tv, 2000 MSC: 83C35, 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
21
+ page_content=' Introduction Gravitational wave physics is opening an entire new window on the Uni- verse.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
22
+ page_content=' Since their discovery in 2015 [1], the Advanced LIGO [2] and Advanced Virgo [3] detectors have carried on three observing runs (O1, O2, O3) and unveiled 90 signals produced by the coalescence of compact objects, mostly binary black hole with a small fraction of neutron star and/or black hole binaries [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
23
+ page_content=' Advanced LIGO and Virgo are second-generation laser interferometers with Fabry-Perot cavities hosted in km perpendicular arms, that are capable of detecting the tiny deformations induced in the fabric of spacetime by the passage of gravitational waves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
24
+ page_content=' In order to improve the sensitivity of the detectors, there is a continuous effort to reduce the background noise due to local disturbances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
25
+ page_content=' In particular, at low frequencies the noise is dominated by seismic and Newtonian noise, while at mid frequencies the main component is related to the thermal noise and at high frequencies the noise is mostly related to quantum effects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
26
+ page_content=' The activity of detector characterization and noise hunting in gravitational wave detectors is focused on the investigation of stationary and non stationary noise sources.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
27
+ page_content=' In particular, non station- ary transient noise events called glitches are of particular importance in the noise studies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
28
+ page_content=' In fact, glitches can affect data quality and stability and mimic real astrophysical signals, thus reducing the effective duty cycle of interfer- ometers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
29
+ page_content=' The classification and characterization of glitches is therefore key to understand the origin of noise in detector.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
30
+ page_content=' However, glitches have com- plex temporal signatures, that make difficult to classify them using standard methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
31
+ page_content=' Various works have shown that Machine Learning methods can be promising for the classification of glitches [5, 7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
32
+ page_content=' In particular, images built from the time-frequency spectrograms of glitches are very effective in show- ing the complex morphology of glitches and can be easily given in input to machine learning algorithms, including deep convolutional neural networks [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
33
+ page_content=' A possible approach to this problem is based on supervised learning, 2 that requires large number of labeled glitch samples, that could be produced by dedicated citizen science initiatives, where volunteers look at images and clssify them.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
34
+ page_content=' A successful example of this method is provided by Gravi- tySpy2, a citizen science project focused on the classification of glitches in LIGO and Virgo[8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
35
+ page_content=' Here we present GWitchHunters3, a new citizen science project complementary to GravitySpy and aimed at improving sensitivity of gravitational wave detectors combining citizen science and machine learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
36
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
37
+ page_content=' The REINFORCE Project GwitchHunters has been developed within the Research Infrastructures FOR Citizens in Europe (REINFORCE) project4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
38
+ page_content=' REINFORCE is a Re- search & Innovation Project, supported by the EU H2020 SWAFS “Science with and for Society” work programme and aimed at creating a series of cutting-edge citizen science projects on Frontier Physics research, with the goal of engaging >100,000 citizens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
39
+ page_content=' REINFORCE is based on four citizen science demonstrators focused Gravitational Waves (GWitchHunters), Astro- physical neutrinos (Deep Sea Explorers), High Energy Physics (New Particle Search at CERN) and muon-based tomography (Cosmic Muon Images).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
40
+ page_content=' All demonstrators are hosted on Zooniverse [9], the world leading platform for citizen science projects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
41
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
42
+ page_content=' Overview of GWitchHunters GwitchHunters has been officially launched on Zooniverse in November 2021 after a dedicated review phase and offers to citizens a set of different tasks of increasing difficulty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
43
+ page_content=' Data are presented as spectrograms and come from the Virgo O3 run.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
44
+ page_content=' A Playground task is specifically devoted to learning the basics of glitch morphology and its classification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
45
+ page_content=' Three other levels offer (1) the possibility to classify glitches among a larger set of classes, (2) localize the glitches in the time-frequency space, and (3) compare the spectrogram in the main channel of Virgo with that produced by auxiliary sensors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
46
+ page_content=' This last task is particularly innovative, since it offer the possibility of linking the glitches observed in the main channel to local disturbancies in the detector, 2http://https://gravityspy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
47
+ page_content='org/ 3https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
48
+ page_content='zooniverse.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
49
+ page_content='org/projects/reinforce/gwitchhunters 4https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
50
+ page_content='reinforceeu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
51
+ page_content='eu/ 3 Figure 1: Example of a GWitchHunters spectrogram showing two glitches, as well as the rectagle drawn by citizens to locate them thus suggesting a possible hint to the origin of each glitch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
52
+ page_content=' These tasks can be carried both on a personal computer and on mobile devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
53
+ page_content=' The project also features a set of tutorials and examples to teach the volunteers how to perform the different tasks, as well as a ”Field Guide” containing information on the Advanced Virgo detector, the various glitch classes and the auxiliary channels used in the project.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
54
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
55
+ page_content=' First Results and Conclusions Since its official launch in November 2021, ∼2800 volunteers have sub- scribed to the project, although another significant amount have contributed without officially registering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
56
+ page_content=' This collective effort has produced more than ∼400000 classifications of ∼ 40000 data samples so far.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
57
+ page_content=' In order to pro- mote the project and engage citizens, the REINFORCE consortium has or- ganized many initiatives, including workshops, press activities, online chal- 4 Virgo strain channel Frequency [Hz] Normalizedenergy 100 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
58
+ page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
59
+ page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
60
+ page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
61
+ page_content='2 0:2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
62
+ page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
63
+ page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
64
+ page_content='8 Time [s]lenges5 and training school6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
65
+ page_content=' A monitoring of the project website has been carried on, showing that these initiatives successfully attracted more volun- teers to GWitchHunters, leading to peaks of ∼5000 classifications per day.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
66
+ page_content=' The results of the volunteers analysis are used for training a machine learning algorithm that automatically analyze the glitch data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
67
+ page_content=' In particular, we fo- cused on a 2D convolutional neural network architecture, that has been also tested on simulations [6, 10] reaching an accuracy greater than 99%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
68
+ page_content=' These first tests show how the GWitchHunters project could be successfully used to join citizen science and machine learning with the goal of contributing to increase the sensitivity of gravitational wave detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
69
+ page_content=' Acknowledgements REINFORCE has received funding from the European Union’s Horizon 2020 research and innovation program, under Grant Agreement no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
70
+ page_content=' 872859.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
71
+ page_content=' References [1] Abbott, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
72
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
73
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
74
+ page_content=' 2016, Physical Review Letters, 116, 061102.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
75
+ page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
76
+ page_content='1103/PhysRevLett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
77
+ page_content='116.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
78
+ page_content='061102 [2] Aasi, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
79
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
80
+ page_content=' 2015, Classical and Quantum Gravity, 32, 074001.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
81
+ page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
82
+ page_content='1088/0264-9381/32/7/074001 [3] Acernese, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
83
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
84
+ page_content=' 2015, Classical and Quantum Gravity, 32, 024001.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
85
+ page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
86
+ page_content='1088/0264-9381/32/2/024001 [4] Abbott, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
87
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
88
+ page_content=' 2021, arXiv:2111.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
89
+ page_content='03606 [5] George, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
90
+ page_content=', Shen, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
91
+ page_content=', & Huerta, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
92
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
93
+ page_content=' 2017, arXiv:1711.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
94
+ page_content='07468 [6] Razzano, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
95
+ page_content=' & Cuoco, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
96
+ page_content=' 2018, Classical and Quantum Gravity, 35, 095016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
97
+ page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
98
+ page_content='1088/1361-6382/aab793 [7] Powell, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
99
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
100
+ page_content=' 2015, Classical and Quantum Gravity, 32, 215012.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
101
+ page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
102
+ page_content='1088/0264-9381/32/21/215012 5e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
103
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
104
+ page_content=' https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
105
+ page_content='reinforceeu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
106
+ page_content='eu/winter-challenge-2022 6e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
107
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
108
+ page_content=' https://reinforce.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
109
+ page_content='ea.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
110
+ page_content='gr/international-training-course/ 5 [8] Zevin, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
111
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
112
+ page_content=' 2017, Classical and Quantum Gravity, 34, 064003.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
113
+ page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
114
+ page_content='1088/1361-6382/aa5cea [9] Lintott, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
115
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
116
+ page_content=' et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
117
+ page_content=' 2008, MNRAS, 389, 1179.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
118
+ page_content=' doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
119
+ page_content='1111/j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
120
+ page_content='1365- 2966.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
121
+ page_content='2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
122
+ page_content='13689.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
123
+ page_content='x [10] Cuoco E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
124
+ page_content=', et al 2021 Mach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
125
+ page_content=' Learn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
126
+ page_content=' : Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
127
+ page_content=' Technol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
128
+ page_content=' 2 011002 6' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/F9E4T4oBgHgl3EQfgA1N/content/2301.05112v1.pdf'}
FdE0T4oBgHgl3EQfzALi/content/2301.02668v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:096c2cc72813cd1bf4c9476f349fdc16265e816da92021fe1695f48b011f5c50
3
+ size 4724319
FdE1T4oBgHgl3EQfEwOD/content/2301.02894v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c49449c79ec1b6b23b64dc89b3ea76c002a3ad42888c3af6e6c1e65b33f5323
3
+ size 432647
FdE1T4oBgHgl3EQfEwOD/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:373a95f2685c44669db41187b22084b94b619d6af8e4465e97796881b48ddd1d
3
+ size 70372
H9E0T4oBgHgl3EQfhwGK/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,509 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf,len=508
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
3
+ page_content='02436v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
4
+ page_content='CO] 6 Jan 2023 Vertex-Critical (P5, chair)-Free Graphs Shenwei Huang*† Zeyu Li‡§ January 4, 2022 Abstract Given two graphs H1 and H2, a graph G is (H1, H2)-free if it contains no induced subgraph isomorphic to H1 or H2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
5
+ page_content=' A Pt is the path on t vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
6
+ page_content=' A chair is a P4 with an additional vertex adjacent to one of the middle vertices of the P4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
7
+ page_content=' A graph G is k-vertex-critical if G has chromatic number k but every proper induced subgraph of G has chromatic number less than k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
8
+ page_content=' In this paper, we prove that there are finitely many 5-vertex-critical (P5, chair)-free graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
9
+ page_content=' Keywords.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
10
+ page_content=' Graph coloring;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
11
+ page_content=' k-vertex-critical graphs;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
12
+ page_content=' forbidden induced subgraphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
13
+ page_content=' 1 Introduction All graphs in this paper are finite and simple.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
14
+ page_content=' We say that a graph G contains a graph H if H is isomorphic to an induced subgraph of G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
15
+ page_content=' A graph G is H-free if it does not contain H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
16
+ page_content=' For a family of graphs H, G is H-free if G is H-free for every H ∈ H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
17
+ page_content=' When H consists of two graphs, we write (H1, H2)-free instead of {H1, H2}- free.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
18
+ page_content=' As usual, Pt and Cs denote the path on t vertices and the cycle on s vertices, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
19
+ page_content=' A clique (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
20
+ page_content=' independent set) in a graph is a set of pairwise adjacent (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
21
+ page_content=' nonadjacent) vertices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
22
+ page_content=' The complete graph on n vertices is denoted by Kn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
23
+ page_content=' The graph K3 is also referred to as the triangle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
24
+ page_content=' The clique number of G, denoted by ω(G), is the size of a largest clique in G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
25
+ page_content=' For two graphs G and H, we use G + H to denote the disjoint union of G and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
26
+ page_content=' If a graph G can be partitioned into k independent sets S1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
27
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
28
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
29
+ page_content=' , Sk such that there is an edge between every vertex in Si and every vertex in Sj for all 1 ≤ i < j ≤ k, G is called a complete k-partite graph;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
30
+ page_content=' each Si is called a part of G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
31
+ page_content=' If we do not specify the number of parts in G, we simply say that G is a complete multipartite graph.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
32
+ page_content=' We denote by Kn1,.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
33
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
34
+ page_content=',nk the complete k-partite graph such that the ith part Si has size ni, for each 1 ≤ i ≤ k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
35
+ page_content=' A q-coloring of a graph G is a function φ : V (G) −→ {1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
36
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
37
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
38
+ page_content=' , q} such that φ(u) ̸= φ(v) whenever u and v are adjacent in G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
39
+ page_content=' And a q-coloring of G is also a partition of V (G) into q independent sets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
40
+ page_content=' A graph is q-colorable if it admits a q-coloring.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
41
+ page_content=' The College of Computer Science, Nankai University, Tianjin 300350, China.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
42
+ page_content=' Email: shenweihuang@nankai.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
43
+ page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
44
+ page_content='cn.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
45
+ page_content=' Supported by Natural Science Foundation of Tianjin (20JCY- BJC01190).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
46
+ page_content=' †Tianjin Key Laboratory of Network and Data Security Technology, Nankai University, Tianjin 300071, China.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
47
+ page_content=' ‡College of Computer Science, Nankai University, Tianjin 300350, China.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
48
+ page_content=' §Tianjin Key Laboratory of Network and Data Security Technology, Nankai University, Tianjin 300071, China.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
49
+ page_content=' 1 Figure 1: The graph chair.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
50
+ page_content=' chromatic number of a graph G, denoted by χ(G), is the minimum number q for which G is q-colorable.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
51
+ page_content=' We call a graph G is k-chromatic when χ(G) = k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
52
+ page_content=' A graph G is k-critical if it is k-chromatic and χ(G − e) < χ(G) for any edge e ∈ E(G).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
53
+ page_content=' We call a graph is critical if it is k-critical for some integer k ≥ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
54
+ page_content=' A graph G is k-vertex-critical if χ(G) = k and χ(G−v) < k for any v ∈ V (G).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
55
+ page_content=' For a set H of graphs and a graph G, we say that G is k-vertex-critical H-free if it is k-vertex-critical and H-free.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
56
+ page_content=' Our research is mainly motivated by the following theorems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
57
+ page_content=' Theorem 1 ([7]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
58
+ page_content=' For any fixed k ≥ 5, there are infinitely many k-vertex-critical P5- free graphs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
59
Thus, it is natural to ask which subclasses of P5-free graphs have only finitely many k-vertex-critical graphs. Finiteness is of interest because, if a class contains only finitely many k-vertex-critical graphs, then there is a polynomial-time algorithm for (k − 1)-coloring the graphs in that class. In 2021, Cameron, Goedgebeur, Huang and Shi [4] obtained the following dichotomy result for k-vertex-critical (P5, H)-free graphs when |H| = 4.
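The algorithmic consequence can be made concrete: a graph in the class is (k − 1)-colorable if and only if it contains none of the finitely many k-vertex-critical graphs of the class as an induced subgraph, and each such containment test only inspects vertex subsets of bounded size. The sketch below (Python, brute force; names are ours) makes this explicit; for a fixed finite list the running time is polynomial in the order of the input graph.

from itertools import combinations, permutations

def induced_subgraph_equals(adj, subset, target_adj):
    # True if the subgraph of `adj` induced by `subset` is isomorphic to `target_adj`.
    t_vs = list(target_adj)
    for perm in permutations(subset):
        mapping = dict(zip(t_vs, perm))
        if all((mapping[v] in adj[mapping[u]]) == (v in target_adj[u])
               for u in t_vs for v in t_vs if u != v):
            return True
    return False

def is_k_minus_1_colorable(adj, critical_list):
    # critical_list: adjacency dicts of the finitely many k-vertex-critical
    # graphs of the class under consideration.
    for H in critical_list:
        for subset in combinations(adj, len(H)):
            if induced_subgraph_equals(adj, subset, H):
                return False
    return True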
Theorem 2 ([4]). Let H be a graph of order 4 and let k ≥ 5 be a fixed integer. Then there are infinitely many k-vertex-critical (P5, H)-free graphs if and only if H is 2P2 or P1 + K3.
In [4], it was also asked which five-vertex graphs H lead to finitely many k-vertex-critical (P5, H)-free graphs. It is known that there are finitely many 5-vertex-critical (P5, banner)-free graphs [3, 9], and finitely many k-vertex-critical (P5, P5)-free graphs for every fixed k [5]. Hell and Huang proved that there are finitely many k-vertex-critical (P6, C4)-free graphs [6]. This was later generalized to (Pt, K_{r,s})-free graphs in the context of H-coloring [10], which gives an affirmative answer for H = K_{2,3}. Recently, it was also shown that the answer to the above question is positive if H is the gem or P2 + P3 [2]. Moreover, it was proved that there are finitely many 5-vertex-critical (P5, bull)-free graphs [8]. In this article, we continue this line of research.
A chair is a P4 together with an additional vertex adjacent to one of the two middle vertices of the P4 (see Figure 1). In particular, we prove the following.

Theorem 3. There are finitely many 5-vertex-critical (P5, chair)-free graphs.
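Concretely, the two forbidden graphs and a brute-force (P5, chair)-freeness test for small graphs can be written down as follows (a sketch in the same style as above, reusing induced_subgraph_equals; vertex labels and helper names are ours).

from itertools import combinations

# the path on five vertices and the chair: a P4 (0-1-2-3) plus a pendant
# vertex 4 attached to the middle vertex 2
P5_GRAPH = {0: {1}, 1: {0, 2}, 2: {1, 3}, 3: {2, 4}, 4: {3}}
CHAIR = {0: {1}, 1: {0, 2}, 2: {1, 3, 4}, 3: {2}, 4: {2}}

def is_H_free(adj, H):
    return not any(induced_subgraph_equals(adj, s, H)
                   for s in combinations(adj, len(H)))

def is_P5_chair_free(adj):
    return is_H_free(adj, P5_GRAPH) and is_H_free(adj, CHAIR)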
2 Preliminaries

For general graph theory notation we follow [1]. Let G = (V, E) be a graph. If uv ∈ E, we say that u and v are neighbors or adjacent; otherwise u and v are nonneighbors or nonadjacent. We use u ∼ v to mean that u and v are neighbors, and u ≁ v to mean that u and v are nonneighbors. The neighborhood of a vertex v, denoted by N_G(v), is the set of neighbors of v. For a set X ⊆ V(G), let N_G(X) = (⋃_{v∈X} N_G(v)) \ X. We shall omit the subscript whenever the context is clear. For X, Y ⊆ V, we say that X is complete (resp. anticomplete) to Y if every vertex in X is adjacent (resp. nonadjacent) to every vertex in Y. If X = {x}, we write "x is complete (resp. anticomplete) to Y" instead of "{x} is complete (resp. anticomplete) to Y". If a vertex v is neither complete nor anticomplete to a set S, we say that v is mixed on S. If a vertex v is neither complete nor anticomplete to the two ends of an edge, we say that v distinguishes the edge. We say that H is a homogeneous set if no vertex in V − H is mixed on H. More generally, we say that H is homogeneous with respect to a subset S ⊆ V if no vertex in S is mixed on H. For S ⊆ V, the subgraph induced by S is denoted by G[S]. A pair of comparable vertices of G is a pair of nonadjacent vertices u, v such that N(v) ⊆ N(u) or N(u) ⊆ N(v). It is well known that k-vertex-critical graphs cannot contain comparable vertices. We shall use the following generalization in later proofs.
Lemma 1 ([4]). Let G be a k-vertex-critical graph. Then G has no two nonempty disjoint subsets X and Y of V(G) that satisfy all of the following conditions:
(i) X and Y are anticomplete to each other;
(ii) χ(G[X]) ≤ χ(G[Y]);
(iii) Y is complete to N(X).
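The lemma is easy to test mechanically on small examples; the non-existence of comparable vertices mentioned above is the special case X = {u}, Y = {v}. A minimal sketch (reusing chromatic_number from the earlier sketch; names are ours):

def satisfies_lemma1_conditions(adj, X, Y):
    # True if the pair (X, Y) meets conditions (i)-(iii); such a pair cannot
    # exist in a k-vertex-critical graph by Lemma 1.
    X, Y = set(X), set(Y)
    if not X or not Y or (X & Y):
        return False
    anticomplete = all(v not in adj[u] for u in X for v in Y)
    chi_ok = chromatic_number(adj, X) <= chromatic_number(adj, Y)
    NX = set().union(*(adj[u] for u in X)) - X
    y_complete_to_NX = all(w in adj[y] for y in Y for w in NX)
    return anticomplete and chi_ok and y_complete_to_NX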
3 New Results

In this section, we prove our new result: there are finitely many 5-vertex-critical (P5, chair)-free graphs. To prove Theorem 3, we prove the following.

Theorem 4. Let G be a 5-vertex-critical (P5, chair)-free graph. If G contains a C5, then G has finite order.

Proof of Theorem 3 assuming Theorem 4. Let G be a 5-vertex-critical (P5, chair)-free graph. If G contains a C5, then G has finite order by Theorem 4. If G is C5-free, then G has finite order by a result in [7] stating that there are only thirteen 5-vertex-critical (P5, C5)-free graphs. In either case, G has finite order. This completes the proof.

Next we prove Theorem 4.
3.1 Structure Around C5

In this subsection, we discuss some structural properties of (P5, chair)-free graphs containing a C5. Let G be a connected (P5, chair)-free graph containing an induced C5, and let C = v_1v_2v_3v_4v_5 be an induced C5 with v_iv_{i+1} ∈ E for each i. We partition V \ V(C) as follows, where all indices are modulo 5:

S_0 = {v ∈ V \ V(C) : N_C(v) = ∅},
S_1(i) = {v ∈ V \ V(C) : N_C(v) = {v_i}},
S^1_2(i) = {v ∈ V \ V(C) : N_C(v) = {v_i, v_{i+1}}},
S^2_2(i) = {v ∈ V \ V(C) : N_C(v) = {v_i, v_{i+2}}},
S^1_3(i) = {v ∈ V \ V(C) : N_C(v) = {v_{i−1}, v_i, v_{i+1}}},
S^2_3(i) = {v ∈ V \ V(C) : N_C(v) = {v_{i−2}, v_i, v_{i+2}}},
S_4(i) = {v ∈ V \ V(C) : N_C(v) = {v_{i−2}, v_{i−1}, v_{i+1}, v_{i+2}}},
S_5 = {v ∈ V \ V(C) : N_C(v) = V(C)}.

We use S^m_3(i ± 1) to denote S^m_3(i + 1) ∪ S^m_3(i − 1) for m = 1, 2. The notations S^m_3(i ± 2), S_4(i ± 1) and S_4(i ± 2) are defined similarly. We now prove some properties of these sets.
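This classification depends only on the neighbourhood of each vertex on the cycle, so it can be computed directly. A small sketch (names are ours; the cycle is passed as the ordered list of its five vertices, and the key of each bucket records which cycle positions a vertex sees, from which the corresponding set above can be read off):

def classify_on_C5(adj, cycle):
    pos = {v: idx for idx, v in enumerate(cycle)}   # positions 0..4 stand for v_1..v_5
    buckets = {}
    for u in adj:
        if u in pos:
            continue
        seen = frozenset(pos[w] for w in adj[u] if w in pos)
        buckets.setdefault(seen, set()).add(u)
    return buckets   # e.g. the bucket keyed by {i-2, i, i+2} (mod 5) corresponds to S^2_3(i)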
Claim 1. S_1(i) ∪ S^1_2(i) ∪ S^2_2(i) = ∅, for all 1 ≤ i ≤ 5.

Proof. Suppose not. If there is a vertex v ∈ S_1(i) ∪ S^1_2(i), then {v, v_i, v_{i−1}, v_{i−2}, v_{i−3}} induces a P5. If there is a vertex u ∈ S^2_2(i), then {u, v_i, v_{i−1}, v_{i−2}} and {v_{i+1}} induce a chair. In either case we obtain a contradiction.
Claim 2. S_0 = ∅.

Proof. Suppose not. We first show that N(S_0) ⊆ S_5. Since G is connected, there is a pair of vertices u and v such that u ∈ S_0, v ∈ V(G) \ S_0 and u ∼ v. If v ∈ S^1_3(i) for some i, then {u, v, v_{i+1}, v_{i+2}, v_{i−2}} induces a P5, a contradiction. If v ∈ S^2_3(i) ∪ S_4(i + 1) for some i, then {v_{i+1}, v_i, v, v_{i−2}} and {u} induce a chair, a contradiction. Thus v can only belong to S_5. But then the two nonempty disjoint subsets S_0 and V(C) of V(G) satisfy the three conditions of Lemma 1, a contradiction. Therefore, S_0 = ∅.
Claim 3. S^1_3(i) is a clique, for all 1 ≤ i ≤ 5.

Proof. Suppose not, and assume that there are two vertices u, v ∈ S^1_3(i) with u ≁ v. Then {v, v_{i+1}, v_{i+2}, v_{i−2}} and {u} induce a chair in G, a contradiction.
Claim 4. Each vertex in S_4(i) ∪ S_5 is either complete or anticomplete to each component of S^2_3(i), for all 1 ≤ i ≤ 5.

Proof. Suppose that some edge uv of S^2_3(i) is distinguished by a vertex s ∈ S_4(i) ∪ S_5. Without loss of generality, let s ∼ u and s ≁ v. Then {v_{i−1}, s, u, v} and {v_{i+1}} induce a chair.
Claim 5. Each vertex in V(G) − (S^2_3(i) ∪ S_4(i) ∪ S_5) is either complete or anticomplete to S^2_3(i), for all 1 ≤ i ≤ 5.

Proof. By symmetry, it suffices to consider the sets with index i, i + 1 and i + 2. Let v ∈ S^2_3(i). If v is adjacent to some s_1 ∈ S^1_3(i + 1), then {v_{i−1}, v_{i−2}, v, s_1, v_{i+1}} induces a P5. If v is not adjacent to some s_2 ∈ S^1_3(i) ∪ S^2_3(i + 1) ∪ S_4(i + 2), then {v_{i−1}, s_2, v_{i+1}, v_{i+2}, v} induces a P5. If v is not adjacent to some s_3 ∈ S^2_3(i + 2) ∪ S_4(i + 1), then {v_{i−1}, s_3, v_{i+2}, v} and {v_{i+1}} induce a chair. If v is not adjacent to some s_4 ∈ S^1_3(i + 2), then {v_{i−1}, v_i, v_{i+1}, v} and {s_4} induce a chair.
Claim 6. Every component of S^2_3(i) is a homogeneous set.

Proof. By Claims 4 and 5, there is no vertex of G \ S^2_3(i) that can distinguish an edge of S^2_3(i).
Let T_i = S^1_3(i ± 2) ∪ S^2_3(i ± 1) ∪ S^2_3(i ± 2) for each i.
Claim 7. S_4(i) is complete to T_i, for all 1 ≤ i ≤ 5.

Proof. By symmetry, it suffices to prove the claim for S^1_3(i + 2) ∪ S^2_3(i + 1) ∪ S^2_3(i + 2). Let v ∈ S_4(i). If v is not adjacent to some s_1 ∈ S^1_3(i + 2), then {v_i, v_{i−1}, v, v_{i+2}, s_1} induces a P5, a contradiction. If v is not adjacent to some s_2 ∈ S^2_3(i + 1), then {v_i, v_{i−1}, v, v_{i+2}} and {s_2} induce a chair, a contradiction. If v is not adjacent to some s_3 ∈ S^2_3(i + 2), then {s_3, v_i, v_{i+1}, v, v_{i−2}} induces a P5, a contradiction.
Claim 8. For each s ∈ S^1_3(i) ∪ S_4(i ± 2) and all u, v ∈ S_4(i) with uv ∉ E, s is not mixed on {u, v}, for all 1 ≤ i ≤ 5.

Proof. By symmetry, it suffices to prove the claim for S^1_3(i) ∪ S_4(i + 2). Let s ∈ S^1_3(i) ∪ S_4(i + 2) with s ∼ u and s ≁ v. Then {v_i, s, u, v_{i+2}, v} induces a P5.
Let R_i = S^1_3(i ± 1) ∪ S^2_3(i) ∪ S_4(i ± 1) ∪ S_5, for each i.
Claim 9. For each s ∈ R_i and all u, v ∈ S_4(i) with uv ∉ E, s is adjacent to at least one of u and v, for all 1 ≤ i ≤ 5.

Proof. By symmetry, it suffices to prove the claim for S^1_3(i + 1) ∪ S^2_3(i) ∪ S_4(i + 1) ∪ S_5. Let s_1 ∈ S^1_3(i + 1) ∪ S^2_3(i) ∪ S_4(i − 1); if s_1 is nonadjacent to both u and v, then {v, v_{i−1}, v_i, s_1} and {u} induce a chair. Let s_2 ∈ S_5; if s_2 is nonadjacent to both u and v, then {v_i, s_2, v_{i−2}, v} and {u} induce a chair.
Claim 10. Every vertex in S_4(i ± 2) is complete to any two vertices x, y ∈ S_4(i) with xy ∉ E.

Proof. By symmetry, let v ∈ S_4(i + 2). The vertex v cannot be mixed on {x, y} by Claim 8. If v ≁ x and v ≁ y, then {v_i, v, v_{i−2}, x} and {y} induce a chair. Hence v is complete to {x, y}.
3.2 Proof of Theorem 4

Let F = {K5, W, P, Q1, Q2, Q3} be the graph family shown in Figure 2. The adjacency lists of the graphs in F are given in the Appendix. It is routine to verify that every graph in F is a 5-vertex-critical (P5, chair)-free graph.
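This routine verification can also be mechanised with the sketches given earlier (is_k_vertex_critical and is_P5_chair_free), once each member of F is encoded as an adjacency dict from the adjacency lists in the Appendix; the encoding below is illustrative only and does not reproduce those lists.

def verify_family(family):
    # family: iterable of adjacency dicts, one per graph in F
    return all(is_k_vertex_critical(F, 5) and is_P5_chair_free(F)
               for F in family)

# e.g. verify_family([K5, W, P, Q1, Q2, Q3]) should return True,
# with each graph encoded like K5 above.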
Proof of Theorem 4. Let G be a 5-vertex-critical (P5, chair)-free graph. If G contains an induced F ∈ F, then G is isomorphic to F, since G is 5-vertex-critical. Therefore, we may assume that G is F-free. By Claims 1 and 2, G has finite order if and only if S_3 ∪ S_4 ∪ S_5 has finite size, where S_3 and S_4 denote the unions of the sets S^m_3(i) and S_4(i) over all i.
Claim 11. |S^1_3(i)| ≤ 2, for all 1 ≤ i ≤ 5.

Proof. If |S^1_3(i)| ≥ 3, then S^1_3(i) ∪ {v_i, v_{i+1}} contains a K5 by Claim 3, a contradiction.
Claim 12. χ(S^2_3(i) ∪ S_4(i) ∪ S_5) ≤ 2, for all 1 ≤ i ≤ 5.

Proof. If χ(S^2_3(i) ∪ S_4(i) ∪ S_5) ≥ 3, then the proper induced subgraph on S^2_3(i) ∪ S_4(i) ∪ S_5 ∪ {v_{i−2}, v_{i+2}} has chromatic number at least 5, contradicting the fact that G is 5-vertex-critical.
Claim 13. S_5 is an independent set.

Figure 2: The graph family F.

Proof. If there are two adjacent vertices u, v ∈ S_5, then G contains a W ∈ F, a contradiction.
Claim 14. Every homogeneous component of S^2_3(i) or S_4(i) is isomorphic to K1 or K2.

Proof. Let K be a component of S^2_3(i) or S_4(i). Since G has no induced K5 or W, K has no triangle and no C5. Since G is P5-free, K is bipartite, so χ(K) ≤ 2. Clearly, if χ(K) = 1, then K is isomorphic to K1. Now assume that χ(K) = 2, and let (X, Y) be the bipartition of K. Let x ∈ X and y ∈ Y with xy ∈ E. Suppose that (X ∪ Y) \ {x, y} ≠ ∅. Since G is 5-vertex-critical, G − ((X ∪ Y) \ {x, y}) has a 4-coloring φ. Without loss of generality, we may assume that φ(x) = 1 and φ(y) = 2. Now if we color every vertex in X with color 1 and every vertex in Y with color 2, then, since K is a homogeneous set (Claim 6), the resulting coloring is a proper 4-coloring of G. This contradicts the fact that G is 5-vertex-critical. So K is isomorphic to K2.
Claim 15. |S^2_3(i)| ≤ 3, for all 1 ≤ i ≤ 5.

Proof. Let K be a component of S^2_3(i). We say that K is of type t if χ(K) = t. We show that there is at most one component of type t for each t ∈ {1, 2}. Suppose to the contrary that K and K′ are two distinct components of the same type, and let k ∈ K and k′ ∈ K′. By Lemma 1, there are vertices u, v such that u ∈ N(K) \ N(K′) and v ∈ N(K′) \ N(K). By Claim 6, uk ∈ E, vk′ ∈ E and uk′, vk ∉ E. No vertex in V(G) − (S^2_3(i) ∪ S_4(i) ∪ S_5) can be mixed on two vertices of S^2_3(i) by Claim 5, so u, v ∈ S_4(i) ∪ S_5 by the choice of k and k′. If u ≁ v, then {k, u, v_{i+1}, v, k′} induces a P5; therefore u ∼ v. By Claim 13, u and v cannot both be in S_5. It is easy to see that V(C) ∪ {k, k′, u, v} then contains an induced P, a contradiction. As a result, S^2_3(i) has at most one component of each type, and so |S^2_3(i)| ≤ 3 by Claim 14.
Claim 16. For all 1 ≤ i ≤ 5, either S_4(i) is a star, or S_4(i) is complete to S_4(i + 2) ∪ S_4(i − 2).

Proof. If S_4(i) is disconnected, then S_4(i) is complete to S_4(i + 2) ∪ S_4(i − 2) by Claim 10. If S_4(i) is connected, then S_4(i) is bipartite (see the proof of Claim 14). If χ(S_4(i)) = 1, then S_4(i) is isomorphic to K1 and we are done. Now assume that |S_4(i)| ≥ 2, and let (X, Y) be the bipartition of S_4(i). If |X| ≥ 2 and |Y| ≥ 2, then every vertex in S_4(i ± 2) is complete to X ∪ Y by Claim 10, and thus S_4(i) is complete to S_4(i ± 2). Therefore, we may assume that |X| = 1, and so S_4(i) is a star.
Recall that R_i = S^1_3(i ± 1) ∪ S^2_3(i) ∪ S_4(i ± 1) ∪ S_5.
Claim 17. If S_4(i) is a star, then |S_4(i)| ≤ 2, for all 1 ≤ i ≤ 5.

Proof. Suppose that S_4(i) = X ∪ Y with Y = {y}. We show that |X| ≤ 1. Suppose not, and let x_1, x_2 ∈ X. By Lemma 1, there exist a ∈ N(x_1) \ N(x_2) and b ∈ N(x_2) \ N(x_1). Note that no vertex of G − R_i can be mixed on two nonadjacent vertices of X by Claims 7–10, so a, b ∈ R_i. If a ≁ b, then {x_1, a, v_i, b, x_2} induces a P5; so a ∼ b. It is not hard to check that G then contains one of Q1, Q2 and Q3, a contradiction. Thus |X| ≤ 1, and so |S_4(i)| ≤ 2.
Claim 18. For each i, if S_4(i) is complete to S_4(i ± 2) and R_i is not empty, then |S_4(i)| ≤ 6.

Proof. When S_4(i) is (P1 + P2)-free, S_4(i) is a complete bipartite graph. Let (X, Y) be the bipartition of S_4(i). We show that |X|, |Y| ≤ 3. Suppose not, and let x_1, x_2, x_3, x_4 be vertices in X. By Lemma 1, there are vertices a_1 ∈ N(x_1) \ N(x_2) and a_2 ∈ N(x_2) \ N(x_1). Notice that a_1, a_2 ∈ R_i by Claims 7–10. If a_1 ≁ a_2, then G contains the induced P5 {x_1, a_1, v_i, a_2, x_2}; so a_1 ∼ a_2. Then a_1 ∈ S^1_3(i − 1) ∪ S_4(i + 1) and a_2 ∈ S^1_3(i + 1) ∪ S_4(i − 1); otherwise, it is easy to check that G contains one of Q1 and Q2. Similarly, there exist a_3 ∈ N(x_3) \ N(x_4) and a_4 ∈ N(x_4) \ N(x_3) with a_3, a_4 ∈ R_i and a_3 ∼ a_4. Thus {x_3, x_4} is complete to {a_1, a_2}, and {x_1, x_2} is complete to {a_3, a_4}. This shows that a_1, a_2, a_3, a_4 are pairwise distinct. Then a_3 ∈ S^1_3(i − 1) ∪ S_4(i + 1) and a_4 ∈ S^1_3(i + 1) ∪ S_4(i − 1). Recall that S^1_3(i − 1) and S^1_3(i + 1) are cliques by Claim 3, and that S^1_3(i − 1) is complete to S_4(i + 1) and S^1_3(i + 1) is complete to S_4(i − 1) by Claim 7. If a_1 ≁ a_3 and a_2 ≁ a_4, then a_1, a_3 ∈ S_4(i + 1) and a_2, a_4 ∈ S_4(i − 1), and {v_{i−2}, v_{i+2}, x_3, a_1, a_2} induces a K5. Otherwise, if a_1 ∼ a_3, then {v_{i−1}, v_{i−2}, x_3, a_1, a_3} induces a K5; and if a_2 ∼ a_4, then {v_{i+1}, v_{i+2}, x_3, a_2, a_4} induces a K5, a contradiction. So |S_4(i)| ≤ 6 if S_4(i) is (P1 + P2)-free.
Now suppose that S_4(i) contains an induced P1 + P2, with vertex set {a, b, c} where a ≁ b, a ≁ c and b ∼ c. We first prove some useful facts about this P1 + P2.

S^1_3(i) is anticomplete to {a, b, c}.   (1)

Every x ∈ S^1_3(i) is either complete or anticomplete to {a, b, c} by Claim 8. If x is complete to {a, b, c}, then G contains an induced W, a contradiction. So x is anticomplete to {a, b, c}. This completes the proof of (1).

For any y ∈ R_i, the set {y, a, b, c} induces either a P4 or a 2P2.   (2)

Let y ∈ R_i. Note that {y} ∪ S_4(i) is triangle-free, or else G contains a K5. If y is not adjacent to a, then y ∼ b and y ∼ c by Claim 9, and then {y, b, c} is a triangle, so G contains a K5, a contradiction. So y ∼ a, and y cannot be adjacent to both b and c, since {y} ∪ S_4(i) is triangle-free. If y ≁ b and y ≁ c, then {y, a, b, c} induces a 2P2. If y is adjacent to exactly one vertex of {b, c}, we may assume by symmetry that y ∼ b and y ≁ c, and then {a, y, b, c} induces a P4. This completes the proof of (2).
+ page_content=' Let x ∈ S1 3(i), z ∈ S4(i)\\{a, b, c}, and we define Y1 = {y1 ∈ Ri : {y1, a, b, c} induces a P4}, and Y2 = {y2 ∈ Ri : 7 {y2, a, b, c} induces a 2P2}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
319
+ page_content=' S1 3(i) is anticomplete to S4(i)\\{a, b, c}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
320
+ page_content=' (3) If z ∼ x, then z is complete to {a, b, c} by (1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
321
+ page_content=' Now G contains an induced W, a contradiction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
322
+ page_content=' So z ≁ x.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
323
+ page_content=' This completes the proof of (3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
324
+ page_content=' So S1 3(i) is anticomplete to S4(i) by (1) and (3).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
325
+ page_content=' For any y1 ∈ Y1, z1 ∈ S4(i)\\{a, b, c}, z1y1, z1c ∈ E, and z1a, z1b /∈ E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
326
+ page_content=' (4) If z1 ≁ y1, then z1 ∼ c by y1c /∈ E and Claim 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
327
+ page_content=' So z1 ≁ b by Claim 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
328
+ page_content=' If z1 ≁ a, {y1, a, b, c, z} induces a P5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
329
+ page_content=' So z1 ∼ a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
330
+ page_content=' Then there is an induced C5 = {a, y1, b, c, z1}, contradicting Claim 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
331
+ page_content=' So z1 ∼ y1, then z1 ≁ a and z1 ≁ b since S4(i) is triangle- free.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
332
+ page_content=' If z1 ≁ c, {a, y1, b, c} and {z1} induce a chair.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
333
+ page_content=' So z1 ∼ c.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
334
+ page_content=' This completes the proof (4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
335
+ page_content=' For any y2 ∈ Y2, z2 ∈ S4(i)\\{a, b, c}, z2y2 ∈ E, and z2a, z2b, z2c /∈ E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
336
+ page_content=' (5) If z2 ≁ y2, then z2 ∼ b and z2 ∼ c by y2b, y2c /∈ E and Claim 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
337
+ page_content=' Then {z2, b, c} induces a triangle, contradicting Claim 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
338
+ page_content=' So z2 ∼ y2 and then z2 ≁ a by the fact that {y2} ∪ S4(i) is triangle-free.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
339
+ page_content=' If z2 is adjacent to exact one of b, c, then {z2, y2, a, b, c} induces a P5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
340
+ page_content=' So z2 ≁ b and z2 ≁ c.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
341
+ page_content=' This completes the proof (5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
342
+ page_content=' We can infer that any vertex in Ri is complete to S4(i)\\{a, b, c} by (4) and (5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
343
+ page_content=' Suppose that there exist two vertices z, z′ ∈ S4(i)\\{a, b, c}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
344
+ page_content=' If Y1 ̸= ∅ and Y2 ̸= ∅, z is adjacent to c by (4) and is nonadjacent to c by (5), a contradiction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
345
+ page_content=' So Ri = Y1 or Ri = Y2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
346
+ page_content=' Note that any vertex in Ri is complete to two ends of an edge of C5 ∩ N(S4(i)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
347
+ page_content=' Since G is K5-free, z ≁ z′.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
348
+ page_content=' Then N(z) = N(z′) by Claim 7, contradicting to Lemma 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
349
+ page_content=' So |S4(i)\\{a, b, c}| ≤ 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
350
+ page_content=' Then |S4(i)| ≤ 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/H9E0T4oBgHgl3EQfhwGK/content/2301.02436v1.pdf'}
351
Claim 19. For each i, if S4(i) is complete to S4(i ± 2) and Ri is empty, then |S4(i)| ≤ 2.

Proof. If S4(i) is disconnected, then there are two components K1, K2 of S4(i). Every vertex of S^1_3(i) is either complete or anticomplete to K1 ∪ K2 by Claim 8. So K1 and K2 are homogeneous components by Claims 7 to 10. Moreover, N(K1) = N(K2) ⊆ Ti ∪ S^1_3(i) ∪ S4(i ± 2) ∪ C5. This contradicts Lemma 1. Therefore, S4(i) is connected. Recall that χ(S4(i)) ≤ 2 by Claim 12. If χ(S4(i)) = 1, then |S4(i)| = |K1| = 1 and we are done. If χ(S4(i)) = 2, then S4(i) is a bipartite graph; let (X, Y) be the bipartition of S4(i). Every vertex s ∈ S^1_3(i) is either complete or anticomplete to X (resp. Y) by Claim 8. So X (resp. Y) is homogeneous with respect to G − Y (resp. G − X). If there are x ∈ X and y ∈ Y with x ≁ y, then no vertex s ∈ S^1_3(i) can be mixed on S4(i). Then S4(i) is a homogeneous set, and |S4(i)| = |K2| = 2 by Claim 14. If X is complete to Y, then X is a homogeneous set. For any two vertices x1, x2 ∈ X, we have N(x1) = N(x2), contradicting Lemma 1. So |X| = 1, and in the same way |Y| = 1. Therefore, |S4(i)| ≤ 2.
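The step above passes from χ(S4(i)) ≤ 2 to a bipartition (X, Y): a graph with chromatic number at most 2 is exactly a bipartite graph, and for a connected graph the bipartition can be computed by a breadth-first 2-coloring. The following is a minimal Python sketch of that routine; it is purely illustrative, and the function name and example graph are hypothetical, not taken from the paper.

from collections import deque

def bipartition(graph, start):
    """Two-color a connected graph given as {vertex: set(neighbors)}.
    Returns (X, Y) if the graph is bipartite, or None if an odd cycle exists."""
    color = {start: 0}
    queue = deque([start])
    while queue:
        u = queue.popleft()
        for v in graph[u]:
            if v not in color:
                color[v] = 1 - color[u]
                queue.append(v)
            elif color[v] == color[u]:
                return None  # odd cycle found: not bipartite
    X = {v for v, c in color.items() if c == 0}
    Y = {v for v, c in color.items() if c == 1}
    return X, Y

# Example: a path on four vertices splits as ({0, 2}, {1, 3}).
path = {0: {1}, 1: {0, 2}, 2: {1, 3}, 3: {2}}
print(bipartition(path, 0))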
Claim 20. |S4(i)| ≤ 6.

Proof. It follows from Claims 17 to 19 that |S4(i)| ≤ 6.
Claim 21. |S5| ≤ 255.

Proof. Suppose that |S5| > 255. Any two vertices in S5 are nonadjacent by Claim 13. By the pigeonhole principle, there are two vertices u, v ∈ S5 such that N(u) = N(v), contradicting Lemma 1. So |S5| ≤ 25(|S^1_3(i) ∪ S^2_3(i) ∪ S4(i)|) ≤ 25(2 + 3 + 6) = 255.
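The pigeonhole step can be made concrete: in a set of pairwise nonadjacent vertices, as soon as there are more vertices than possible distinct neighborhoods, two of them must have exactly the same neighborhood. The following short Python sketch finds such a pair in a graph given as an adjacency dictionary; the function name and the toy graph are illustrative only, not from the paper.

def equal_neighborhood_pair(graph, independent_set):
    """Return two vertices of `independent_set` with identical neighborhoods,
    or None if all neighborhoods are distinct."""
    seen = {}
    for v in independent_set:
        key = frozenset(graph[v])  # the neighborhood N(v)
        if key in seen:
            return seen[key], v
        seen[key] = v
    return None

# Tiny illustration: in the star K_{1,3} the three leaves are pairwise
# nonadjacent and all have the same neighborhood {0}.
star = {0: {1, 2, 3}, 1: {0}, 2: {0}, 3: {0}}
print(equal_neighborhood_pair(star, [1, 2, 3]))  # e.g. (1, 2)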
The lemma follows from Claim 11, Claim 15, Claim 20, and Claim 21.
4 Appendix

Below we give the adjacency lists of the graphs in F other than K5.

Graph W: {0: 1 4 5 6; 1: 0 2 5 6; 2: 1 3 5 6; 3: 2 4 5 6; 4: 0 3 5 6; 5: 0 1 2 3 4 6; 6: 0 1 2 3 4 5}

Graph P: {0: 1 4 5 6; 1: 0 2 7 8; 2: 1 3 5 6 7 8; 3: 2 4 5 6 7 8; 4: 0 3 7 8; 5: 0 2 3 7; 6: 0 2 3 8; 7: 1 2 3 4 5 8; 8: 1 2 3 4 6 7}

Graph Q1: {0: 1 4 5 6; 1: 0 2 5 6 7 8; 2: 1 3 5 6 7 8; 3: 2 4 7 8; 4: 0 3 7 8; 5: 0 1 2 6 7; 6: 0 1 2 5 8; 7: 1 2 3 4 5; 8: 1 2 3 4 6}

Graph Q2: {0: 1 4 5 6; 1: 0 2 5 6 7 8; 2: 1 3 5 6 7 8; 3: 2 4 5 6 7 8; 4: 0 3 7 8; 5: 0 2 3 6 7; 6: 0 2 3 5 8; 7: 1 2 3 4 5; 8: 1 2 3 4 6}

Graph Q3: {0: 1 4 5 6; 1: 0 2 5 7 8; 2: 1 3 5 7 8; 3: 2 4 6 7 8; 4: 0 3 6 7 8; 5: 0 1 2 6; 6: 0 3 4 5 8; 7: 1 2 3 4 8; 8: 1 2 3 4 6 7}
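These adjacency lists are easy to mis-transcribe, so a quick machine check is useful. The following Python sketch is illustrative only (the helper names are not from the paper): it parses the notation used above into a dictionary, verifies that the lists are symmetric, and confirms by brute force that a graph contains no K5, shown here for Graph W.

from itertools import combinations

def parse_adjacency(spec):
    """Parse '0: 1 4 5 6; 1: 0 2 5 6; ...' into {0: {1, 4, 5, 6}, 1: {0, 2, 5, 6}, ...}."""
    graph = {}
    for entry in spec.split(";"):
        vertex, neighbours = entry.split(":")
        graph[int(vertex)] = {int(u) for u in neighbours.split()}
    return graph

def is_symmetric(graph):
    # Every edge must appear in both endpoints' adjacency lists.
    return all(u in graph[v] for u in graph for v in graph[u])

def has_k5(graph):
    # Brute force over all 5-vertex subsets; fine for graphs this small.
    return any(all(v in graph[u] for u, v in combinations(subset, 2))
               for subset in combinations(graph, 5))

# Graph W from the appendix.
W = parse_adjacency("0: 1 4 5 6; 1: 0 2 5 6; 2: 1 3 5 6; 3: 2 4 5 6; "
                    "4: 0 3 5 6; 5: 0 1 2 3 4 6; 6: 0 1 2 3 4 5")
print(is_symmetric(W), has_k5(W))  # expected: True False

The same check can be run on P, Q1, Q2, and Q3 by passing their adjacency strings to parse_adjacency.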
I9AzT4oBgHgl3EQfjv2R/content/2301.01521v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81424c293ca4647f4aa5ec762d3aaff330b02f378b6c5856b89dadc141809ad3
+ size 1888406