jackkuo committed on
Commit 9eab722 · verified · 1 Parent(s): 16e5a49

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. .gitattributes +71 -0
  2. 0dFQT4oBgHgl3EQf0Daw/content/tmp_files/2301.13415v1.pdf.txt +1215 -0
  3. 0dFQT4oBgHgl3EQf0Daw/content/tmp_files/load_file.txt +0 -0
  4. 1dAyT4oBgHgl3EQf1fkT/content/tmp_files/2301.00734v1.pdf.txt +1547 -0
  5. 1dAyT4oBgHgl3EQf1fkT/content/tmp_files/load_file.txt +0 -0
  6. 1tE4T4oBgHgl3EQfaQwc/vector_store/index.pkl +3 -0
  7. 29E1T4oBgHgl3EQfAQI2/content/tmp_files/2301.02836v1.pdf.txt +1606 -0
  8. 3NAyT4oBgHgl3EQfo_gV/content/2301.00515v1.pdf +3 -0
  9. 3NAyT4oBgHgl3EQfo_gV/vector_store/index.faiss +3 -0
  10. 3NFST4oBgHgl3EQfYjhQ/vector_store/index.faiss +3 -0
  11. 3tAyT4oBgHgl3EQfb_f6/content/2301.00276v1.pdf +3 -0
  12. 3tAyT4oBgHgl3EQfb_f6/vector_store/index.faiss +3 -0
  13. 3tAzT4oBgHgl3EQfuf3H/content/tmp_files/2301.01693v1.pdf.txt +940 -0
  14. 3tAzT4oBgHgl3EQfuf3H/content/tmp_files/load_file.txt +0 -0
  15. 5NFIT4oBgHgl3EQf7itm/content/2301.11398v1.pdf +3 -0
  16. 5NFIT4oBgHgl3EQf7itm/vector_store/index.faiss +3 -0
  17. 5NFIT4oBgHgl3EQf7itm/vector_store/index.pkl +3 -0
  18. 5NFKT4oBgHgl3EQf-C45/content/tmp_files/2301.11956v1.pdf.txt +2309 -0
  19. 5NFKT4oBgHgl3EQf-C45/content/tmp_files/load_file.txt +0 -0
  20. 5dE0T4oBgHgl3EQfegDz/content/tmp_files/2301.02393v1.pdf.txt +2304 -0
  21. 5dE0T4oBgHgl3EQfegDz/content/tmp_files/load_file.txt +0 -0
  22. 69AzT4oBgHgl3EQfEvpR/vector_store/index.faiss +3 -0
  23. 6NE0T4oBgHgl3EQfewDQ/content/tmp_files/2301.02396v1.pdf.txt +1674 -0
  24. 6NE0T4oBgHgl3EQfewDQ/content/tmp_files/load_file.txt +0 -0
  25. 6NE4T4oBgHgl3EQf1g37/content/2301.05292v1.pdf +3 -0
  26. 6NE4T4oBgHgl3EQf1g37/vector_store/index.faiss +3 -0
  27. 6NE4T4oBgHgl3EQf1g37/vector_store/index.pkl +3 -0
  28. 6NFKT4oBgHgl3EQfTS23/content/tmp_files/2301.11779v1.pdf.txt +568 -0
  29. 6NFKT4oBgHgl3EQfTS23/content/tmp_files/load_file.txt +301 -0
  30. 7NE2T4oBgHgl3EQfPQZw/vector_store/index.pkl +3 -0
  31. 89E0T4oBgHgl3EQfwgGc/content/2301.02634v1.pdf +3 -0
  32. 89E0T4oBgHgl3EQfwgGc/vector_store/index.faiss +3 -0
  33. 89E0T4oBgHgl3EQfwgGc/vector_store/index.pkl +3 -0
  34. 8dAyT4oBgHgl3EQfp_jS/content/tmp_files/2301.00536v1.pdf.txt +5015 -0
  35. 8dAyT4oBgHgl3EQfp_jS/content/tmp_files/load_file.txt +0 -0
  36. 8tE2T4oBgHgl3EQfPwar/content/tmp_files/2301.03763v1.pdf.txt +2171 -0
  37. 8tE2T4oBgHgl3EQfPwar/content/tmp_files/load_file.txt +0 -0
  38. 9tFJT4oBgHgl3EQfoyxM/content/2301.11597v1.pdf +3 -0
  39. 9tFJT4oBgHgl3EQfoyxM/vector_store/index.faiss +3 -0
  40. 9tFJT4oBgHgl3EQfoyxM/vector_store/index.pkl +3 -0
  41. AtAzT4oBgHgl3EQfhv1C/content/tmp_files/2301.01488v1.pdf.txt +1850 -0
  42. AtAzT4oBgHgl3EQfhv1C/content/tmp_files/load_file.txt +0 -0
  43. C9E4T4oBgHgl3EQfFwy_/vector_store/index.pkl +3 -0
  44. CtE0T4oBgHgl3EQfQQAs/content/2301.02189v1.pdf +3 -0
  45. CtE0T4oBgHgl3EQfQQAs/vector_store/index.faiss +3 -0
  46. DNE4T4oBgHgl3EQfGAw7/content/tmp_files/2301.04890v1.pdf.txt +0 -0
  47. DNE4T4oBgHgl3EQfGAw7/content/tmp_files/load_file.txt +0 -0
  48. DdAzT4oBgHgl3EQfwf7y/content/2301.01725v1.pdf +3 -0
  49. DdAzT4oBgHgl3EQfwf7y/vector_store/index.faiss +3 -0
  50. FtAzT4oBgHgl3EQfHPtI/vector_store/index.pkl +3 -0
.gitattributes CHANGED
@@ -3987,3 +3987,74 @@ q9AzT4oBgHgl3EQfA_op/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -tex
3987
  NNFRT4oBgHgl3EQfGTcp/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3988
  UtAzT4oBgHgl3EQfX_xg/content/2301.01327v1.pdf filter=lfs diff=lfs merge=lfs -text
3989
  69AzT4oBgHgl3EQfEvpR/content/2301.00998v1.pdf filter=lfs diff=lfs merge=lfs -text
3990
+ 3tAyT4oBgHgl3EQfb_f6/content/2301.00276v1.pdf filter=lfs diff=lfs merge=lfs -text
3991
+ GNE0T4oBgHgl3EQfhQF8/content/2301.02429v1.pdf filter=lfs diff=lfs merge=lfs -text
3992
+ DdAzT4oBgHgl3EQfwf7y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3993
+ CtE0T4oBgHgl3EQfQQAs/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3994
+ UtAzT4oBgHgl3EQfX_xg/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3995
+ mNE_T4oBgHgl3EQf6xyS/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3996
+ NtE3T4oBgHgl3EQfwwva/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3997
+ rdE3T4oBgHgl3EQfMgkz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3998
+ vNAyT4oBgHgl3EQfafc7/content/2301.00242v1.pdf filter=lfs diff=lfs merge=lfs -text
3999
+ qdE2T4oBgHgl3EQf0gjz/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4000
+ 89E0T4oBgHgl3EQfwgGc/content/2301.02634v1.pdf filter=lfs diff=lfs merge=lfs -text
4001
+ RdE3T4oBgHgl3EQfygvM/content/2301.04721v1.pdf filter=lfs diff=lfs merge=lfs -text
4002
+ DdAzT4oBgHgl3EQfwf7y/content/2301.01725v1.pdf filter=lfs diff=lfs merge=lfs -text
4003
+ jdE1T4oBgHgl3EQfNQNY/content/2301.02999v1.pdf filter=lfs diff=lfs merge=lfs -text
4004
+ CtE0T4oBgHgl3EQfQQAs/content/2301.02189v1.pdf filter=lfs diff=lfs merge=lfs -text
4005
+ 3NFST4oBgHgl3EQfYjhQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4006
+ 6NE4T4oBgHgl3EQf1g37/content/2301.05292v1.pdf filter=lfs diff=lfs merge=lfs -text
4007
+ WNE2T4oBgHgl3EQfDgYF/content/2301.03624v1.pdf filter=lfs diff=lfs merge=lfs -text
4008
+ 69AzT4oBgHgl3EQfEvpR/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4009
+ hNE3T4oBgHgl3EQf4AvA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4010
+ vNAyT4oBgHgl3EQfafc7/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4011
+ 3NAyT4oBgHgl3EQfo_gV/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4012
+ wdE3T4oBgHgl3EQflQpC/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4013
+ cNE0T4oBgHgl3EQf4wL5/content/2301.02744v1.pdf filter=lfs diff=lfs merge=lfs -text
4014
+ 3tAyT4oBgHgl3EQfb_f6/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4015
+ 5NFIT4oBgHgl3EQf7itm/content/2301.11398v1.pdf filter=lfs diff=lfs merge=lfs -text
4016
+ wdE3T4oBgHgl3EQflQpC/content/2301.04604v1.pdf filter=lfs diff=lfs merge=lfs -text
4017
+ ydE2T4oBgHgl3EQf3wjf/content/2301.04175v1.pdf filter=lfs diff=lfs merge=lfs -text
4018
+ stE_T4oBgHgl3EQf8xx6/content/2301.08377v1.pdf filter=lfs diff=lfs merge=lfs -text
4019
+ zdAyT4oBgHgl3EQfbPea/content/2301.00259v1.pdf filter=lfs diff=lfs merge=lfs -text
4020
+ GNAyT4oBgHgl3EQfSvfA/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4021
+ 6NE4T4oBgHgl3EQf1g37/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4022
+ ptAyT4oBgHgl3EQfzflz/content/2301.00702v1.pdf filter=lfs diff=lfs merge=lfs -text
4023
+ 89E0T4oBgHgl3EQfwgGc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4024
+ ntE2T4oBgHgl3EQfJwYe/content/2301.03694v1.pdf filter=lfs diff=lfs merge=lfs -text
4025
+ JdAyT4oBgHgl3EQfsPlo/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4026
+ a9E4T4oBgHgl3EQfoQ0b/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4027
+ bNAyT4oBgHgl3EQfivjL/content/2301.00403v1.pdf filter=lfs diff=lfs merge=lfs -text
4028
+ jdA0T4oBgHgl3EQfIv9Y/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4029
+ aNE3T4oBgHgl3EQfdAo0/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4030
+ zdAyT4oBgHgl3EQfbPea/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4031
+ ZtAyT4oBgHgl3EQfvvnv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4032
+ cNE0T4oBgHgl3EQf4wL5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4033
+ RdA0T4oBgHgl3EQfDv9U/content/2301.02007v1.pdf filter=lfs diff=lfs merge=lfs -text
4034
+ mtFPT4oBgHgl3EQf4zVh/content/2301.13194v1.pdf filter=lfs diff=lfs merge=lfs -text
4035
+ a9FLT4oBgHgl3EQfXi8l/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4036
+ ZtAyT4oBgHgl3EQfvvnv/content/2301.00638v1.pdf filter=lfs diff=lfs merge=lfs -text
4037
+ HNA0T4oBgHgl3EQfBv_D/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4038
+ b9FPT4oBgHgl3EQfBTTg/content/2301.12985v1.pdf filter=lfs diff=lfs merge=lfs -text
4039
+ T9E4T4oBgHgl3EQfmg0h/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4040
+ ntE2T4oBgHgl3EQfJwYe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4041
+ HNA0T4oBgHgl3EQfBv_D/content/2301.01981v1.pdf filter=lfs diff=lfs merge=lfs -text
4042
+ VtAyT4oBgHgl3EQfV_ek/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4043
+ 9tFJT4oBgHgl3EQfoyxM/content/2301.11597v1.pdf filter=lfs diff=lfs merge=lfs -text
4044
+ ldAyT4oBgHgl3EQfk_iC/content/2301.00444v1.pdf filter=lfs diff=lfs merge=lfs -text
4045
+ jdA0T4oBgHgl3EQfIv9Y/content/2301.02079v1.pdf filter=lfs diff=lfs merge=lfs -text
4046
+ T9E4T4oBgHgl3EQfmg0h/content/2301.05168v1.pdf filter=lfs diff=lfs merge=lfs -text
4047
+ vdE4T4oBgHgl3EQfXAw5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4048
+ 5NFIT4oBgHgl3EQf7itm/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4049
+ c9FIT4oBgHgl3EQfnivD/content/2301.11315v1.pdf filter=lfs diff=lfs merge=lfs -text
4050
+ XNAyT4oBgHgl3EQfh_h5/content/2301.00387v1.pdf filter=lfs diff=lfs merge=lfs -text
4051
+ 3NAyT4oBgHgl3EQfo_gV/content/2301.00515v1.pdf filter=lfs diff=lfs merge=lfs -text
4052
+ c9FIT4oBgHgl3EQfnivD/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4053
+ bdFST4oBgHgl3EQfCjh5/content/2301.13707v1.pdf filter=lfs diff=lfs merge=lfs -text
4054
+ xdE0T4oBgHgl3EQf-gIr/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4055
+ stFKT4oBgHgl3EQf1y5_/content/2301.11921v1.pdf filter=lfs diff=lfs merge=lfs -text
4056
+ XNAyT4oBgHgl3EQfh_h5/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4057
+ stFKT4oBgHgl3EQf1y5_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
4058
+ NtE3T4oBgHgl3EQfwwva/content/2301.04706v1.pdf filter=lfs diff=lfs merge=lfs -text
4059
+ a9E4T4oBgHgl3EQfoQ0b/content/2301.05182v1.pdf filter=lfs diff=lfs merge=lfs -text
4060
+ 9tFJT4oBgHgl3EQfoyxM/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
0dFQT4oBgHgl3EQf0Daw/content/tmp_files/2301.13415v1.pdf.txt ADDED
@@ -0,0 +1,1215 @@
1
+ LOGAI: A LIBRARY FOR LOG ANALYTICS AND INTELLIGENCE
2
+ Qian Cheng, Amrita Saha, Wenzhuo Yang, Chenghao Liu, Doyen Sahoo, Steven Hoi
3
+ Salesforce AI Research
4
+ {qcheng, amrita.saha, wenzhuo.yang, chenghao.liu, dsahoo, shoi}@salesforce.com
5
+ ABSTRACT
6
+ Software and System logs record runtime information about processes executing within a system.
7
+ These logs have become the most critical and ubiquitous forms of observability data that help
8
+ developers understand system behavior, monitor system health and resolve issues. However, the
9
+ volume of logs generated can be humongous (of the order of petabytes per day) especially for complex
10
+ distributed systems, such as cloud, search engine, social media, etc. This has propelled a lot of
11
+ research on developing AI-based log analytics and intelligence solutions that can process
12
+ huge volumes of raw logs and generate insights. In order to enable users to perform multiple types
13
+ of AI-based log analysis tasks in a uniform manner, we introduce LogAI (https://github.com/
14
+ salesforce/logai), a one-stop open source library for log analytics and intelligence. LogAI
15
+ supports tasks such as log summarization, log clustering and log anomaly detection. It adopts the
16
+ OpenTelemetry data model, to enable compatibility with different log management platforms. LogAI
17
+ provides a unified model interface and provides popular time-series, statistical learning and deep
18
+ learning models. Alongside this, LogAI also provides an out-of-the-box GUI for users to conduct
19
+ interactive analysis. With LogAI, we can also easily benchmark popular deep learning algorithms for
20
+ log anomaly detection without putting in redundant effort to process the logs. We have open-sourced
21
+ LogAI to cater to a wide range of applications benefiting both academic research and industrial
22
+ prototyping.
23
+ Keywords Log Analysis · Machine Learning · Anomaly Detection · Clustering · Artificial Intelligence · AIOps
24
+ 1
25
+ Introduction
26
+ System and Software logs are text messages that are embedded by software and application developers in the source
27
+ code and are designed to carry useful runtime information about the process, which are typically dumped as raw log
28
+ files, once the system starts executing. In modern computer systems, especially for large distributed systems that run
29
+ complex software, such as search engines, social network websites, and cloud platforms, logs are one of the most
30
+ critical observability data. Logs are widely used in a variety of operational tasks, covering use cases such as system
31
+ availability, reliability and security. In scenarios when users have no direct access to the physical servers, logs are often
32
+ the ground truth about the systems and applications. As such, Log Management has become a very important task in
33
+ the industrial landscape. In fact, log management market size grew to $2.29 billion in 2023, at a compound annual
34
+ growth rate (CAGR) of 15.9%, according to the report from The Business [1].
35
+ Ideally, logs should capture runtime information at a very granular level and be stored permanently so that
36
+ when any disruptive incident occurs, developers and operators can always look up the correct log file and inspect the
37
+ log messages to debug what caused the incident. In reality though, because of the colossal size of the log dumps,
38
+ storing them permanently in the raw form is often impractical. This challenge can be mitigated with the help of large
39
+ cloud-based logging systems such as AWS Cloudwatch and Microsoft Azure Logs where it is possible to even store
40
+ the entire log data and retain them for a substantial period of time. Moreover, these logging systems also provide
41
+ capabilities to help efficient log querying and visualization, enabling developers and operators to quickly access the log
42
+ dumps or log streams of their software. With these capabilities, the main open question is, how to explore raw logs and
43
+ find the right set of logs associated with an incident? followed by a more advanced one - Is there a way to automatically
44
+ analyze the logs and tell if there are issues with a system, create incidents and provide additional insights?
45
+ arXiv:2301.13415v1 [cs.AI] 31 Jan 2023
48
+ Depending on which operational stage logs are involved in, the goal of log analysis in that specific situation could be
49
+ different. Logs can be used for incident detection, where reliability engineers and developers need to continuously
50
+ monitor the log streams in order to detect any unexpected behavior that might be indicative of an incident. For post
51
+ incident detection, log data can play a critical role in root-cause analysis, where operators examine the raw logs to
52
+ identify the loglines that show anomalous patterns and thus localize the anomaly and eventually the root cause of the
53
+ incident to a single service, component or module or a group of them. The situation becomes even more complex in
54
+ large distributed systems, where people (typically reliability engineers) who inspect the logs to resolve incidents may
55
+ not necessarily be the same group of people (i.e. software and application developers) who write the logging statements
56
+ in software code. In these situations, understanding even simple dump logs can take a significant amount of time and
57
+ effort, owing to the open-ended nature of the log data.
58
+ Over the past decade there have been various efforts targeted at developing both commercial and open-source software
59
+ to cater to automated log analysis. Though most of the initial work used either domain-specific rules or heuristics,
60
+ with the proliferation of AI and ML, more and more data-driven techniques have been adopted and popularized in this
61
+ community. However, most of the AI-driven effort has been applied in an isolated manner, focusing on specific log
62
+ analysis tasks (like how to extract structure out of the raw logs or how to detect anomaly patterns in it). There is still an
63
+ urgent need for bringing together all the AI, ML and NLP techniques to a unified platform that can cater to the entire
64
+ suite of different log analysis tasks. Nevertheless, creating such a one-stop library to serve a diverse set of log-based
65
+ analytics can be quite non-trivial, with some of the potential challenges being as follows:
66
+ • Lack of unified log data model for log analysis. Different logs are in different formats and as a result
67
+ analysis tools need to be customized for different log formats and schemas. It is not easy to generalize
68
+ analytical algorithms without a unified data model that can handle heterogeneous forms of log data.
69
+ • Redundant effort in data preprocessing and information extraction. The current status of log analytics
70
+ in this community is that there is a lack of a consolidated pipeline for data preprocessing and information
71
+ extraction across all log analysis models and tasks - i.e. different log analysis algorithms have been implemented
72
+ independently, with each adopting its own pipeline and workflow. For different tasks, or even different
73
+ algorithms of the same task, developers need to implement multiple redundant preprocessing and information
74
+ extraction process modules.
75
+ • Difficulty in managing log analysis experiments and benchmarking. Empirical benchmarking forms a
76
+ critical part of research and applied science. In the existing literature, there is no unified workflow management
77
+ mechanism to run log analysis benchmarking experiments. For example, while there has been some isolated
78
+ pockets of deep learning research for log anomaly detection, it is quite challenging for other organizations or
79
+ users to adopt them or reproduce their experimental results, due to the lack of a common unified framework
80
+ for log analysis.
81
+ In this interdisciplinary community of AIOps, users in different roles may have different needs when working on log analysis in
82
+ academic and industrial settings. For example, 1) Machine learning researchers may
83
+ need a hassle-free way to perform benchmarking experiments on public log datasets and reproduce the experimental
84
+ results from peer research groups in order to develop new log analysis algorithms; 2) Industrial data scientists and
85
+ AIOps practitioners may need an intuitive workflow to quickly experiment with existing log analysis algorithms on
86
+ their own log data and select the best performing algorithm, hyperparameters and experimental configurations as their
87
+ log analysis solution, and 3) Data and software engineers need to integrate the selected algorithm into production and
88
+ deploy them in a smooth and efficient way. Unfortunately, we realize there is no existing open source toolkit that can
89
+ satisfy all the above needs.
90
+ We are thus motivated to develop a holistic LogAI solution - a python library aimed for conducting AI-based log
91
+ analytics and intelligence tasks to serve a variety of academic and industrial use-cases. LogAI (https://github.
92
+ com/salesforce/logai) provides a unified way to conduct a variety of log analysis tasks such as log summarization,
93
+ clustering, and anomaly detection. LogAI also provides a unified data model, inherited from the OpenTelemetry log data
94
+ model, to handle logs in different formats. LogAI is also the first open source log analytics library that incorporates
95
+ time-series algorithms, statistical learning algorithms and deep learning algorithms. Moreover, LogAI implements an
96
+ out-of-the-box GUI portal to conduct log analysis in an interactive way, aligning more naturally with the user experience of
97
+ real-world log analysis.
98
+ In addition, in this technical report we also demonstrate how to use LogAI to easily benchmark deep learning algorithms
99
+ for log anomaly detection without any redundant effort in log preprocessing and cleaning. In this community, there are
100
+ existing libraries like LogLizer and Deep-Loglizer [2, 3] which have consolidated some of the AI/ML effort for the log
101
+ domain. However, they still suffer from a few limitations - for example lacking a unified data processing pipeline that is
102
+ generic across all tasks or algorithms or catering to only anomaly detection as the log analysis task or covering only a
106
+ specific type of algorithms. In Section 5, we elaborate on the limitations of these existing libraries and also show how
107
+ LogAI provides a more intuitive framework for designing and managing the experimental settings while performing
108
+ comparable to Deep-Loglizer.
109
+ 2
110
+ Related Work
111
+ Recently, researchers and engineers have been working on a variety of problems in automated log analysis in
112
+ academia and industry [4]. Based on the existing solutions, we can summarize a common workflow to conduct
113
+ automated log analysis. The common workflow contains four steps: log collection, log cleaning and preprocessing, log
114
+ information extraction, and log analysis and intelligence applications (Figure 1). Log collection is the data loading step
115
+ that collects logs from local log dump files or log management platforms. Log cleaning and preprocessing is the step
116
+ to use predefined rules and domain knowledge to clean noisy log data, remove or replace known log templates. This
117
+ step usually does not involve any ML process. Log information extraction is the step where ML models are involved
118
+ to extract information from log data, and feed the log representation or features to train ML models for analytics and
119
+ intelligence application tasks. Log information extraction usually contains several steps like log parsing, partitioning,
120
+ feature extraction, etc. The final step, log analytics and intelligence, is to train ML models for a specific log downstream
121
+ task. For example, log clustering and summarization are common log analytics tasks, while log based anomaly detection
122
+ and root-cause analysis are common log intelligence tasks.
123
+ Figure 1: Common Log Analytics and Intelligence Workflow. The common workflow contains four steps: 1) log
124
+ collection from local log files or log platforms, 2) log cleaning and preprocessing, 3) log information extraction and 4)
125
+ log analytics tasks (such as clustering and summarization) and log intelligence tasks (such as anomaly detection and
126
+ root-cause analysis).
127
+ Log analysis has a very long history and there are a lot of tools for log analysis. Almost all commercial log management
128
+ software/SaaS have associated log analysis/ log insights offerings. This includes log management products such as
129
+ Splunk, DataDog, NewRelic, etc., as well as cloud providers such as Amazon AWS, Microsoft Azure and Google
130
+ Cloud. In the open-source community, there are also very popular log management and analysis projects such as GreyLogs,
131
+ Grafana, Prometheus, etc. However, neither these commercial log management platforms nor open-source log analysis
132
+ tools incorporate comprehensive AI techniques such as deep learning, large language models (LLMs), BERT,
133
+ etc.
134
+ Meanwhile, there are a few open-source AI-based log analysis tools that started to support more comprehensive AI
135
+ techniques. For example, LOGPAI (https://github.com/logpai/) is one of the most famous log analysis communities on
136
+ GitHub. LOGPAI provides logparser for automated log parsing. LOGPAI also provides loglizer [5] and deep-loglizer [6]
137
+ for log anomaly detection. Besides LOGPAI, there are other open-source projects, most of which are open source code
138
+ from research outcomes, such as LogClass and Log2Vec from NetManAIOps (https://github.com/orgs/NetManAIOps).
139
+ 3
140
+ Design Principles
141
+ In this section we discuss the design principles of the LogAI library. LogAI provides a unified framework for log
142
+ analysis. In order to achieve this, LogAI adheres to the following design principles: 1) high compatibility with data from
143
+ different log sources, 2) reusable components to avoid reproducing effort, 3) unified setup process for customized
144
+ applications and 4) easy-to-use GUI for out-of-box interactive log analysis.
145
+ [Figure 1 diagram: the four workflow stages with their components - Log Collection (from local files, from log platforms), Log Cleaning and Preprocessing (cleaning noisy data, remove or replace custom log templates), Log Information Extraction (log parsing, log partitioning, feature extraction) and Log Analytics and Intelligence (analytics: clustering, summarization; intelligence: anomaly detection, root-cause analysis).]
169
+ 3.1
170
+ Compatible with data from different log sources
171
+ One of the attractive qualities of log data is its open-ended form, where developers can design logs to capture useful
172
+ runtime and performance information to any arbitrary level of granularity as per the needs of the application. Different
173
+ software can generate very different logs. Even in the same software, there are different levels of logs, such as service
174
+ logs, application logs, system logs, etc. These logs can be in different formats, either structured, semi-structured or
175
+ unstructured. LogAI takes these factors into consideration and ensures that the data loader can consume and process
176
+ these heterogeneous types of logs in a seamless way, by converting these logs into log records with a unified log data
177
+ model.
178
+ 3.2
179
+ Reusable components to avoid duplicated effort
180
+ As briefly motivated in Sec 1, a particular challenge of building log analytics in both academic and industrial settings is
181
+ the lack of a unified framework that allows reuse of data processing and information extraction components across
182
+ different log analysis tasks, even on the same data source or dataset. For instance, engineers and researchers have to
183
+ build separate pipelines to perform log anomaly detection, log clustering or summarization even to deal with the same
184
+ log data source. This burden significantly impacts efficiency in every development stage, from experiments and prototyping
185
+ all the way to productization. Also running multiple pipelines in production increases the system complexity and brings
186
+ additional operational cost. Thus, building a library that unifies the interface of common components across multiple
187
+ downstream tasks is necessary to improve efficiency of all stages of log analysis.
188
+ 3.3
189
+ Unified setup process for customized applications
190
+ Even for the same application, the design choice behind the log analysis pipeline might have different variations, based
191
+ on the various needs or limitations of the use-case. For example, log anomaly detection may involve different steps in
192
+ the end-to-end (E2E) workflow. Some may include log parsing, while others might choose to skip this step either due
193
+ to the computational overhead or simply because the downstream analysis models do not need a well-defined parsed
194
+ structure. Also, when converting the raw log text data to machine-readable vectors there can be various choices - either
195
+ to convert log messages into time-series counter vectors or into event sequences by representing each log line as an id
196
+ or as a sequence of natural language tokens. In production setup, adding, removing or replacing a component in the
197
+ E2E workflow could be very time consuming. LogAI is designed to support building customized applications with
198
+ easy plug-in / plug-out components, enabling users to quickly try out various combinations through simple intuitive
199
+ mechanisms like configurable json or yaml files.
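+ As an illustration of this configuration-driven setup, the sketch below loads a small, purely hypothetical workflow configuration from a YAML string; the keys and values shown are illustrative assumptions rather than LogAI's actual schema.
+ import yaml  # PyYAML
+
+ WORKFLOW_YAML = """
+ workflow_config:
+   data_loader:
+     filepath: ./HDFS_2000.log        # hypothetical sample log file
+   preprocessor:
+     custom_replace_list:
+       - ['blk_[0-9-]+', '<BLOCK>']   # mask block ids before parsing
+   log_parser:
+     parsing_algorithm: drain
+   anomaly_detection:
+     algo_name: one_class_svm
+ """
+
+ config = yaml.safe_load(WORKFLOW_YAML)["workflow_config"]
+ print(config["log_parser"]["parsing_algorithm"])  # -> drain
+ With such a file, swapping the parser or the anomaly detection algorithm becomes a one-line configuration change rather than a code change.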
200
+ 3.4
201
+ Easy-to-use GUI for out-of-box interactive log analysis
202
+ Another learning from working with different types of log data is about visual examination. Unlike many machine
203
+ learning domains where the model performance evaluation can heavily rely on metrics, such as Precision, Recall,
204
+ F-scores, log analysis tasks usually need more visual examination to validate the performance. Thus, LogAI is developed
205
+ with a graphic user interface (GUI), or a portal, to integrate with interactive analytical features for tasks such as log
206
+ summarization, clustering and anomaly detection. We believe this portal can reduce the cognitive overhead on the
207
+ LogAI users in onboarding to the library and help them execute the log analysis tasks quickly and intuitively.
208
+ 4
209
+ Architecture
210
+ LogAI is separated into the GUI module and core library module. The GUI module contains the implementation of a GUI
211
+ portal that talks to backend analysis applications. The portal is supported using Plotly Dash (https://plotly.com/dash/).
212
+ The core library module contains four main layers: data layer, pre-processing layer, information extraction layer and
213
+ analysis layer. Each layer contains the components to process logs in a standard way. LogAI applications, such as log
214
+ summarization, log clustering, unsupervised log anomaly detection, are created on top of the components of the four
215
+ layers.
216
+ 4.1
217
+ Core Library Modules
218
+ LogAI is implemented in the architecture described in Figure 2. In this section we describe the technical details of each
219
+ layer, including the implementation of components and how the components communicate across layers.
223
+ Figure 2: LogAI Architecture
224
+ 4.1.1
225
+ Data Layer
226
+ Data layer contains two component classes: LogRecordObject class and DataLoader class.
227
+ LogRecordObject class defines the data model of log records. As we mentioned in the Introduction, logs are free-form
228
+ text and can be unstructured or semi-structured. Even for structured logs, different software applications may name their
229
+ log data in different ways. LogRecordObject adapts log data from different sources to a more unified structure in
230
+ order to provide a data object that can be used in all follow-up processes without modification. In LogAI, the data model
231
+ of LogRecordObject is a subset of the log and event record definition by OpenTelemetry (https://opentelemetry.io/),
232
+ containing fields in Table 1.
233
+ Table 1: LogRecordObject Data Model
+ Timestamp: Timestamp when the event occurred.
+ Body: Loglines or the content of log messages.
+ Attributes: A map<key,value> for structured information of the log record.
+ TraceId: Request trace id as defined in W3C Trace Context. Can be set for logs that are part of request processing and have an assigned trace id. This field is optional.
+ SpanId: Span id as defined in the W3C Trace Context specification. Can be set for logs that are part of a particular processing span. This field is optional.
+ SeverityText: String representation of the severity. This field is optional.
+ SeverityNumber: Numeric value of severity: TRACE(1-4), DEBUG(5-8), INFO(9-12), WARN(13-16), ERROR(17-20), FATAL(21-24). This field is optional.
+ Resource: Description of the source of the log.
+ InstrumentationScope: Multiple occurrences of events coming from the same scope can happen across time and they all have the same value of InstrumentationScope.
258
+ DataLoader is a class that implements functions to load data from sources. In the current version we implement
259
+ FileDataLoader to load data from local files, e.g. .log, .csv, .tsv, .json. The associated DataLoaderConfig
260
+ class defines the configuration of how data will be loaded. The load_data() method will load data from the target source and
261
+ return a LogRecordObject. In future versions we will support data loaders with connectors to consume data directly
262
+ from log platforms such as Splunk, Datadog, AWS Cloudwatch, etc.
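+ A minimal sketch of this data layer is shown below; the import path and the exact DataLoaderConfig fields are assumptions made for illustration (only FileDataLoader, DataLoaderConfig and load_data() are named above), so the real signatures may differ.
+ # Assumed import path and config fields - for illustration only.
+ from logai.dataloader.data_loader import FileDataLoader, DataLoaderConfig
+
+ file_config = DataLoaderConfig(
+     filepath="HDFS_2000.log",   # hypothetical local .log file
+     log_type="log",
+ )
+ dataloader = FileDataLoader(file_config)
+ log_record = dataloader.load_data()   # returns a LogRecordObject
+ # Per Table 1, the logline text lives in the 'body' field of the returned record.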
263
+ 4.1.2
264
+ Preprocessing Layer
265
+ Preprocessing. Preprocessor is a class to conduct logline-level preprocessing. Users can initialize a preprocessor
266
+ instance with a configuration and execute the .clean_log() method to obtain cleaned loglines. The supported configuration
267
+ includes custom_delimiters_regex to parse logs with custom delimiters and custom_replace_list to identify
268
+ and replace the substrings that match regex patterns in this list; examples are shown in Figure 3.
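+ The sketch below mirrors the configuration shown in Figure 3; the import path and the exact return value of .clean_log() are assumptions, while the two configuration fields are the ones named above.
+ from logai.preprocess.preprocessor import Preprocessor, PreprocessorConfig  # assumed path
+
+ preprocessor_config = PreprocessorConfig(
+     custom_delimiters_regex=[r"\|"],                       # treat '|' as a delimiter
+     custom_replace_list=[(r"Step_\w+", "<Operations>")],   # mask Step_* tokens
+ )
+ preprocessor = Preprocessor(preprocessor_config)
+
+ loglines = ["20171223-22:15:29:615|Step_LSC|30002312|onExtend ..."]  # raw logline as in Figure 3
+ clean_loglines = preprocessor.clean_log(loglines)  # output shape assumed; see LogAI docs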
269
+ [Figure 2 diagram (LogAI Architecture): the data layer (FileDataLoader for custom log data files, OpenDataset DataLoader for open log datasets, Connector DataLoader for log streams from log platforms) produces log records with unstructured loglines and structured attributes; the preprocessing layer (preprocess, partitioning), the information extraction layer (auto-parsing, vectorization, categorical encoding, feature extraction) and the application layer (summarization, clustering, log anomaly detection) build on top of it.]
307
+ Figure 3: Example of preprocessor execution
308
+ Partitioning. Partitioner is a class that helps partition the logs. As part of preprocessing, there is often a need
309
+ to shuffle, concatenate and sequentialize raw logs into different forms, for example using time-based partitions or
310
+ identifier-based partitions or sliding window partitions of fixed lengths. This class provides optional functions for this
311
+ type of process.
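+ The Partitioner interface itself is not spelled out here, so the plain-Python sketch below only illustrates the two grouping schemes mentioned above (identifier-based partitions and fixed-length sliding windows); it is not the LogAI API.
+ from collections import defaultdict
+
+ def identifier_partitions(loglines, identifiers):
+     """Group loglines by an identifier (e.g. the session or block id they belong to)."""
+     groups = defaultdict(list)
+     for line, ident in zip(loglines, identifiers):
+         groups[ident].append(line)
+     return dict(groups)
+
+ def sliding_window_partitions(loglines, window_size=3, step=1):
+     """Fixed-length sliding windows over an ordered sequence of loglines."""
+     return [loglines[i:i + window_size]
+             for i in range(0, max(len(loglines) - window_size + 1, 1), step)]
+
+ lines = ["l1", "l2", "l3", "l4", "l5"]
+ print(identifier_partitions(lines, ["a", "a", "b", "a", "b"]))
+ print(sliding_window_partitions(lines, window_size=2))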
312
+ 4.1.3
313
+ Information Extraction Layer
314
+ The information extraction layer contains modules to convert log records into vectors that can be used as input to machine
315
+ learning models for the actual analytical tasks. Current log analysis research and applications indicate three main
316
+ input data types are used in the ML approaches: 1) converting log records into counter vectors to use time-series ML
317
+ techniques, 2) converting log records into feature vectors to use tabular-based ML techniques and 3) converting log
318
+ records into sequences to use sequential ML techniques.
319
+ LogAI implements four components in the information extraction layer to extract information from the log records
320
+ and convert logs to the target formats. The log parser component implements a series of automatic parsing algorithms
321
+ in order to extract templates from the input loglines. Log vectorizer implements a bag of vectorization algorithms to
322
+ convert free-form log text into numerical representations for each logline. Categorical encoder implements algorithms
323
+ that encode categorical attributes into numerical representations for each logline. Last but not least, the feature extractor
324
+ implements methods to group the logline level representation vectors into log event level representations.
325
+ Automated Log Parsing. LogParser is a class that conducts automated log parsing tasks. Currently LogAI covers
326
+ three automated log parsing algorithms: DRAIN[7], IPLoM[8] and AEL[9]. LogParser takes the unstructured logline
327
+ text as input and generates two sets of results: parsed_logline is the static pattern of all logs in this category, and
328
+ parameter_list are the lists of values for each “*” position in the log pattern for the same set of loglines.
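+ A sketch of the parsing step is shown below; the import path, the config field and the output column names are assumptions based on the description above (DRAIN as the parsing algorithm, with parsed_logline and parameter_list as outputs).
+ from logai.information_extraction.log_parser import LogParser, LogParserConfig  # assumed path
+
+ parser = LogParser(LogParserConfig(parsing_algorithm="drain"))  # or "iplom" / "ael"
+ parsed_result = parser.parse(clean_loglines)   # cleaned loglines from the preprocessing step
+ # Expected outputs, per the text: the static template of each logline and the
+ # list of values filling each "*" position of that template.
+ print(parsed_result["parsed_logline"].head())
+ print(parsed_result["parameter_list"].head())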
329
+ Log Vectorization. LogVectorizer is a class that converts unstructured loglines into semantic vectors. Each semantic
330
+ vector is an array of numeric values that represents this logline text. LogVectorizer supports popular text vectorization
331
+ algorithms such as TF-IDF [10], FastText [11], Word2Vec [12], etc.
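+ The snippet below is not the LogVectorizer API; it is a generic TF-IDF example (using scikit-learn) of what vectorization produces - one numeric vector per logline.
+ from sklearn.feature_extraction.text import TfidfVectorizer
+
+ sample_loglines = [
+     "Received block blk_123 of size 67108864 from 10.250.19.102",
+     "PacketResponder 1 for block blk_123 terminating",
+     "Verification succeeded for blk_456",
+ ]
+ vectorizer = TfidfVectorizer(token_pattern=r"[A-Za-z_]+")  # keep word-like tokens only
+ X = vectorizer.fit_transform(sample_loglines)              # shape: (n_loglines, vocab_size)
+ print(X.shape)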
332
+ Categorical Encoding. CategoricalEncoder is a class that encodes log attributes, the structured portion of logs.
333
+ The string type attributes will be transformed into categorical representations. CategoricalEncoder supports popular
334
+ categorical encoding algorithms such as label encoding, one-hot encoding, ordinal encoding etc.
335
+ Feature Extraction. FeatureExtractor is a class that conducts final transformation of raw log data into log feature
336
+ set that machine learning models can consume. In LogAI, we primarily cover three types of log features: 1) time-series
337
+ counters, 2) semantic feature sets and 3) sequence vectors. Time-series counters will be used to feed time-series models
338
+ such as ETS, ARIMA. Semantic feature set can be widely used in a variety of machine learning and deep learning
339
+ models. Sequence vectors are a specific type of feature format that are required by sequence-modeling based deep
340
+ learning methods, for example Recurrent Neural Networks or Convolutional Neural Networks.
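+ The following plain pandas sketch (not the FeatureExtractor API) illustrates two of the feature types above: time-series counters aggregated over a time window, and sequence vectors built per identifier-based partition.
+ import pandas as pd
+
+ events = pd.DataFrame({
+     "timestamp": pd.to_datetime(
+         ["2008-11-09 20:35:01", "2008-11-09 20:35:02", "2008-11-09 20:36:10"]),
+     "event_id": ["E1", "E2", "E1"],
+     "block_id": ["blk_1", "blk_1", "blk_2"],
+ })
+
+ # time-series counters: number of log events per 1-minute window
+ counters = events.set_index("timestamp").resample("1min")["event_id"].count()
+
+ # sequence vectors: ordered event-id sequence per identifier-based partition
+ sequences = events.groupby("block_id")["event_id"].apply(list)
+
+ print(counters)
+ print(sequences)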
341
+ [Figure 3 content: a PreprocessorConfig with custom_delimiters_regex=[r"\|"] and custom_replace_list=[(r'Step_\w+', '<Operations>')] is applied to raw loglines such as "20171223-22:15:29:615|Step_LSC|30002312|onExte..", producing clean loglines with "<Operations>" in place of the Step_* tokens and a parameter list of the replaced values (Step_LSC, Step_StandReportReceiver, Step_StandStepCounter, ...).]
370
+ 4.1.4
371
+ Analysis Layer
372
+ The analysis layer contains modules that conduct the analysis tasks, including but not limited to semantic anomaly
373
+ detector, time-series anomaly detector, sequence anomaly detector, clustering, etc. Each analysis module provides
374
+ a unified interface for multiple underlying algorithms.
375
+ Anomaly Detection. AnomalyDetector is a class to conduct anomaly detection analysis to find abnormal logs from
376
+ a semantic perspective. AnomalyDetector takes log features of the given logs as input. The outputs are the anomaly
377
+ scores. LogAI supports two different types of anomaly detection: 1) anomaly detection based on log counter vectors,
378
+ 2) anomaly detection based on log semantic representations. The supported anomaly detection algorithms include
379
+ univariate and multivariate time-series analysis algorithms from Merlion [13], unsupervised outlier detection models
380
+ like one-class SVM [14] and local outlier filter (LOF) [15] from scikit-learn [16].
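+ As a generic illustration of the outlier-detection flavor described above (not the AnomalyDetector API), the sketch below scores feature vectors with scikit-learn's IsolationForest; any of the listed algorithms could be substituted.
+ import numpy as np
+ from sklearn.ensemble import IsolationForest
+
+ rng = np.random.default_rng(0)
+ normal = rng.normal(0.0, 1.0, size=(200, 8))     # stand-in for per-logline feature vectors
+ outliers = rng.normal(6.0, 1.0, size=(5, 8))     # a few clearly shifted vectors
+ features = np.vstack([normal, outliers])
+
+ detector = IsolationForest(random_state=0).fit(features)
+ scores = detector.score_samples(features)        # lower score = more anomalous
+ print(scores[:3], scores[-3:])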
381
+ Deep-learning based anomaly detection.
382
+ NNAnomalyDetector class supports deep-learning model based log
383
+ anomaly detection algorithms, most of which take log sequence vectors as input. LogAI integrates some of
384
+ the popular deep learning based algorithms like recurrent neural network (RNN) based model LSTM [17], convolutional
385
+ neural network (CNN), Transformers [18] and the pretrained Transformer-based language model BERT [19]. The outputs
386
+ are anomaly scores for each log sequence.
387
+ Clustering. Clustering is a class to conduct log clustering analysis tasks. The input for log clustering are the
388
+ semantic log features. Clustering integrates different clustering models, such as k-Means [20], DBSCAN [21], etc.
389
+ The output is a map between each log feature record and a cluster label.
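+ A generic example of this idea (not the Clustering API): cluster TF-IDF vectors of loglines with k-Means and read off the record-to-cluster mapping.
+ from sklearn.cluster import KMeans
+ from sklearn.feature_extraction.text import TfidfVectorizer
+
+ loglines = [
+     "Connection established to 10.0.0.1",
+     "Connection established to 10.0.0.2",
+     "Disk quota exceeded for user alice",
+     "Disk quota exceeded for user bob",
+ ]
+ X = TfidfVectorizer().fit_transform(loglines)
+ labels = KMeans(n_clusters=2, n_init=10, random_state=0).fit_predict(X)
+ print(dict(zip(loglines, labels)))   # map: logline -> cluster label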
390
+ 4.1.5
391
+ E2E Applications
392
+ Depending on the component modules from data layer, preprocessing layer, feature extraction layer and analysis layer,
393
+ LogAI provides the flexibility to build end-to-end log analysis applications. The applications follow the design
394
+ principles illustrated in Figure 4. LogAI is launched with several out-of-the-box applications.
395
+ Figure 4: Design Principles of E2E Applications
396
+ Log Summarization. It is very important to understand your logs before using them for downstream tasks. Log
397
+ summarization leverages machine learning to process, aggregate and summarize logs. Please refer to the GUI module
398
+ Section 4.2 for more details on how to use it.
399
+ Log Clustering. Log clustering can be used to categorize logs. Finding meaningful clusters can bring benefits in a
400
+ variety of use cases like anomaly detection, log storage, query, etc. Please refer to the GUI module Section 4.2 for more
401
+ details on how to use it.
402
+ Log Anomaly Detection. Log anomaly detection is an application that detects anomalous loglines. Here in LogAI, log
403
+ anomaly detection can detect both time-series anomalies and semantic anomalies. Please refer to the GUI module
404
+ Section 4.2 for more details on how to use it.
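+ To make the end-to-end idea concrete, the sketch below chains the stages discussed so far (preprocess, vectorize, detect) behind a single configuration dictionary; it uses plain Python and scikit-learn rather than the LogAI application classes, so all names here are illustrative.
+ import re
+ from sklearn.feature_extraction.text import TfidfVectorizer
+ from sklearn.ensemble import IsolationForest
+
+ CONFIG = {"replace": [(r"\d+\.\d+\.\d+\.\d+", "<IP>")], "contamination": 0.1}
+
+ def run_anomaly_app(loglines, config):
+     # preprocess: mask irrelevant patterns defined in the configuration
+     for pattern, repl in config["replace"]:
+         loglines = [re.sub(pattern, repl, line) for line in loglines]
+     # vectorize: turn loglines into semantic feature vectors
+     X = TfidfVectorizer().fit_transform(loglines)
+     # detect: score the vectors with an outlier detector
+     model = IsolationForest(contamination=config["contamination"], random_state=0)
+     return model.fit_predict(X)          # -1 marks anomalous loglines
+
+ lines = ["login ok from 10.0.0.1"] * 9 + ["kernel panic: fatal error at 0xdeadbeef"]
+ print(run_anomaly_app(lines, CONFIG))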
405
+ [Figure 4 diagram (Design Principles of E2E Applications): an application workflow takes log record objects through data preparation and is driven by a workflow configuration, which selects and configures a dataloader (FileDataloader, OpenSetDataloader, other dataloaders), preprocessing/partitioning, information-extraction components and an analysis component, each with its own configuration and choice of algorithm (Algorithm-1/2/3).]
431
+ Figure 5: LogAI GUI portal
432
+ 4.2
433
+ GUI Module
434
+ The GUI module is implemented to provide a web portal for the out-of-the-box log analysis applications, including
435
+ log summarization, log clustering and log anomaly detection. Figure 5 shows the log summarization page of the LogAI portal.
436
+ The LogAI portal is developed using the Plotly Dash framework.
437
+ Control Panel. The control panel is on the left side of the page. In the control panel, users can upload files and configure file
438
+ and algorithm settings. When the user clicks the "Run" button, the analysis execution is triggered. This behavior is uniform
439
+ for all three different applications. After the analysis execution completes, the results will be displayed on the right side of
440
+ the page.
441
+ Main Display Panel. On the right side of the page we display the analysis results. Different applications may have
442
+ different layouts. The portal supports interactive visualization. Users can click or hover on parts of the charts to
443
+ drill down and get more detailed information.
444
+ The interaction between the frontend and backend of different applications is designed to be unified. The control panel
445
+ collects user input, generates the configuration for the application and sends it to the backend. The backend consumes the configuration
446
+ to create component instances and execute the workflow. After finishing the job, it sends the result table to the frontend.
447
+ The display panel for each application controls how the result table will be rendered for visualization. Users can expand
448
+ the GUI portal to support customized analysis applications by following the same design pattern and reusing the existing
449
+ components.
450
+ 4.3
451
+ Summary of Supported ML Algorithms in LogAI
452
+ This section summarizes the machine learning algorithms supported in LogAI. LogAI provides an algorithms compo-
453
+ nent to implement all supported algorithms with an algorithm factory. The algorithms component contains five algorithmic mod-
454
+ ules, notably: parsing_algo, vectorization_algo, categorical_encoding_algo, clustering_algo and anomaly_detection_algo.
455
+ The algorithms component also contains a nn_model module to implement all neural network models. LogAI has defined
456
+ unified algorithm interfaces for each module, so more algorithms can be implemented and integrated with LogAI in
457
+ future development. The current LogAI algorithm coverage is shown in Table 2.
458
+ The deep-learning models, generally being much more parameter-heavy, require more high-end compute devices like
459
+ GPUs. In such cases, their LogAI implementations provide options to use different devices (CPU or GPU) or multiple
460
+ GPUs seamlessly through the algorithm parameter configurations.
461
+ [Figure 5 screenshot (LogAI GUI portal, log summarization view): a left-hand control panel for choosing the application (Log Summarization, Log Clustering, Anomaly Detection) and the file and algorithm settings (Log Type: HDFS, Select Log File: HDFS_20000.log, Attributes: Level, Time Interval: 1s, Parsing Algorithm: DRAIN, RUN button); the main panel shows summary statistics (Total Number of Loglines: 20000, Total Number of Log Patterns: 15), a chart of the trend of occurrence at 1s frequency, and the extracted log patterns with their dynamic values (position, count, value).]
514
+ Table 2: Summary of supported machine learning algorithms in LogAI (Module | Algorithms | Task)
+ Log parser | DRAIN, IPLoM, AEL | Information Extraction
+ Log vectorizer | Fast-text, TF-IDF, Word2Vec, Semantic, Sequential, BertTokenizer | Unstructured Log Representation
+ Categorical Encoder | Label encoding, OneHot Encoding, Ordinal Encoding | Structured Log Representation
+ Clustering | DBSCAN, K-means, BIRCH | Analysis: Log Clustering
+ Anomaly Detection | One-class SVM, Isolation Forest, LOF, Distribution divergence | Analysis: Outlier Detection
+ Anomaly Detection | ETS, Dynamic Baseline, ARIMA | Analysis: Time-series Anomaly Detection
+ NN models | CNN, LSTM, Transformers | Analysis: Sequential Anomaly Detection
+ NN models | LogBERT | Analysis: (Sequential / Non-Sequential) Anomaly Detection
558
+ 5
559
+ Experiments: Benchmarking Log Anomaly Detection
560
+ In this section, we elaborate on some of the experimental effort at building pipelines for specific log analysis tasks on
561
+ publicly available log datasets. The purpose of this is to benchmark the performance of our LogAI library on these
562
+ standard tasks against the performances reported in existing literature or other well-known log libraries.
563
+ Amongst the different log analysis tasks, log based anomaly detection is perhaps the most objective task, where domain
564
+ experts like reliability and performance engineers can provide some supervision around which log sequences show
565
+ anomalous behavior. The other tasks like log clustering and summarization are much more subjective in nature, while log
566
+ based root cause analysis is too specific and tightly coupled with the application or environment it is deployed in. Hence
567
+ for these tasks it is often impossible to collect supervision labels for benchmarking purposes. Consequently most of the
568
+ publicly available log analysis datasets and benchmarks have focused on the anomaly detection task. While a small
569
+ subset of these datasets have also been redesigned to serve log clustering and log summarization in past literature, they
570
+ can at best be considered as pseudo-oracle data for these tasks and are still not large-scale enough for benchmarking
571
+ purposes. Hence, for this reason, in our LogAI library we focus on benchmarking only the log based anomaly detection
572
+ task.
573
+ Following the advances of Artificial Intelligence (AI), Machine Learning (ML) and Natural Language Processing
574
+ (NLP), for the log anomaly detection task too, traditional statistical ML based solutions (like SVM, Isolation Forest,
575
+ etc.) have gradually given way to more powerful and sophisticated neural models. Some of these newer models can
576
+ leverage self-supervised learning to achieve comparable anomaly detection performance in unsupervised settings in
577
+ comparison to older traditional supervised models. Additionally, the traditional ML models, having been around for
578
+ quite a while, have been more extensively studied with fairly well-reproduced benchmarks in existing literature. Hence
579
+ in our benchmarking experiments, we have only focused on the more recent neural models.
583
+ 5.1
584
+ Limitations of Existing Libraries and Benchmarking Practices
585
+ Over the past decade there has been a large body of literature [22, 23, 24, 25, 26, 27, 28, 29, 3] reporting the log anomaly
586
+ detection performance on some of the standard open-sourced log datasets, as well as various efforts at open-sourcing
587
+ libraries catering to the log anomaly detection task. For example, [2, 3] had released libraries (Loglizer and Deep-Loglizer)
588
+ for log based anomaly detection using traditional machine learning and more recent deep learning models, respectively.
589
+ In their library they had consolidated some of the benchmarking effort, bringing together all the popular log anomaly
590
+ detection models for a more fair comparison on a few public log datasets.
591
+ However, despite this, there is still a lack of rigorous standardisation and benchmarking amongst these works, especially
592
+ the ones employing neural models. Below we list some of the specific limitations of the Loglizer and Deep-Loglizer libraries
593
+ which necessitate a unified, generic framework for log analysis tasks:
594
+ • Generic Log Data Processing Pipeline: There is a lack of libraries that provide a generic data processing pipeline
595
+ that is common across different log datasets or different log anomaly algorithms. While Loglizer [5] and Deep-
596
+ Loglizer [3] have achieved this to some degree, they still require some dataset-specific preprocessing and customization
597
+ which are quite open-ended. For users wishing to replicate on their own datasets or other public datasets, there is no
598
+ clear framework guiding the necessary steps and output-structure of the dataset-specific preprocessing to follow. On
599
+ the other hand, the LogAI library provides a unified, generic data-processing pipeline across all public datasets and
600
+ log analysis algorithms. It only requires very minimal dataset-specific customization with a clear template of the
601
+ kind of preprocessing needed for each dataset - for e.g. each dataset has its own way of specifying the fields of the
602
+ LogRecordObject (governed by OpenTelemetry data models) e.g. labels or identifiers of the loglines - which are
603
+ either directly part of the raw log data or have to be derived based on some rules.
604
+ • Catering to multiple Log Analysis Tasks: There is a lack of libraries that can cater to all kinds of log analysis tasks
605
+ (including log clustering, summarization, anomaly detection etc) under a single generic platform. Each of the existing
606
+ log libraries are tailored for a specific kind of log analysis task. For example, libraries like Loglizer and Deep-Loglizer
607
+ specifically focus on log based anomaly detection, log-parser on parsing log data, and log3C caters to clustering and
608
+ correlation-specific analysis. On the other hand, LogAI enables all of these analysis tasks along with others, like
609
+ summarization, visualization, etc. under a unified framework.
610
+ • Coverage of Log Analysis Models: The existing Loglizer library provides the more traditional machine learning
611
+ algorithms for log based anomaly detection, with the Deep-Loglizer being a deep-learning based counterpart of it,
612
+ providing only neural ML models. LogAI, on the other hand, provides a generic framework encompassing most of the
613
+ popular AI/ML algorithms - starting from traditional statistical ML models to popular neural models as well as more
614
+ recent pretrained Transformer (BERT) based models. Going ahead, our LogAI library can provide a more extended
615
+ platform for integrating with more upcoming and powerful neural models as the mainstream deep learning research
616
+ progresses. For all of these models, LogAI provides a single unified data processing platform that is independent of
617
+ the kind of downstream analysis task or models.
618
+ Thus, with the LogAI library, we aim at a more intuitive and easy-to-use log analysis framework for practitioners of
619
+ different areas and levels of expertise to perform log analysis, without being impeded by the technical nuances of the
620
+ task.
621
+ 5.2
622
+ Log based Anomaly Detection Workflow
623
+ In order to handle the complex and heterogeneous nature of log data, log based anomaly detection typically follows a
624
+ multi-step pipeline. Starting with the raw log data dump or data streams, the log analysis workflow does some initial
625
+ preprocessing and cleaning-up of the raw logs to make them amenable to ML models. This is typically followed by log
626
+ parsing which extracts a loose structure from the semi-structured data and then performs grouping and partitioning of
627
+ the log lines into log sequences in order to model the sequence characteristics of the data. After this, the logs or log
628
+ sequences are vectorized i.e. represented as a machine-readable vector, by first tokenizing each instance and converting
629
+ each token to a d-dimensional embedding. On this vectorized version of the log data, various anomaly detection models
630
+ can be applied.
631
+ The choices of each of these steps (for e.g. whether to apply parsing or not, or whether to partition based on sessions or
632
+ sliding windows, or whether to apply clustering or not) can be guided by various factors - nature of the application
633
+ generating the log data or the model requirements or other efficiency or performance related constraints.
634
+ i) Log Preprocessing: In LogAI, this step involves handling the formatting of timestamps, logline-identifiers and any
635
+ associated labels (e.g. anomaly labels) in the raw log data to make it compatible with the OpenTelemetry data model. Additionally, it
636
+ also provides customised filtering of specific regular expression patterns (like IP addresses or memory locations or file
637
+ paths) that are deemed irrelevant for the actual log analysis.
638
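+ As a rough sketch of such preprocessing (illustrative only, not the actual LogAI API; the regular-expression patterns below are assumptions), one could mask runtime-specific tokens as follows:
+
+ import re
+
+ # Assumed patterns for runtime-specific tokens that carry no analytic signal.
+ REPLACEMENTS = [
+     (re.compile(r"\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?"), "<IP>"),   # IP addresses (optionally with port)
+     (re.compile(r"0x[0-9a-fA-F]+"), "<HEX>"),                    # memory locations
+     (re.compile(r"(/[\w.-]+)+"), "<PATH>"),                      # file paths
+ ]
+
+ def preprocess_logline(line: str) -> str:
+     """Mask irrelevant runtime tokens so downstream parsing sees a cleaner message."""
+     for pattern, token in REPLACEMENTS:
+         line = pattern.sub(token, line)
+     return line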
641
+ Figure 6: Example of Log Parsing
642
+ ii)Log Parsing: To enable downstream processing, un-
643
+ structured log messages first need to be parsed into a
644
+ structured event template (i.e. constant part that was ac-
645
+ tually designed by the developers) and parameters (i.e.
646
+ variable part which contain the dynamic runtime informa-
647
+ tion). Figure 6 provides one such example of parsing a
648
+ logline. In LogAI library we provide three popular log
649
+ parsers which use heuristic-based techniques - Drain [30],
650
+ IPLoM [31] and AEL [32].
651
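+ For illustration, a heavily simplified regex-based stand-in for a parser is sketched below; it only shows the template/parameter split and does not reflect how Drain, IPLoM or AEL work internally:
+
+ import re
+
+ def naive_parse(logline: str):
+     """Split a log message into a coarse template and its parameters by masking
+     numeric and block-id tokens; real parsers such as Drain instead build the
+     template with a fixed-depth prefix tree."""
+     param_pattern = re.compile(r"blk_-?\d+|\b\d+(?:\.\d+)*\b|/[\d.]+")
+     parameters = param_pattern.findall(logline)
+     template = param_pattern.sub("<*>", logline)
+     return template, parameters
+
+ template, params = naive_parse(
+     "Received block blk_3587508140051953248 of size 67108864 from /10.251.42.84")
+ # template -> "Received block <*> of size <*> from <*>"
+ # params   -> ["blk_3587508140051953248", "67108864", "/10.251.42.84"]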
+ iii) Log Partitioning: After parsing the next step is to
652
+ partition the log data into groups, based on some seman-
653
+ tics where each group represents a finite chunk of log lines or log sequences. The main purpose behind this is to
654
+ decompose the original log dump, which typically consists of millions of log lines into logical chunks, so as to enable
655
+ explicit modeling on these chunks and allow the models to capture anomaly patterns over sequences of log templates or
656
+ log parameter values or both. In literature, various Log partitioning techniques have been applied [27, 33]. In LogAI we
657
+ provide different schemes like - Fixed or Sliding window based partitions, where the length of window is determined by
658
+ length of log sequence or a period of time, and Identifier based partitions where logs are partitioned based on some
659
+ identifier (e.g. the session or process they originate from). Figure 7 illustrates these different choices of log grouping
660
+ and partitioning. A log event is eventually deemed to be anomalous or not, either at the level of a log line or a log
661
+ partition.
662
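+ The sketch below illustrates the three partitioning choices in plain Python (illustrative only; the function names are not the LogAI API):
+
+ def fixed_partitions(event_ids, window_size):
+     """Non-overlapping chunks of a fixed number of log events."""
+     return [event_ids[i:i + window_size] for i in range(0, len(event_ids), window_size)]
+
+ def sliding_partitions(event_ids, window_size, step):
+     """Overlapping windows that advance by `step` events (or, analogously, by a time period)."""
+     return [event_ids[i:i + window_size] for i in range(0, len(event_ids) - window_size + 1, step)]
+
+ def identifier_partitions(records):
+     """Group log events by an identifier such as a session or block id.
+     `records` is an iterable of (identifier, event_id) pairs."""
+     groups = {}
+     for identifier, event_id in records:
+         groups.setdefault(identifier, []).append(event_id)
+     return groups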
+ Figure 7: Different types of log partition-
663
+ ing
664
+ iv) Log Vectorization: After log partitioning, the next step is to represent
665
+ each partition in a machine-readable way (e.g. a vector or a matrix) by
666
+ extracting features from them. This can be done in various ways [34, 33].
667
+ In LogAI we provide the following vectorization techniques -
668
+ • i) sequential representation which converts each partition to an ordered
669
+ sequence of log event ids
670
+ • ii) quantitative representation which uses count vectors, weighted by the
671
+ term and inverse document frequency information of the log events
672
+ • iii) semantic representation captures the linguistic meaning from the se-
673
+ quence of language tokens in the log events and learns a high-dimensional
674
+ embedding vector for each token in the dataset.
675
+ The chosen log representation has a direct consequence on
+ which patterns of anomalies it can support - for example, for capturing
677
+ keyword based anomalies, semantic representation might be key, while for anomalies related to template count and
678
+ variable distribution, quantitative representations are possibly more appropriate. The semantic embedding vectors
679
+ themselves can be either obtained using pretrained neural language models like GloVe, FastText, pretrained Transformer
680
+ like BERT, RoBERTa etc. Or they can also be learnt from scratch on the available training data, by building custom
681
+ vocabulary and using these neural language models.
682
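+ A minimal sketch of the sequential and quantitative representations (the semantic variant would instead map tokens to pretrained embeddings); this is illustrative and not the LogAI API:
+
+ from collections import Counter
+ import numpy as np
+
+ def sequential_representation(id_sequence):
+     # The partition is already an ordered list of log event ids; the sequential
+     # feature is simply that ordered sequence (padding/truncation happens downstream).
+     return list(id_sequence)
+
+ def quantitative_representation(id_sequences, vocab_size):
+     # Count vector per partition, weighted by a smoothed inverse document frequency
+     # of each log event id (a TF-IDF style weighting).
+     counts = np.zeros((len(id_sequences), vocab_size))
+     for row, seq in enumerate(id_sequences):
+         for event_id, c in Counter(seq).items():
+             counts[row, event_id] = c
+     doc_freq = (counts > 0).sum(axis=0)
+     idf = np.log((1 + len(id_sequences)) / (1 + doc_freq)) + 1
+     return counts * idf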
+ v) Log Anomaly Detection Models for benchmarking: The task of log based anomaly detection is to analyze a dump
683
+ of log data, consisting of a series of timestamped log lines and identify the anomalous log lines that are potentially
684
+ incident-indicating. Based on the kind of application, log anomaly signals can either be used to detect or localize an
685
+ already occurred incident or disruption or used to forecast future potential faults or failures. In literature, log based
686
+ anomaly detection models have been broadly categorized into two types - Supervised and Unsupervised, based on the
687
+ kind of training framework they follow. Since our objective is to benchmark only neural models, we limit our discussion
688
+ in this section to this class of models alone.
689
+ Supervised Anomaly Detection models require the anomaly label to be available at the level of each log line or a log
690
+ group or partition. Furthermore, they typically assume that each of the training, development and test data will contain
691
+ a mix of both anomalous and non-anomalous log data. These models use the supervised losses like cross entropy loss or
692
+ squared error loss. But they can suffer due to the under-representativeness of the anomalous class of logs, especially if
693
+ they occur very rarely in the training and development data. Due to their direct dependency on modeling the anomalous
694
+ class explicitly these models also lack robustness when the anomaly distribution changes.
695
+ [Figure 6 content: the log line "081109 204655 556 INFO dfs.DataNode$PacketResponder: Received block blk_3587508140051953248 of size 67108864 from /10.251.42.84" is parsed into Timestamp "081109 204655 556", Level "INFO", Component "dfs.DataNode$PacketResponder", Template "Received block <*> of size <*> from <*>", and Parameters ["blk_3587508140051953248", "67108864", "10.251.42.84"].]
+ [Figure 7 content: illustration of the three log partitioning schemes - Fixed Partitions, Sliding Partitions, and Identifier Partitions.]
731
+ Unsupervised Anomaly Detection models do not require any anomaly label for the log data. But the existing
732
+ unsupervised models in the literature typically assume that the entire training data is comprised of only normal or
733
+ non-anomalous logs and generally show a sharp decline in performance when the training data is adulterated with even
734
+ a small fraction of anomalous logs. Amongst the most popular unsupervised anomaly detection models, mainly two
735
+ paradigms have been followed:
736
+ • Forecasting based models: These models learn the representations of the log lines through a forecasting based
737
+ self-supervision i.e. by learning to predict the label of next log line given an input context of log sequence. For
738
+ all of these models, following Deep-Loglizer paper, the label has been taken as the event id of the next log line.
739
+ This category of models includes various sequence encoding networks that have been popular in deep-learning -
740
+ like recurrent neural network or convolutional neural network based models or the more recent, more powerful
741
+ self-attention based Transformer models. These models are typically trained with a cross-entropy loss between the true
+ and predicted distributions, which aims to maximise the likelihood of the true label, conditioned on the given input
743
+ sequence.
744
+ • Reconstruction based models: This includes Auto-encoder based models which try to reconstruct a given sequence
745
+ of loglines through a learnable hidden layer that learns an n-dimensional representation of each log-line. The other
746
+ more recent models in this category are Transformer based models which are trained using masked-language modeling
747
+ principles. During training a certain fraction of the input tokens would be masked and the model would learn to
748
+ predict these tokens using the remaining input context; and in the process learning the contextual representation of
749
+ each token in a log-line or a log-sequence. This is the fundamental principle behind BERT Language model with the
750
+ masked language modeling providing the learning objective when training on the log data in a self-supervised way.
751
+ Forecasting based Anomaly Detection: For our benchmarking with forecasting based models, we select three core
752
+ deep learning models which have been the basis of some of the most popular recent neural log anomaly detection
753
+ methods
754
+ • LSTM: This model corresponds to a long-short term memory (LSTM) network to encode a given log sequence. It
755
+ also provides various options - i) whether to utilize uni-directional or bi-directional encoding of tokens in a given
756
+ input sequence ii) whether to have a learnable attention network over the input sequence, which linearly combines the
757
+ hidden representations with the attention weights.
758
+ • CNN: This model corresponds to a convolutional neural network (CNN) to encode a given log sequence. Different
759
+ convolutional layers with different shape settings are applied on the input followed by a 1-d max-pooling operation.
760
+ The outputs from each of these are then concatenated and fed into a fully-connected layer.
761
+ • Transformer: This model corresponds to a Transformer based encoder network with a multi-headed self-attention
762
+ mechanism to encode a given log sequence. Since the Transformer outputs a d-dimensional representation for
763
+ each token in the input log-sequence, a mean-pooling operation is applied over those representations, to get a fixed
764
+ representation for the entire sequence.
765
+ Since the LSTM, CNN and Transformer models need a d-dimensional representation of each log, first an embedding
766
+ layer is applied to the raw log input features. In case of sequential feature representation, each log event id is embedded
767
+ as a d-dimensional vector, while for semantic feature representation, the embedding layer is initialized with the
768
+ pretrained embeddings (e.g. Word2Vec or FastText etc) and embeds each log token id to a d-dimensional vector.
769
+ The output of the LSTM, CNN or Transformer is a fixed d-dimensional representation of the input sequence, which is
770
+ then downward projected to 1-d space, followed by a softmax layer. For supervised versions of these models, since the
771
+ explicit label (anomalous or not) exists for each log-line or log-sequence, the output of the softmax layer is aimed to
772
+ directly predict this label. For forecasting based unsupervised versions, the output of the softmax layer is aimed to
773
+ predict the id of the next log-line succeeding the given input log sequence. During inference, the forecasting
+ based unsupervised models make a prediction for a given input log sequence, which is then compared against the actual
+ log event following the input sequence. We follow a similar inference strategy to [3] and predict a test instance as
776
+ anomalous if the ground truth is not one of the k (=10) most probable log events predicted by the model. A smaller k
777
+ imposes more demanding requirements on the model’s performance.
778
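+ The sketch below shows, for the unsupervised forecasting setting with sequential features, what such a model and the top-k inference rule can look like in PyTorch (an illustrative sketch, not the LogAI implementation; hyperparameters are placeholders):
+
+ import torch
+ import torch.nn as nn
+
+ class NextEventForecaster(nn.Module):
+     """Embed log event ids, encode the window with an LSTM, and predict a
+     distribution over the next event id (forecasting-based self-supervision)."""
+     def __init__(self, num_events, embed_dim=32, hidden_dim=64):
+         super().__init__()
+         self.embedding = nn.Embedding(num_events, embed_dim)
+         self.encoder = nn.LSTM(embed_dim, hidden_dim, batch_first=True)
+         self.head = nn.Linear(hidden_dim, num_events)
+
+     def forward(self, event_id_windows):          # (batch, window_length) LongTensor
+         embedded = self.embedding(event_id_windows)
+         _, (hidden, _) = self.encoder(embedded)   # hidden: (num_layers, batch, hidden_dim)
+         return self.head(hidden[-1])              # logits over the next event id
+
+ def is_anomalous(model, window, true_next_event, k=10):
+     """Flag a window as anomalous if the observed next event is not among the
+     k most probable events predicted by the model. `window` is a 1-D LongTensor."""
+     with torch.no_grad():
+         logits = model(window.unsqueeze(0))
+         topk = torch.topk(logits, k, dim=-1).indices.squeeze(0)
+     return true_next_event not in topk.tolist()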
+ In literature, LSTM based models have been used by DeepLog [35], LogAnomaly [34] and LogRobust [36]. While
779
+ DeepLog uses sequential representations, where each log message is represented by the index of its log event,
780
+ LogAnomaly uses semantic representations. While both of these use unidirectional LSTM in an unsupervised setting,
781
+ LogRobust uses supervised version of an bi-directional LSTM with the attention network. CNN has been used by [37]
782
+ but only in a supervised setting. Transformer based model has been applied in LogSy [38], but they additionally use
783
+ auxiliary log datasets as pseudo-anomalous data. This helps them to learn a better representation of normal log data
784
+ from the target system of interest while regularizing against overfitting. In order to ensure better reproducibility, in our
785
+ benchmarking we do not use any additional log datasets and hence in some of the supervised settings, our Transformer
786
+ based models suffer from overfitting issues and yield somewhat poorer results and are not directly comparable to the
787
790
+ results obtained by [37]. Following [3] for all of these models, in both the supervised and unsupervised settings, we
791
+ report the F1-Scores.
792
+ Reconstruction based Anomaly Detection: For our benchmarking with reconstruction based models, we select the
793
+ LogBERT model from the work LanoBERT [39]. Following that literature, the preprocessing configurations are set
794
+ before the BERT model can be applied - i) Since LogBERT is a parser-free technique, no log parsing is applied. ii)
795
+ For obtaining the vectorized log representation, the preprocessed log sequences are tokenized using the WordPiece
796
+ (Wu et al. 2016) model used in BERT. iii) The tokenizer is trained from scratch on each log dataset to ensure that the
797
+ dataset-specific custom vocabulary can be learned. During training, the usual masked language modeling principles
+ of BERT are followed. During inference, multiple masked versions of each test instance are generated by passing a
+ fixed-size masking window over the token sequence, ignoring masking of special characters. Thus a test instance of
+ sequence length N will result in an average of N/n masked instances, each having a masked n-gram of length up to n. After
802
+ running the inference on the masked test instance, the anomaly score is obtained as the average of the top-prediction
803
+ probabilities (or log-probabilities) over the k-most confident masked tokens. Following LanoBERT, we report AUROC
804
+ (Area under ROC) metric over this anomaly score.
805
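+ The following sketch illustrates this masked-window scoring, assuming a HuggingFace-style masked language model whose forward call returns `.logits` (an assumption for illustration; the averaging convention and the sign of the final score are one possible choice, not necessarily the exact LogAI or LanoBERT implementation):
+
+ import torch
+
+ def masked_window_scores(model, input_ids, mask_token_id, window=3, special_ids=()):
+     """Slide a fixed-size masking window over a tokenized test log line and collect,
+     for each masked position, the model's top prediction probability there."""
+     top_probs = []
+     for start in range(0, input_ids.size(0) - window + 1, window):
+         positions = [p for p in range(start, start + window)
+                      if input_ids[p].item() not in special_ids]
+         if not positions:
+             continue
+         masked = input_ids.clone()
+         masked[positions] = mask_token_id
+         with torch.no_grad():
+             logits = model(input_ids=masked.unsqueeze(0)).logits.squeeze(0)
+         probs = torch.softmax(logits[positions], dim=-1)
+         top_probs.extend(probs.max(dim=-1).values.tolist())
+     return top_probs
+
+ def anomaly_score(top_probs, k=6):
+     """Average the top prediction probabilities over the k most confident masked
+     tokens; here the score is 1 - that average, so higher means more anomalous."""
+     most_confident = sorted(top_probs, reverse=True)[:k]
+     return 1.0 - sum(most_confident) / len(most_confident)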
+ All unsupervised models (forecasting or reconstruction based) are trained only on the normal log data. Following
806
+ Deep-Loglizer, for the forecasting based models, around 20% of the data and for LogBERT, following LanoBERT,
807
+ around 30% of the data is sequestered for test. These percentages include the entire set of anomalous logs in the dataset.
808
+ In LogAI, we take out 10% of the training data as development set for validation and model selection purposes.
809
+ 5.3
810
+ Datasets:
811
+ Following Deep-Loglizer and LanoBERT, we perform our benchmarking experiments on two of the most popular
812
+ public log anomaly detection datasets - HDFS and BGL. Additionally for LogBERT we also benchmark on the public
813
+ dataset, Thunderbird. Further, similar to Deep-Loglizer, for BGL dataset we also perform a fixed-window based log
814
+ partitioning by grouping log-lines over every 6-hourly window. However for LogBERT model, following LanoBERT,
815
+ we treat each individual log-line as a train or test instance, without doing any log partitioning. On the other hand, for
816
+ HDFS dataset, since anomaly labels are available only at the level of each session-id (which is also known as BLOCK
817
+ in the raw dataset), we use identifier based log partitioning, by constructing log-sequences for each session-id. These
818
+ resulting log partitions are treated as the training or test instances for all algorithms.
819
+ 5.4
820
+ Experimental Settings and Results:
821
+ For our benchmarking we conduct experiments on the above choice of anomaly detection algorithms under various
822
+ settings and compare our experimental results with those published in the Deep-Loglizer [3] and LanoBERT [39] papers. In
+ Table 3 we list the performance of the different supervised and unsupervised forecasting-based models (LSTM, CNN
+ and Transformer), while Table 4 shows the results using the unsupervised reconstruction-based LogBERT model.
825
+ Evaluation Metrics: In order to compare the performances, for all supervised and unsupervised forecasting-based
826
+ models we use F1-Score as the metric, following Deep-Loglizer paper. Whereas, for LogBERT, following LanoBERT
827
+ paper we report the AUROC metric. LanoBERT paper also provides F1-Score, but the F1-Score calculation needs
828
+ fixing a threshold, which is challenging to do over the training data that only has normal logs. According to the paper,
829
+ their reported scores are the best F1 value that was calculated using the threshold that yields the best performance for
830
+ the test dataset. This is not a fair metric, as it involves label-knowledge of the blind test set and hence we only compare
831
+ using AUROC metric.
832
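+ Computing the two metrics is straightforward with scikit-learn (a sketch; variable names are placeholders):
+
+ from sklearn.metrics import roc_auc_score, f1_score
+
+ # y_true: 1 for anomalous test instances, 0 for normal ones.
+ # scores: continuous anomaly scores (e.g. from LogBERT); preds: hard 0/1 predictions
+ # (e.g. from the top-k rule of the forecasting models).
+ def evaluate(y_true, scores=None, preds=None):
+     results = {}
+     if scores is not None:
+         results["auroc"] = roc_auc_score(y_true, scores)   # threshold-free
+     if preds is not None:
+         results["f1"] = f1_score(y_true, preds)            # requires a fixed decision rule
+     return results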
+ Configuration Settings for Evaluation: For each of LSTM and Transformer models, we benchmark on 8 different
833
+ configuration settings for each dataset - based on the kind of supervision (supervised or unsupervised), whether log
834
+ parsing is applied or not, whether the log representation is sequential or semantics based. For CNN models, we found
835
+ the semantics based log representation results in a very slow convergence rate, hence we have benchmarked our results
836
+ using only the sequential feature representations of the logs. On the other hand, Deep-Loglizer showcases only specific
837
+ settings for these models - for e.g. forecasting based unsupervised anomaly detection is done using Unidirectional
838
+ LSTM with no-attention and Transformer network while supervised models are Bidirectional LSTM with attention and
839
+ CNN network, whereas all of these methods can be applied on both supervised and unsupervised settings. Each of their
840
+ models use the Log Parsing step and have two variants that use sequential and semantic feature representations for the
841
+ logs. However Deep-Loglizer paper [3] provides only 8 configurations for each dataset whereas LogAI is benchmarked
842
+ on a more exhaustive set of 20 configurations per dataset.
843
+ Performance Comparison: In most of these configurations the performance achieved by LogAI is comparable to that
844
+ of Deep-Loglizer. The 2-3% difference in performance between the models is not quite statistically significant and can
845
848
+ Model       | Details                                                      | Supervision  | Log Parsing | Log Representation | HDFS (LogAI) | HDFS (Deep-Loglizer) | BGL (LogAI) | BGL (Deep-Loglizer)
+ LSTM        | Unidirectional, No Attention                                 | Unsupervised | with        | sequential         | 0.981        | 0.944                | 0.938       | 0.961
+ LSTM        | Unidirectional, No Attention                                 | Unsupervised | with        | semantic           | 0.981        | 0.945                | 0.924       | 0.967
+ LSTM        | Unidirectional, No Attention                                 | Unsupervised | without     | sequential         | 0.979        | -                    | 0.925       | -
+ LSTM        | Unidirectional, No Attention                                 | Unsupervised | without     | semantic           | 0.981        | -                    | 0.924       | -
+ LSTM        | Bidirectional, With Attention                                | Supervised   | with        | sequential         | 0.984        | 0.96                 | 0.983       | 0.983
+ LSTM        | Bidirectional, With Attention                                | Supervised   | with        | semantic           | 0.964        | 0.964                | 0.95        | 0.983
+ LSTM        | Bidirectional, With Attention                                | Supervised   | without     | sequential         | 0.989        | -                    | 0.931       | -
+ LSTM        | Bidirectional, With Attention                                | Supervised   | without     | semantic           | 0.971        | -                    | 0.983       | -
+ CNN         | 2-D Convolution with 1-D Max pooling                         | Unsupervised | with        | sequential         | 0.981        | -                    | 0.929       | -
+ CNN         | 2-D Convolution with 1-D Max pooling                         | Unsupervised | without     | sequential         | 0.981        | -                    | 0.922       | -
+ CNN         | 2-D Convolution with 1-D Max pooling                         | Supervised   | with        | sequential         | 0.943        | 0.97                 | 0.983       | 0.972
+ CNN         | 2-D Convolution with 1-D Max pooling                         | Supervised   | without     | sequential         | 0.946        | -                    | 0.990       | -
+ Transformer | Multihead single-layer self-attention, trained from scratch | Unsupervised | with        | sequential         | 0.971        | 0.905                | 0.933       | 0.956
+ Transformer | Multihead single-layer self-attention, trained from scratch | Unsupervised | with        | semantic           | 0.978        | 0.925                | 0.921       | 0.957
+ Transformer | Multihead single-layer self-attention, trained from scratch | Unsupervised | without     | sequential         | 0.98         | -                    | 0.92        | -
+ Transformer | Multihead single-layer self-attention, trained from scratch | Unsupervised | without     | semantic           | 0.975        | -                    | 0.917       | -
+ Transformer | Multihead single-layer self-attention, trained from scratch | Supervised   | with        | sequential         | 0.934        | -                    | 0.986       | -
+ Transformer | Multihead single-layer self-attention, trained from scratch | Supervised   | with        | semantic           | 0.784        | -                    | 0.963       | -
+ Transformer | Multihead single-layer self-attention, trained from scratch | Supervised   | without     | sequential         | 0.945        | -                    | 0.994       | -
+ Transformer | Multihead single-layer self-attention, trained from scratch | Supervised   | without     | semantic           | 0.915        | -                    | 0.977       | -
996
+ Table 3: Comparison between different supervised and unsupervised Forecasting-based neural anomaly detection
997
+ models in LogAI and Deep-Loglizer library [3], using F1-Score as the performance metric. The dashed (-) cells indicate
998
+ that there are no reported numbers in the Deep-Loglizer paper corresponding to those configurations.
999
+ mostly be attributed to the following factors: Following the implementation open-sourced by authors of Deep-Loglizer
1000
+ in https://github.com/logpai/deep-loglizer, it is evident that the library does not utilize any development (or
1001
+ validation) set and directly performs model selection based on the test performance. LogAI on the other hand, selects
1002
+ the model checkpoint on the validation performance and reports the results on the blind test set. Secondly, for
+ the same reason, the resulting training and test splits used by LogAI and Deep-Loglizer are not identical. Especially
+ for BGL data, the performance difference is perhaps somewhat more observable, since both libraries apply fixed
+ time-partitions of 6 hours and report the evaluation at the level of the partitions, instead of at the logline level.
1006
+ This also adds to the possibility of more significant differences in the training/test data setup between the two models.
1007
+ For Transformer based models, especially in the supervised setting, we observe a reduced performance. Similar effect
1008
+ had been studied in the original work [38] that used Transformer model as Log Anomaly Detector in the supervised
1009
+ setting. Their model suffered from overfitting on the target system’s log data due to the presence of only rare and sparse
1010
+ anomaly patterns in the train data. To overcome the overfitting issue, they additionally involve other external system’s
1011
+ logs as auxiliary data - treating them as pseudo “anomalous” logs. But in order to keep our benchmarking reproducible,
+ we do not use any additional auxiliary data and subsequently report a poorer performance. The Deep-Loglizer paper
1013
+ also benchmarks with only the unsupervised setting of the Transformer model, which is much less prone to overfitting.
1014
+ For LogBERT model, we benchmark the test results taking various inferencing strategies. Given a test instance, which
1015
+ has been converted to multiple masked versions (each having a continuous n-gram masked), we average the
+ inference score either over all the masked tokens or over the top-6 most confident ones, based on the model
1017
+ prediction likelihood. For the latter we consider different inference scores - mean predictive loss or maximum predictive
1018
+ probability or log probability or the entropy of the prediction distribution. All of these metrics are quite correlated and
1019
+ our objective is to simply show that our LogBERT implementation yields reasonably stable results across these different
1020
+ inferencing strategies. While LanoBERT also uses Predictive Loss and Probability based scores, they provide AUROC
1021
+ evaluation metrics only for the latter, and they also evaluate only the HDFS and BGL datasets. In the predictive
1022
+ probability based inference strategy, results obtained by LogAI and LanoBERT are quite comparable, with small
1023
+ differences owing to the variability of the train, test splits used in the two implementations (The authors of LanoBERT
1024
+ have used their own train test split due to the general lack of standardized data splits for these datasets).
1025
1028
+ Inference Strategy                                  | HDFS (LogAI) | HDFS (LanoBERT) | BGL (LogAI) | BGL (LanoBERT) | Thunderbird (LogAI) | Thunderbird (LanoBERT)
+ Averaged over all masked tokens:
+   Mean Predictive Loss                              | 0.983        | -               | 0.998       | -              | 0.953               | -
+ Averaged over top-6 most-confident masked tokens:
+   Mean Predictive Loss                              | 0.98         | -               | 0.964       | -              | 0.937               | -
+   Max Predictive Prob.                              | 0.976        | 0.99            | 0.972       | 0.972          | 0.953               | -
+   Max Predictive LogProb.                           | 0.976        | -               | 0.969       | -              | 0.917               | -
+   Mean Predictive Entropy                           | 0.976        | -               | 0.973       | -              | 0.967               | -
1076
+ Table 4: Comparison of LogBERT model performance achieved by our LogAI library and by LanoBERT [39], using the
1077
+ AUROC metric. Both versions of the model are in unsupervised setting (trained on normal logs only) and do not need
1078
+ any log parsing. The dashed (-) cells indicate that there are no reported numbers in the LanoBERT paper corresponding
1079
+ to those configurations.
1080
+ Overall, our experiments on this suite of deep learning based log anomaly detection models suggest that their implemen-
+ tations in the LogAI library are able to reproduce the established performance benchmarks on standard open-source
1082
+ datasets with reasonable accuracy. Additionally, owing to a more generic data processing pipeline we are seamlessly
1083
+ able to extend to a more exhaustive set of experimental settings, than what has been explored or implemented before in
1084
+ existing literature and libraries.
1085
+ 6
1086
+ Conclusion
1087
+ In this technical report we introduced LogAI, an open source library for AI-based log analytics and intelligence. LogAI
1088
+ library uses the same unified log data model as OpenTelemetry to ensure that the analytical processes are agnostic to any
+ log platform that supports OpenTelemetry. LogAI also abstracts common processes in different downstream tasks and
+ provides reusable components to execute these processes. LogAI also provides a large variety of AI capabilities, from
+ time-series analysis and traditional statistical learning to deep learning and pre-trained transformer models. We showed
1092
+ how LogAI can be used to conduct a variety of common log analysis tasks such as log summarization, clustering and
1093
+ anomaly detection and also provide extensive benchmarking results on Log Anomaly Detection. LogAI version v0.1.0
1094
+ is released as open-source code under BSD-3-Clause license. Our team will provide continuous support and further
1095
+ improvements in the future versions.
1096
+ Acknowledgments
1097
+ We would like to thank a number of leaders and colleagues from Salesforce.com Inc. who have provided strong support,
1098
+ advice, and contributions to this open-source project.
1099
+ References
1100
+ [1] The Business Research Company. Log Management Global Market Report. 2023.
1101
+ [2] Shilin He, Jieming Zhu, Pinjia He, and Michael R. Lyu. Experience report: System log analysis for anomaly
1102
+ detection. In 2016 IEEE 27th International Symposium on Software Reliability Engineering (ISSRE), pages
1103
+ 207–218, 2016.
1104
+ [3] Zhuangbin Chen, Jinyang Liu, Wenwei Gu, Yuxin Su, and Michael R. Lyu. Experience report: Deep learning-based
1105
+ system log analysis for anomaly detection. CoRR, abs/2107.05908, 2021.
1106
+ [4] Jiang Zhaoxue, Li Tong, Zhang Zhenguo, Ge Jingguo, You Junling, and Li Liangxiong. A survey on log research
1107
+ of aiops: Methods and trends. Mobile Networks and Applications, pages 1–12, 2022.
1108
+ [5] Shilin He, Jieming Zhu, Pinjia He, and Michael R. Lyu. Experience report: System log analysis for anomaly
1109
+ detection. In 27th IEEE International Symposium on Software Reliability Engineering, ISSRE 2016, Ottawa, ON,
1110
+ Canada, October 23-27, 2016, pages 207–218. IEEE Computer Society, 2016.
1111
+ [6] Zhuangbin Chen, Jinyang Liu, Wenwei Gu, Yuxin Su, and Michael R. Lyu. Experience report: Deep learning-based
1112
+ system log analysis for anomaly detection, 2021.
1113
1116
+ [7] Pinjia He, Jieming Zhu, Zibin Zheng, and Michael R Lyu. Drain: An online log parsing approach with fixed depth
1117
+ tree. In 2017 IEEE international conference on web services (ICWS), pages 33–40. IEEE, 2017.
1118
+ [8] Adetokunbo AO Makanju, A Nur Zincir-Heywood, and Evangelos E Milios. Clustering event logs using iterative
1119
+ partitioning. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and
1120
+ data mining, pages 1255–1264, 2009.
1121
+ [9] Zhen Ming Jiang, Ahmed E Hassan, Gilbert Hamann, and Parminder Flora. An automated approach for abstracting
1122
+ execution logs to execution events. Journal of Software Maintenance and Evolution: Research and Practice,
1123
+ 20(4):249–267, 2008.
1124
+ [10] Juan Ramos et al. Using tf-idf to determine word relevance in document queries. In Proceedings of the first
1125
+ instructional conference on machine learning, volume 242, pages 29–48. Citeseer, 2003.
1126
+ [11] Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. Enriching word vectors with subword
1127
+ information. Transactions of the association for computational linguistics, 5:135–146, 2017.
1128
+ [12] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient estimation of word representations in vector
1129
+ space. arXiv preprint arXiv:1301.3781, 2013.
1130
+ [13] Aadyot Bhatnagar, Paul Kassianik, Chenghao Liu, Tian Lan, Wenzhuo Yang, Rowan Cassius, Doyen Sahoo,
1131
+ Devansh Arpit, Sri Subramanian, Gerald Woo, Amrita Saha, Arun Kumar Jagota, Gokulakrishnan Gopalakrishnan,
1132
+ Manpreet Singh, K C Krithika, Sukumar Maddineni, Daeki Cho, Bo Zong, Yingbo Zhou, Caiming Xiong, Silvio
1133
+ Savarese, Steven Hoi, and Huan Wang. Merlion: A machine learning library for time series. 2021.
1134
+ [14] Bernhard Schölkopf, John C. Platt, John C. Shawe-Taylor, Alex J. Smola, and Robert C. Williamson. Estimating
1135
+ the support of a high-dimensional distribution. Neural Comput., 13(7):1443–1471, jul 2001.
1136
+ [15] Markus M. Breunig, Hans-Peter Kriegel, Raymond T. Ng, and Jörg Sander. Lof: Identifying density-based local
1137
+ outliers. In Proceedings of the 2000 ACM SIGMOD International Conference on Management of Data, SIGMOD
1138
+ ’00, page 93–104, New York, NY, USA, 2000. Association for Computing Machinery.
1139
+ [16] F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss,
1140
+ V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duchesnay. Scikit-learn:
1141
+ Machine learning in Python. Journal of Machine Learning Research, 12:2825–2830, 2011.
1142
+ [17] Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural Computation, 9(8):1735–1780, 1997.
1143
+ [18] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Łukasz Kaiser,
1144
+ and Illia Polosukhin. Attention is all you need. In Proceedings of the 31st International Conference on Neural
1145
+ Information Processing Systems, NIPS’17, page 6000–6010, Red Hook, NY, USA, 2017. Curran Associates Inc.
1146
+ [19] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional
1147
+ transformers for language understanding. In Jill Burstein, Christy Doran, and Thamar Solorio, editors, Proceedings
1148
+ of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human
1149
+ Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short
1150
+ Papers), pages 4171–4186. Association for Computational Linguistics, 2019.
1151
+ [20] D. Sculley. Web-scale k-means clustering. In Proceedings of the 19th International Conference on World Wide
1152
+ Web, WWW ’10, page 1177–1178, New York, NY, USA, 2010. Association for Computing Machinery.
1153
+ [21] Erich Schubert, Jörg Sander, Martin Ester, Hans Peter Kriegel, and Xiaowei Xu. Dbscan revisited, revisited: Why
1154
+ and how you should (still) use dbscan. ACM Trans. Database Syst., 42(3), jul 2017.
1155
+ [22] Jiang Zhaoxue, Li Tong, Zhang Zhenguo, Ge Jingguo, You Junling, and Li Liangxiong. A survey on log research
1156
+ of aiops: Methods and trends. Mob. Netw. Appl., 26(6):2353–2364, dec 2021.
1157
+ [23] Shilin He, Pinjia He, Zhuangbin Chen, Tianyi Yang, Yuxin Su, and Michael R. Lyu. A survey on automated log
1158
+ analysis for reliability engineering. ACM Comput. Surv., 54(6), jul 2021.
1159
+ [24] Paolo Notaro, Jorge Cardoso, and Michael Gerndt. A survey of aiops methods for failure management. ACM
1160
+ Trans. Intell. Syst. Technol., 12(6), nov 2021.
1161
+ [25] Xiao Han and Shuhan Yuan. Unsupervised cross-system log anomaly detection via domain adaptation. In
1162
+ Proceedings of the 30th ACM International Conference on Information & Knowledge Management, CIKM ’21,
1163
+ page 3068–3072, New York, NY, USA, 2021. Association for Computing Machinery.
1164
+ [26] Van-Hoang Le and Hongyu Zhang. Log-based anomaly detection with deep learning: How far are we? In
1165
+ Proceedings of the 44th International Conference on Software Engineering, ICSE ’22, page 1356–1367, New
1166
+ York, NY, USA, 2022. Association for Computing Machinery.
1167
1170
+ [27] Nengwen Zhao, Honglin Wang, Zeyan Li, Xiao Peng, Gang Wang, Zhu Pan, Yong Wu, Zhen Feng, Xidao Wen,
1171
+ Wenchi Zhang, Kaixin Sui, and Dan Pei. An empirical investigation of practical log anomaly detection for online
1172
+ service systems. In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference
1173
+ and Symposium on the Foundations of Software Engineering, ESEC/FSE 2021, page 1404–1415, New York, NY,
1174
+ USA, 2021. Association for Computing Machinery.
1175
+ [28] Yichen Zhu, Weibin Meng, Ying Liu, Shenglin Zhang, Tao Han, Shimin Tao, and Dan Pei. Unilog: Deploy one
1176
+ model and specialize it for all log analysis tasks. CoRR, abs/2112.03159, 2021.
1177
+ [29] Jacopo Soldani and Antonio Brogi. Anomaly detection and failure root cause analysis in (micro) service-based
1178
+ cloud applications: A survey. ACM Comput. Surv., 55(3), feb 2022.
1179
+ [30] Pinjia He, Jieming Zhu, Zibin Zheng, and Michael R. Lyu. Drain: An online log parsing approach with fixed
1180
+ depth tree. In 2017 IEEE International Conference on Web Services (ICWS), pages 33–40, 2017.
1181
+ [31] Adetokunbo A.O. Makanju, A. Nur Zincir-Heywood, and Evangelos E. Milios. Clustering event logs using
1182
+ iterative partitioning. In Proceedings of the 15th ACM SIGKDD International Conference on Knowledge Discovery
1183
+ and Data Mining, KDD ’09, page 1255–1264, New York, NY, USA, 2009. Association for Computing Machinery.
1184
+ [32] Zhen Ming Jiang, Ahmed E. Hassan, Parminder Flora, and Gilbert Hamann. Abstracting execution logs to
1185
+ execution events for enterprise applications (short paper). In 2008 The Eighth International Conference on Quality
1186
+ Software, pages 181–186, 2008.
1187
+ [33] Mostafa Farshchi, Jean-Guy Schneider, Ingo Weber, and John Grundy. Experience report: Anomaly detection of
1188
+ cloud application operations using log and cloud metric correlation analysis. In 2015 IEEE 26th International
1189
+ Symposium on Software Reliability Engineering (ISSRE), pages 24–34, 2015.
1190
+ [34] Weibin Meng, Ying Liu, Yichen Zhu, Shenglin Zhang, Dan Pei, Yuqing Liu, Yihao Chen, Ruizhi Zhang, Shimin
1191
+ Tao, Pei Sun, and Rong Zhou. Loganomaly: Unsupervised detection of sequential and quantitative anomalies in
1192
+ unstructured logs. In Proceedings of the 28th International Joint Conference on Artificial Intelligence, IJCAI’19,
1193
+ page 4739–4745. AAAI Press, 2019.
1194
+ [35] Min Du, Feifei Li, Guineng Zheng, and Vivek Srikumar. Deeplog: Anomaly detection and diagnosis from
1195
+ system logs through deep learning. In Proceedings of the 2017 ACM SIGSAC Conference on Computer and
1196
+ Communications Security, CCS ’17, page 1285–1298, New York, NY, USA, 2017. Association for Computing
1197
+ Machinery.
1198
+ [36] Xu Zhang, Yong Xu, Qingwei Lin, Bo Qiao, Hongyu Zhang, Yingnong Dang, Chunyu Xie, Xinsheng Yang, Qian
1199
+ Cheng, Ze Li, Junjie Chen, Xiaoting He, Randolph Yao, Jian-Guang Lou, Murali Chintalapati, Furao Shen, and
1200
+ Dongmei Zhang. Robust log-based anomaly detection on unstable log data. In Proceedings of the 2019 27th ACM
1201
+ Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software
1202
+ Engineering, ESEC/FSE 2019, page 807–817, New York, NY, USA, 2019. Association for Computing Machinery.
1203
+ [37] Siyang Lu, Xiang Wei, Yandong Li, and Liqiang Wang. Detecting anomaly in big data system logs using
1204
+ convolutional neural network. In 2018 IEEE 16th Intl Conf on Dependable, Autonomic and Secure Computing,
1205
+ 16th Intl Conf on Pervasive Intelligence and Computing, 4th Intl Conf on Big Data Intelligence and Computing and
1206
+ Cyber Science and Technology Congress, DASC/PiCom/DataCom/CyberSciTech 2018, Athens, Greece, August
1207
+ 12-15, 2018, pages 151–158. IEEE Computer Society, 2018.
1208
+ [38] Sasho Nedelkoski, Jasmin Bogatinovski, Alexander Acker, Jorge Cardoso, and Odej Kao.
1209
+ Self-attentive
1210
+ classification-based anomaly detection in unstructured logs. In 2020 IEEE International Conference on Data
1211
+ Mining (ICDM), pages 1196–1201, 2020.
1212
+ [39] Yukyung Lee, Jina Kim, and Pilsung Kang. Lanobert : System log anomaly detection based on BERT masked
1213
+ language model. CoRR, abs/2111.09564, 2021.
1214
0dFQT4oBgHgl3EQf0Daw/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
1dAyT4oBgHgl3EQf1fkT/content/tmp_files/2301.00734v1.pdf.txt ADDED
@@ -0,0 +1,1547 @@
1
+ Nonlinear Non-Hermitian Landau-Zener-St¨uckelberg-Majorana interferometry
2
+ Xin Wang,1 H. D. Liu,1, ∗ and L. B. Fu2, †
3
+ 1Center for Quantum Sciences and School of Physics, Northeast Normal University, Changchun 130024, China
4
+ 2Graduate School of China Academy of Engineering Physics,
5
+ No. 10 Xibeiwang East Road, Haidian District, Beijing, 100193, China
6
+ (Dated: January 3, 2023)
7
+ In this work, we have studied the non-Hermitian nonlinear LZSM interferometry in a non-Hermitian N-body
8
+ interacting boson system in which the non-Hermicity is from the nonreciprocal tunnelings between the bosons.
9
+ By using the mean-field approximation and projective Hilbert space, the effect of nonreciprocity and nonlin-
10
+ earity on the energy spectrum, the dynamics, and the formation of the interference fringes have been studied.
11
+ The different symmetries and the impact of the two different types of reciprocity, i.e. the in-phase tunneling and
12
+ anti-phase tunneling, on the energy spectrum and the phase transition between the Josephson oscillation and the
13
+ self-trapping have been investigated. For the LZSM interferometry, the strength of the nonreciprocity is found
14
+ to take an essential role in the population of the projective state and the strengths of the interference patterns in
15
+ the projective space, while the conditions of destructive and constructive interference under the weak-coupling
16
+ approximation still only depend on the strength of nonlinearity. Our result provides an application of the non-
17
+ linear non-Hermitian LZSM interferometry in studying the parameters of a non-Hermitian nonlinear two-level
18
+ system which are related to the nonlinearity and the non-Hermiticity.
19
+ I.
20
+ INTRODUCTION
21
+ The quantum two-level system (TLS) is the most basic part
22
+ of physical systems. Among them, the Landau-Zener (LZ)
23
+ transition between two levels at an avoided crossing [1–3]
24
+ has received widespread attention. When these two-level sys-
25
+ tems are under a strong periodic driving field, a series of
26
+ LZ transitions occur and the transition probability exhibits a
27
+ periodic dependence on the phase (St¨uckelberg phase) accu-
28
+ mulated between transitions [1, 4]. The periodic change is
29
+ called Landau-Zener-St¨uckelberg-Majorana(LZSM) interfer-
30
+ ometry [5, 6]. With the development of research, LZSM inter-
31
+ ferometry has become an important phenomenon in quantum
32
+ science and technology. On the one hand, LZSM interfer-
33
+ ometry is used for ultra-fast universal quantum control of a
34
+ quantum-dot charge qubit [7] and characterized qubit dephas-
35
+ ing [8], etc. On the other hand, it has involved many fields
36
+ so far, such as molecular nanomagnets [9, 10], quasi-one-
37
+ dimensional layered materials [11, 12], ultracold molecules
38
+ [13], quantum noise [14], Bose-Einstein condensates [15–19],
39
+ Rydberg atoms [20], etc. Interestingly, if a two-level system
40
+ takes account of the nonlinear interaction, it may produce un-
41
+ expected interference features [21–26]. For the non-linear LZ
42
+ model, the self-trapping phase transition may occur in LZSM
43
+ interferometry [27–31], and there may be exceptional ring
44
+ structures in the energy spectra [32, 33].
45
+ In recent years, the non-Hermitian quantum systems with
46
+ real energy spectra received widespread attention in the-
47
+ ory and experiment [34–41]. There are two kinds of non-
48
+ Hermiticity: asymmetric coupling strengths in nonreciprocal
+ systems and gain-loss in reciprocal systems.
50
+ There are
51
+ two kinds of non-Hermitian Hamiltonians, describing nonre-
52
+ ciprocal systems with asymmetric coupling strengths [42–46]
53
+ ∗ liuhd100@nenu.edu.cn
54
+ † lbfu@gscaep.ac.cn
55
+ and gain-loss systems [37–41]. Bender and Boettcher dis-
56
+ covered a series of parity-time (PT) -symmetric Hamiltonians
57
+ [47], which could result in real energy spectra. Mostafazadeh
58
+ generalized this type of Hamiltonian to a η-pseudo-Hermitian
59
+ quantum theory which explains the conditions for the non-
60
+ Hermitian system to have the real energy spectra (η is a pos-
61
+ itive Hermitian operator) [48–50]. The theory has been ap-
62
+ plied in many fields for more than ten years of development,
63
+ such as quantum field theory [51–55], super-symmetric quan-
64
+ tum mechanics [56, 57], non-commutative field theory [58],
65
+ quantum information [59], etc. Especially, there always ex-
66
+ ists some exceptional points (EPs) in the real energy spec-
67
+ trum of the non-Hermitian system [60, 61], at which two or
68
+ more eigenstates of the system coalesce. These EPs of the en-
69
+ ergy spectrum in the parameter space are closely related to the
70
+ symmetry, topological properties, and phase transitions of the
71
+ system [34–36]. Consequently, efforts have been put forward
72
+ to extend the study of LZ problem to non-Hermitian system
73
+ [6, 62–65]. Therefore, for non-Hermitian systems and nonlin-
74
+ ear LZSM interference, it is natural to ask how the en-
+ ergy spectrum of the nonlinear LZ system changes if non-
+ Hermiticity emerges. Will nonlinearity affect the EPs? Since
+ the populations of the bare states on the adiabatic eigenstates
+ normally cannot be normalized by a time-independent coeffi-
+ cient [66], can the interesting self-trapping effect still be ob-
+ served in the nonlinear non-Hermitian case? We shed light
81
+ on these questions in this paper. By setting up the projec-
82
+ tive Hilbert space, we show that the populations of the projec-
83
+ tive quantum states can still achieve LZSM interferometry and
84
+ analyzed the influence of non-Hermicity and nonlinearity on
85
+ the energy spectra and the interference. Then, we discussed
86
+ the influence of non-Hermiticity on the self-trapping effect. Fi-
87
+ nally, under the weak-coupling approximation of the projec-
88
+ tive quantum states, we further demonstrated the validity and
89
+ accuracy of the proposed method.
90
+ The structure of the paper is as follows.
91
+ In Sec.II, we
92
+ introduce a non-Hermitian N-body interacting boson system
93
+ which is equivalent to a nonlinear nonreciprocal two-level
94
+ arXiv:2301.00734v1 [quant-ph] 2 Jan 2023
95
97
+ system with periodic driving in the mean-field approxima-
98
+ tion, and discussed the energy spectrum of this two-level sys-
99
+ tem. In Sec.III, the influence of nonlinear strength and non-
100
+ Hermiticity on LZSM interferometry and the self-trapping ef-
101
+ fects has been studied. Under the weak-coupling limit, the
102
+ non-Hermicity does not affect the conditions of destructive
103
+ interference and constructive interference. Finally, the con-
104
+ clusions are summarized in Sec.IV.
105
+ II.
106
+ NONLINEAR NONHERMITIAN TWO-LEVEL MODEL
107
+ The second quantized Hamiltonian of a nonreciprocal
108
+ interacting-boson system is
109
+ \hat{H}_0 = \frac{\gamma}{2}(\hat{a}^\dagger\hat{a} - \hat{b}^\dagger\hat{b}) + \frac{\Delta_2}{2}\hat{a}^\dagger\hat{b} + \frac{\Delta_1}{2}\hat{a}\hat{b}^\dagger - \frac{c}{4N}(\hat{a}^\dagger\hat{a} - \hat{b}^\dagger\hat{b})^2,   (1)
114
+ where annihilation operators ˆa, ˆb and generation operators
115
+ ˆa†, ˆb† are for the different quantum states that are the left and
116
+ right well in the double-well BEC system. γ = A sin(ωt) + ϵ0
117
+ is the monochromatic driving field with amplitude A, fre-
118
+ quency ω, and offset ϵ0. c is the interaction strength between
119
+ bosons, ∆i (i = 1, 2) is the tunneling amplitude. When the
120
+ total number of bosons N → ∞, all particles are assumed to
121
+ be in the same spin coherent state in the mean-field approx-
122
+ imation [67, 68]. Considering that the quantum states of the
123
+ non-Hermitian system are in a dual Hilbert space to keep the
124
+ normalize condition [50], the selected coherent states need to
125
+ be defined by both left and right states as
126
+ |\Psi^r_{sc}\rangle = \frac{1}{\sqrt{N!}}(\alpha_1\hat{a}^\dagger + \beta_1\hat{b}^\dagger)^N|\emptyset\rangle, \quad |\Psi^l_{sc}\rangle = \frac{1}{\sqrt{N!}}(\alpha_2\hat{a}^\dagger + \beta_2\hat{b}^\dagger)^N|\emptyset\rangle.   (2)
139
+ Based on this, we derive the semi-classical Hamiltonian (see
140
+ Appendix. A)
141
+ \hat{H}_M = \frac{\langle\Psi^l_{sc}|\hat{H}_0|\Psi^r_{sc}\rangle}{N} = \frac{\gamma}{2}(\alpha_1\alpha_2^* - \beta_1\beta_2^*) + \frac{\Delta_2}{2}\alpha_2^*\beta_1 + \frac{\Delta_1}{2}\alpha_1\beta_2^* - \frac{c}{4}(\beta_1\beta_2^* - \alpha_1\alpha_2^*)^2,   (3)
157
+ by the dynamical evolution of the semiclassical Hamiltonian
158
+ [67]
159
+ i\dot{\alpha}_1 = \frac{\partial \hat{H}_M}{\partial \alpha_2^*}, \qquad i\dot{\beta}_1 = \frac{\partial \hat{H}_M}{\partial \beta_2^*},   (4)
168
+ we can construct the following dimensionless Schr¨odinger
169
+ equation
170
+ i\frac{\partial}{\partial t}\begin{pmatrix}\alpha_1\\ \beta_1\end{pmatrix} = \hat{H}_{mF}\begin{pmatrix}\alpha_1\\ \beta_1\end{pmatrix},   (5)
183
+ with the MF Hamiltonian
184
+ \hat{H}_{mF} = \begin{pmatrix} \frac{\gamma}{2} + \frac{c}{2}(\beta_1\beta_2^* - \alpha_1\alpha_2^*) & \frac{\Delta_1}{2} \\ \frac{\Delta_2}{2} & -\frac{\gamma}{2} - \frac{c}{2}(\beta_1\beta_2^* - \alpha_1\alpha_2^*) \end{pmatrix},   (6)
202
+ FIG. 1. Time evolution of the energy levels for different offsets: (a)
235
+ ϵ0 = 0 and (b) ϵ0 = 5, where A = 10, ω = 1 and ∆1∆2 > 0. The
236
+ time-dependent adiabatic energy levels (i.e., ∆ = 1) are shown by the
237
+ red (c = 0) and black (c = 3) dashed lines, while the diabatic energy
238
+ levels (i.e., ∆ = 0 ) are shown by the blue (c = 0) and green (c = 3)
239
+ solid lines.
240
+ and state |ψr⟩ = (α1, β1)T. Therefore, the model Hamiltonian
241
+ under periodic driving can be described by a nonlinear nonre-
242
+ ciprocal two-level Hamiltonian
243
+ \hat{H} = \frac{\Delta_1+\Delta_2}{4}\hat{\sigma}_x + \frac{\Delta_1-\Delta_2}{4}\,i\hat{\sigma}_y + \frac{\gamma(t) + c(\beta_1\beta_2^* - \alpha_1\alpha_2^*)}{2}\hat{\sigma}_z,   (7)
252
+ where ˆσx,y,z are the Pauli matrices, α1, α2, β1, β2 are the prob-
253
+ ability amplitudes. The dynamic equations of the system are
254
+ [50]
255
+ i\frac{\partial}{\partial t}|\psi^r\rangle = \hat{H}|\psi^r\rangle, \qquad i\frac{\partial}{\partial t}|\psi^l\rangle = \hat{H}^\dagger|\psi^l\rangle,   (8)
260
+ where ⟨ψl|ψr⟩ = 1 and the quantum states
261
+ |ψr⟩ = α1 |↑⟩ + β1 |↓⟩ ,
262
+ |ψl⟩ = α2 |↑⟩ + β2 |↓⟩
263
+ (9)
264
+ are represented under the diabatic basis {|↑⟩ , |↓⟩} with spin
265
+ eigenstates |↑⟩ and |↓⟩.
266
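+ A minimal numerical sketch of these coupled dynamics (not taken from the paper's appendices): it integrates Eqs. (5)-(8) with a fixed-step RK4 scheme and tracks the projective population |\tilde{a}|^2 = |\alpha_1|^2/(|\alpha_1|^2+|\beta_1|^2) introduced later in Sec. III; all parameter values are placeholders.
+
+ import numpy as np
+
+ def hamiltonian(t, psi_r, psi_l, d1, d2, c, A, omega, eps0):
+     """Nonlinear nonreciprocal two-level Hamiltonian of Eq. (7)/(6); the nonlinear
+     term depends on the instantaneous left and right states."""
+     a1, b1 = psi_r
+     a2, b2 = psi_l
+     gamma = A * np.sin(omega * t) + eps0
+     diag = 0.5 * (gamma + c * (b1 * np.conj(b2) - a1 * np.conj(a2)))
+     return np.array([[diag, d1 / 2.0], [d2 / 2.0, -diag]], dtype=complex)
+
+ def rhs(t, y, params):
+     """Coupled equations (8): i d|psi_r>/dt = H|psi_r>, i d|psi_l>/dt = H^dagger|psi_l>."""
+     psi_r, psi_l = y[:2], y[2:]
+     H = hamiltonian(t, psi_r, psi_l, *params)
+     return np.concatenate([-1j * H @ psi_r, -1j * H.conj().T @ psi_l])
+
+ def evolve(y0, params, t_max, dt=1e-3):
+     """Fixed-step RK4 integration; returns times and the projective population."""
+     y = np.array(y0, dtype=complex)
+     times, populations = [], []
+     for n in range(int(t_max / dt)):
+         t = n * dt
+         k1 = rhs(t, y, params)
+         k2 = rhs(t + dt / 2, y + dt / 2 * k1, params)
+         k3 = rhs(t + dt / 2, y + dt / 2 * k2, params)
+         k4 = rhs(t + dt, y + dt * k3, params)
+         y = y + dt / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
+         times.append(t + dt)
+         populations.append(abs(y[0])**2 / (abs(y[0])**2 + abs(y[1])**2))
+     return np.array(times), np.array(populations)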
+ For the adiabatic basis, the left and right instantaneous
267
+ eigenstates of the time-dependent Hamiltonian ˆH are derived
268
+ by [50]
+ \hat{H}|\varphi^r_n\rangle = E_n|\varphi^r_n\rangle, \qquad \hat{H}^\dagger|\varphi^l_n\rangle = E_n^*|\varphi^l_n\rangle,   (10)
277
+ where \langle\varphi^l_m|\varphi^r_n\rangle = \delta_{nm} (n = 1, 2), the eigenenergies E_n(t) are
280
+ determined by the quartic equation (see Appendix. B)
281
+ E^4 + cE^3 + \frac{1}{4}(c^2 - \gamma^2 - \Delta_1\Delta_2)E^2 - \frac{c\Delta_1\Delta_2}{4}E - \frac{\Delta_1\Delta_2 c^2}{16} = 0.   (11)
287
+ By solving equation (11), we draw the energy spectrum of the
288
+ system (7) (see Fig.1 and Fig.2). The two parameters
289
+ \Delta \equiv \sqrt{|\Delta_1\Delta_2|}, \qquad k \equiv \sqrt{|\Delta_1/\Delta_2|}   (12)
296
330
+ FIG. 2. Time evolution of the energy levels for different offsets: (a)
331
+ ϵ0 = 0 and (b) ϵ0 = 5, where A = 10, ω = 1 and ∆1∆2 < 0. The
332
+ time-dependent adiabatic energy levels (i.e., ∆ = √|∆1∆2| = 1) are
333
+ shown by the red (c = 0) and black (c = 3) dashed lines, while the
334
+ diabatic energy levels (i.e., ∆ = 0 ) are shown by the blue (c = 0) and
335
+ green (c = 3) solid lines.
336
+ are introduced to describe the mean tunneling amplitude and
337
+ the nonreciprocity.
338
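+ Since Eq. (11) is an ordinary quartic in E, the adiabatic levels can be obtained numerically; the following sketch (illustrative only, with parameter values chosen to mimic Fig. 1) simply feeds its coefficients to numpy.roots:
+
+ import numpy as np
+
+ def adiabatic_energies(gamma, c, d1, d2):
+     """Roots of the quartic (11) for given drive gamma(t), nonlinearity c and
+     tunneling amplitudes Delta_1, Delta_2; the real roots give the levels E_n(t)."""
+     d12 = d1 * d2
+     coeffs = [1.0,
+               c,
+               (c**2 - gamma**2 - d12) / 4.0,
+               -c * d12 / 4.0,
+               -d12 * c**2 / 16.0]
+     return np.roots(coeffs)
+
+ # Scan the drive gamma(t) = A*sin(w*t) + eps0 and keep the (numerically) real roots.
+ A, w, eps0, c, d1, d2 = 10.0, 1.0, 0.0, 3.0, 1.0, 1.0
+ spectrum = []
+ for t in np.linspace(0.0, 4 * np.pi / w, 200):
+     roots = adiabatic_energies(A * np.sin(w * t) + eps0, c, d1, d2)
+     spectrum.append(np.sort(roots[np.abs(roots.imag) < 1e-9].real))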
+ In the in-phase tunneling case ∆1∆2 > 0 as shown in Fig.1,
339
+ the energy spectrum of the system (7) is the same as the Her-
340
+ mitian Hamiltonian \hat{H}_h = \frac{\Delta}{2}\hat{\sigma}_x + \frac{\gamma(t)+c(|\beta|^2-|\alpha|^2)}{2}\hat{\sigma}_z. Therefore,
344
+ the Hamiltonian ˆH and quantum states |ψr⟩ of the two non-
345
+ reciprocal systems can be related to the Hermitian system by
346
+ the following relation
347
+ \hat{H}_h = \hat{S}\hat{H}\hat{S}^{-1}, \qquad |\psi\rangle = \hat{S}|\psi^r\rangle = \begin{pmatrix}\alpha_1\\ k\beta_1\end{pmatrix},   (13)
355
+ where \hat{S} = \begin{pmatrix}1 & 0\\ 0 & k\end{pmatrix}. Compared with \hat{H}_h, the nonreciproc-
361
+ ity, which only affects the eigenstates of the system, neither
362
+ changes the eigenvalue nor destroys the symmetry of the sys-
363
+ tem. In the anti-phase tunneling case ∆1∆2 < 0 as shown in
364
+ Fig.2 , the non-adiabatic energy levels have a series of de-
365
+ generate points (EPs) when c = 0 (see the crossing points of
366
+ red dash lines in Fig.2, and the imaginary parts of En are not
367
+ shown). Interestingly, when the nonlinearity is added (c ≠ 0),
368
+ the EPs disappear and the near-degenerate regions are formed
369
+ (see the black dashed lines in Fig.2). When considering the
370
+ offset (ϵ0 ≠ 0), the near-degenerate regions disappear near the
+ times t'_n = \frac{t_1+t_3}{2} + \frac{2n\pi}{\omega} (with n being an integer), the period
+ changes from \frac{n\pi}{\omega} to \frac{2n\pi}{\omega}, and the ring energy levels will tend to
+ degenerate at times t_1 + \frac{2m\pi}{\omega} (with m being an integer) as ϵ0 in-
383
+ creases as shown in Fig.2. Obviously, the nonlinearity affects
384
+ the EPs. By equation (11), En = 0 is the root of the equation
385
+ iff c∆1∆2 = 0. Therefore, the existence of c does not allow the
386
+ existence of EPs in the anti-phase tunneling case ∆1∆2 < 0.
387
+ Next, we analyzed the cases of the existence of real roots of
388
+ FIG. 3. Different regions of the parameter space of c/\Delta and \gamma/\Delta in the anti-phase tunneling case. Region I for f(c/\Delta, \gamma/\Delta) < 0, Region II for \gamma^2/\Delta^2 > 1 when f(c/\Delta, \gamma/\Delta) > 0, Region III for \gamma^2/\Delta^2 < 1. Naturally, when f(c/\Delta, \gamma/\Delta) < 0, the inequality \gamma^2/\Delta^2 > 1 is guaranteed.
417
+ the energy spectrum.
418
+ For the special cases c = 0, the eigenenergies of the system
419
+ are \pm\sqrt{\gamma^2(t) + \Delta_1\Delta_2}. It is easy to find that the EPs emerge
422
+ at γ2(t) = −∆1∆2 in the anti-phase tunneling case ∆1∆2 < 0.
423
+ For c ≠ 0, the nature (real or not) of the roots of the energy
+ equation (11) depends on the sign of
425
+ \delta = -c^2\gamma^2\Delta_1\Delta_2\,\xi,   (14)
+ with \xi = (c^2 - \gamma^2 - \Delta_1\Delta_2)^3 - 27c^2\gamma^2\Delta_1\Delta_2.
428
+ When δ > 0, there are two real roots and a pair of conjugate
429
+ complex roots. The system will always have real eigenener-
430
+ gies. When δ < 0, the equation has four unequal real roots if
431
+ c2 + 2(∆1∆2 + γ2) and (∆1∆2 + γ2)(2c2 + ∆1∆2 + γ2) are both
432
+ positive. Otherwise, the equation has two pairs of unequal
433
+ conjugate complex roots. Obviously, for the in-phase tunnel-
434
+ ing case ∆1∆2 > 0, there always exist real eigenenergies of
435
+ the system.
436
+ For the anti-phase tunneling case with δ < 0, the conditions
437
+ that the energy equation has real roots can be simply described
438
+ as \gamma^2/\Delta^2 > 1 in f(c/\Delta, \gamma/\Delta) = [(c/\Delta)^2 - (\gamma/\Delta)^2 + 1]^3 + 27(c/\Delta)^2(\gamma/\Delta)^2 < 0. In-
+ terestingly, \gamma/\Delta = \pm 1 are exactly the tangent lines of f(c/\Delta, \gamma/\Delta) =
450
+ 0. Therefore, the condition is naturally satisfied (as shown in
451
+ Fig.3), so we get the same conclusion as ∆1∆2 > 0.
452
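+ A small sketch (illustrative only) that evaluates the discriminant of Eq. (14) and the function f(c/\Delta, \gamma/\Delta) of Fig. 3, which together classify whether real eigenenergies exist; the example parameter values are placeholders.
+
+ import numpy as np
+
+ def f_region(c_over_delta, g_over_delta):
+     """The function f(c/Delta, gamma/Delta) used in Fig. 3 for the anti-phase case."""
+     x2, y2 = c_over_delta**2, g_over_delta**2
+     return (x2 - y2 + 1.0)**3 + 27.0 * x2 * y2
+
+ def discriminant(gamma, c, d1, d2):
+     """delta of Eq. (14); its sign decides how many real roots the quartic (11) has."""
+     xi = (c**2 - gamma**2 - d1 * d2)**3 - 27.0 * c**2 * gamma**2 * d1 * d2
+     return -c**2 * gamma**2 * d1 * d2 * xi
+
+ # Anti-phase example (Delta_1*Delta_2 < 0): check whether real eigenenergies survive.
+ gamma, c, d1, d2 = 2.0, 1.0, 1.0, -1.0
+ mean_delta = np.sqrt(abs(d1 * d2))
+ print(discriminant(gamma, c, d1, d2) > 0, f_region(c / mean_delta, gamma / mean_delta) < 0)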
+ Finally, we consider another two special cases: γ = 0 and
+ ξ = 0. The eigenenergies are all complex only when δ = 0,
+ c(∆1∆2 − γ2) = 0, (∆1∆2 + γ2)(2c2 + ∆1∆2 + γ2) = 0 and
+ c2 + 2(∆1∆2 + γ2) < 0. For c ≠ 0 and ∆1∆2 ≠ 0, these
456
+ conditions cannot be satisfied at the same time.
457
+ In a word, the system will always have real eigen energies.
458
+ These results on the nature of the eigenenergies can be explained by the
+ symmetry associated with the different types of nonreciprocity. For the
+ in-phase tunneling case ∆1∆2 > 0, the
461
+ symmetry of the system is unbroken since the system can be
462
+ transformed into a Hermitian one with ˆS. Therefore, real eigenenergies are
+ guaranteed. This is not a necessary result for the anti-phase case ∆1∆2 < 0:
+ although the nonlinearity c makes the EPs disappear in the evolution of En,
+ the eigenvalues of one energy state are still complex. These two cases thus
+ inevitably have different effects on the evolution of the states. So next we
+ analyze the dynamical evolution
469
+
470
+ 4
471
+ FIG. 4.
472
+ The interference patterns of the population probability
473
+ |α1|2 at time t = 50/∆ as a function of ϵ0/∆ and ω/∆ in the state
474
+ (α1(0), β1(0)) = (0, 1), (α2(0), β2(0)) = (0, 1) with (a) c/∆ = 0,
475
+ ∆1∆2 > 0, (b) c/∆ = 1.05, ∆1∆2 > 0, (c) c/∆ = 0, ∆1∆2 < 0,
476
+ and (d) c/∆ = 1.05, ∆1∆2 < 0. The other parameters are chosen
477
+ as k = 2, A/∆ = 2.5. The white area is singular, and |α1|2 tends to
478
+ infinity.
479
+ of the two cases based on the method of the projective Hilbert
480
+ space.
481
+ III.
482
+ NONLINEAR NON-HERMITIAN LZSM
483
+ INTERFEROMETRY
484
+ In the nonlinear Hermitian LZ system, the LZSM interference patterns can be
+ destructive or constructive, which is determined by the Stückelberg phases,
+ and the nonlinearity can strongly change the features of the LZSM
+ interferometry. As shown in Fig. 4, the interference pattern of |α1|2 is
+ axisymmetric for the linear in-phase tunneling case (c = 0, ∆1∆2 > 0). In
+ the nonlinear case (c ≠ 0), the symmetry of the interference pattern is
+ destroyed (as shown in Fig. 4b). When c = 0 and ∆1∆2 < 0, the EPs make the
+ interference patterns divergent and form a singular region (white area in
+ Fig. 4c). It is hard
494
+ to study the influence of each parameter on the features of the LZSM
+ interferometry directly. Next, we introduce the concept of the projective
+ Hilbert space (see Appendix C for details) and find the effect of the
+ nonreciprocity k.
498
+ Through equations (8), without loss of generality, the quantum state |ψr⟩
+ can be defined as
+ |ψr⟩ = e^{µ(t)+iν(t)} | ˜ψ⟩ = e^{µ(t)+iν(t)} (˜a, ˜b)ᵀ,   (15)
+ with the normalization relation ⟨ ˜ψ| ˜ψ⟩ = 1 (µ and ν are two real
+ parameters), where | ˜ψ⟩ = (˜a, ˜b)ᵀ is the quantum state in the projective
+ Hilbert space. Then, we draw the normalized interfer-
513
+ ence patterns |˜a|2 = |α1|2/(|α1|2+|β1|2) (see Fig.5). Comparing
514
+ with |α1|2, the regular dependence of the |˜a|2 interference pattern on the
+ parameters emerges even when c = 0. This is because the
+ LZSM interference is determined by the Stückelberg phases.
517
+ The phases accumulated in the evolution process are retained
518
+ in the quantum states | ˜ψ⟩ in the projective Hilbert space by
519
+ FIG. 5. The interference patterns of the projective state population
520
+ probability |˜a|2 at time t = 50/∆ as a function of ϵ0/∆ and ω/∆ in the
521
+ state (α1(t0), β1(t0)) = (0, 1), (α2(t0), β2(t0)) = (0, 1) in the anti-phase
522
+ tunneling case ∆1∆2 < 0 with (a) c/∆ = 0, k = 2, (b) c/∆ = 1.05, k =
523
+ 2, (c) c/∆ = 0, k = 1/2, and (d) c/∆ = 1.05, k = 1/2.
524
+ removing the divergence caused by the non-Hermitian factor e^{µ(t)}. In
+ Fig. 5, when c = 0, the populations of the corresponding projective quantum
+ states in the singular region of the quantum states are limited to values
+ set by the nonreciprocity k. To further reveal the influence of the
+ parameter k, we next start from the simplest case with c = 0 and then
+ analyze the case with c ≠ 0. Then, we demonstrate the validity and accuracy
+ of the proposed method and numerical results in the
+ weak-coupling limit.
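+ The mapping to the projective Hilbert space can be sketched in a few lines of
+ Python (an illustration of Eq. (15), not the full dynamics):
+
+ import numpy as np
+
+ def projective_populations(alpha1, beta1):
+     """Strip the factor e^{mu+i*nu} of Eq. (15) from a (generally
+     non-normalized) right state and return |a~|^2 and |b~|^2."""
+     norm2 = abs(alpha1)**2 + abs(beta1)**2
+     return abs(alpha1)**2 / norm2, abs(beta1)**2 / norm2
+
+ # Even if the raw amplitudes diverge (large mu), the projective
+ # populations stay finite and sum to one.
+ print(projective_populations(3e8 + 1e8j, 2e8))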
533
+ A.
534
+ The effect of nonreciprocity and the projective quantum
535
+ states in the linear non-Hermitian system
536
+ Assuming c = 0, the Hamiltonian of the system (7) becomes
+ ˆHmF = ( γ/2, ∆1/2 ; ∆2/2, −γ/2 ),   (16)
551
+ where ∆1∆2 < 0. Considering the quantum state |ψr⟩ = e^{µ+iν}| ˜ψ⟩ =
+ e^{µ+iν}(˜a, ˜b)ᵀ and Eq. (8), one can get
+ ˙µ = −(i/2)⟨ ˜ψ| ˆH − ˆH†| ˜ψ⟩,
+ ˙ν = −(1/2)⟨ ˜ψ| ˆH + ˆH†| ˜ψ⟩ + i⟨ ˜ψ| ˙˜ψ⟩.   (17)
562
+ Substituting Eq. (17) and the definition | ˜ψ⟩ = (˜a, ˜b)ᵀ ≡ (sin(θ/2) e^{iϕ}, cos(θ/2))ᵀ
+ into equation (8), we have (see Appendix C for
575
+
576
649
+ FIG. 6. The dynamical evolution trajectory of the projective right quantum
+ state of the system (16) on the Bloch sphere with different nonreciprocities:
+ (a) k = 2 and (b) k = 1/2. The numerical simulation parameters are A/∆ = 2.5,
+ ϵ0 = 0, and the initial condition is (˜a, ˜b) = (0, 1). The z-axis coordinates
+ of the points of the red dashed circle on the Bloch sphere are
+ z0 = cos θ0 = (1 − k²)/(1 + k²).
658
+ details)
+ ˙θ = −∆1 sin ϕ cos²(θ/2) − ∆2 sin ϕ sin²(θ/2),
+ ˙ϕ = −γ − (∆1/2) cot(θ/2) cos ϕ + (∆2/2) tan(θ/2) cos ϕ,
+ ˙µ = [(∆2 − ∆1)/4] sin θ sin ϕ,
+ ˙ν = γ/2 − (∆2/2) tan(θ/2) cos ϕ.   (18)
675
+ For ϵ0 = 0, when the time is long enough, the projective state will always
+ lie on a certain circle (˙θ = 0) of the Bloch sphere (see Fig. 6). By
+ Eq. (18), we can obtain the equation of the circle where the projective
+ quantum state finally lies. Surprisingly, we find the correlation between k
+ and θ0 = lim_{t→∞} θ as
+ k² = tan²(θ0/2).   (19)
+ Therefore, in combination with Fig. 5, we can explain why |˜a|2 is limited to
+ a certain value in the singular region.
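+ A minimal numerical check of Eq. (19) (assuming the nonreciprocal couplings are
+ parameterized as ∆1 = k∆ and ∆2 = −∆/k in the anti-phase case, which is our
+ reading of the model, so that −∆1/∆2 = k²):
+
+ import numpy as np
+
+ # Assumed parameterization of the nonreciprocal couplings (anti-phase case).
+ k, Delta = 2.0, 1.0
+ D1, D2 = k * Delta, -Delta / k
+
+ # theta_dot in Eq. (18) vanishes for every phi on the circle
+ # D1*cos^2(theta/2) + D2*sin^2(theta/2) = 0, i.e. tan^2(theta0/2) = -D1/D2 = k^2.
+ theta0 = 2.0 * np.arctan(np.sqrt(-D1 / D2))
+ phi = np.linspace(0.0, 2.0 * np.pi, 7)
+ theta_dot = -D1 * np.sin(phi) * np.cos(theta0 / 2)**2 - D2 * np.sin(phi) * np.sin(theta0 / 2)**2
+ print("tan^2(theta0/2) =", np.tan(theta0 / 2)**2, " vs k^2 =", k**2)            # Eq. (19)
+ print("z0 = cos(theta0) =", np.cos(theta0), " vs (1-k^2)/(1+k^2) =", (1 - k**2) / (1 + k**2))
+ print("max |theta_dot| on this circle:", np.max(np.abs(theta_dot)))             # ~ 0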
685
+ B.
686
+ The influence of interaction and non-Hermiticity on
687
+ population in the projective Hilbert space
688
+ In the nonlinear Hermitian system [33], i.e., ∆ = ∆1 = ∆2,
689
+ when ϵ0 = 0 and A ≪ ω, the population of the system will
690
+ have the self-trapping phase transition and the Josephson os-
691
+ cillation under the different nonlinearities, and the boundary
692
+ line is c/∆ = 2[67, 69]. Based on this, we next study the non-
693
+ linear non-Hermitian LZSM interference patterns for ϵ0 = 0
694
+ with different nonlinearities c, non-Hermitian parameters k
695
+ and mean amplitudes ∆ [see Fig.7 and Fig.9].
696
+ Firstly, we consider the in-phase tunneling case ∆1∆2 > 0,
697
+ where the symmetry of the system is unbroken. For the Her-
698
+ mitian Hamiltonian ˆHh, near the boundary of two different os-
699
+ cillations, the maximum population of the self-trapping region
700
+ is 0.5, and then the amplitude gradually decreases with the in-
701
+ crease of c/∆. The populations of the state for non-Hermitian
702
+ FIG. 7. The nonlinear non-Hermitian LZSM interference patterns with different
+ nonreciprocities, (a) k = 2 and (b) k = 1/2, for weak driving at ϵ0 = 0 in the
+ in-phase tunneling case ∆1∆2 > 0: the projective population |˜a|2 as a function
+ of ∆/ω and c/ω for A/ω = 0.05 from the initial time t0 = 0 to t = 2π/ω. The red
+ dashed-dotted line (with slope 1/2) is plotted to denote the boundary between
+ the different oscillations.
709
+ Hamiltonian ˆH with ∆1 ≠ ∆2 differ from those for the Hermitian Hamiltonian
+ ˆHh only by a weight of k, as shown in Eq. (13). Therefore, we get
+ |˜a|2 = k2|˜b|2 at the boundary, similarly to the Hermitian case, and the
+ boundary line c/∆ = 2 (red dashed line in Fig. 7) between the two regions
+ (self-trapping and Josephson oscillation) is the same as that in the
+ Hermitian system. The amplitude of the population of the projective quantum
+ state is determined by the nonreciprocity k, as shown in Fig. 7(a) and (b).
+ Then, we consider the dynamical evolution of the projective quantum state
+ near the boundary; by Eqs. (8) and (15), one can obtain
720
+ ˙θr = Im A sin θr − ∆1 sin ϕr cos²(θr/2) − ∆2 sin ϕr sin²(θr/2),
+ ˙ϕr = −γ − Re A − (∆1/2) cot(θr/2) cos ϕr + (∆2/2) tan(θr/2) cos ϕr,
+ ˙µr = −(Im A/2) cos θr + [(∆2 − ∆1)/4] sin θr sin ϕr,
+ ˙νr = γ/2 + Re A/2 − (∆2/2) tan(θr/2) cos ϕr,   (20)
+ with the right quantum state
+ |ψr⟩ = (α1, β1)ᵀ = e^{µr+iνr} (˜a, ˜b)ᵀ = e^{µr+iνr} (sin(θr/2) e^{iϕr}, cos(θr/2))ᵀ, and
756
+ ˙θl = −Im A sin θl − ∆2 sin ϕl cos²(θl/2) − ∆1 sin ϕl sin²(θl/2),
+ ˙ϕl = −γ − Re A − (∆2/2) cot(θl/2) cos ϕl + (∆1/2) tan(θl/2) cos ϕl,
+ ˙µl = (Im A/2) cos θl + [(∆1 − ∆2)/4] sin θl sin ϕl,
+ ˙νl = γ/2 + Re A/2 − (∆1/2) tan(θl/2) cos ϕl,   (21)
+ with the left quantum state
+ |ψl⟩ = (α2, β2)ᵀ = e^{µl+iνl} (˜al, ˜bl)ᵀ = e^{µl+iνl} (sin(θl/2) e^{iϕl}, cos(θl/2))ᵀ,
+ where A ≡ c(α1α2* − β1β2*). By numerical
794
+ simulation, we give the dynamical evolution of the projective
795
+ right state on the Bloch sphere near the boundary c/∆ = 2 in
796
+ Fig.8.
797
+
798
815
+ FIG. 8. The dynamics of the projective states represented by the
816
+ trajectories spherical coordinates (θ, φ) on the Bloch sphere in the
817
+ in-phase tunneling case ∆1∆2 > 0 with different strengths of nonlin-
818
+ earity and nonreciprocity: (a) c/∆ = 1.9, k = 2, (b) c/∆ = 2, k = 2,
819
+ (c) c/∆ = 2.1, k = 2, (d) c/∆ = 1.9, k = 1/2, (e) c/∆ = 2, k = 1/2,
820
+ and (f) c/∆ = 2.1, k = 1/2. The other parameters are chosen as
821
+ A/ω = 0.05, ϵ0 = 3, and the initial state is (˜a, ˜b) = (0, 1). The z-axis
+ coordinates of the red dashed circle on the Bloch sphere are
+ z0 = cos θ0 = (1 − k²)/(1 + k²), and the z-axis coordinates of the green
+ dashed circle on the Bloch sphere are z′0 = 0.
829
+ When c/∆ > 2, the projective states can only evolve on the surface of the
+ Bloch sphere above the red dashed circle, as shown in Fig. 8 (b), (c), (e)
+ and (f). The red circle represents the projective states whose relative
+ population difference |˜b|2 − |˜a|2 is (1 − k²)/(k² + 1) = cos θ0. By
+ |˜a|2 = k2|˜b|2 and the normalization condition, cos θ0 = |˜b|2 − |˜a|2 labels
+ the boundary between the self-trapping region and the Josephson oscillation
+ region. As discussed before, the nonreciprocity k does not affect the
+ constructive and destructive interference, but it affects the relative
+ population difference of the state. When k is larger, the relative population
+ difference at the boundary between the two regions is smaller [see the red
+ circles in Fig. 8(a-c) and (d-f)] and the projective population probability
+ |˜a|2 is smaller [see Fig. 7 (a) and (b)].
844
+ For the anti-phase tunneling case ∆1∆2 < 0, because of the existence of EPs
+ in the linear case c = 0, the projective quantum states reach the
+ self-trapping region no matter how weak the nonlinearity is. The trajectories
+ of the projective states on the Bloch sphere always stay above the red dashed
+ circles, which label the boundaries between the self-trapping region and the
+ Josephson oscillation region, as shown in Fig. 9. The maximum population of
+ the projective quantum state is still affected by the nonreciprocity k, as
+ shown in Eq. (19) and Fig. 10(a-d).
854
+ FIG. 9. The nonlinear non-Hermitian LZSM interference patterns
855
+ with different nonreciprocities (a) k = 2 and (b) k = 1/2 for weak
856
+ driving at ϵ0 = 0 and the anti-phase tunneling case ∆1∆2 < 0: the
857
+ projective population |˜a|2 as a function of ∆/ω and c/ω for A/ω =
858
+ 0.05 from the initial time t0 = 0 to t = 2π/ω.
859
+ FIG. 10. The dynamics of the projective states represented by the tra-
860
+ jectories spherical coordinates (θ, φ) on the Bloch sphere in the anti-
861
+ phase tunneling case ∆1∆2 < 0 with different strengths of nonlinear-
862
+ ity and nonreciprocity: (a) c/∆ = 0.1, k = 2, (b) c/∆ = 1, k = 2, (c)
863
+ c/∆ = 0.1, k = 1/2, and (d) c/∆ = 1, k = 1/2. The other parameters
864
+ are chosen as A/ω = 0.05, ϵ0 = 3, and the initial state is (˜a, ˜b) = (0, 1).
+ The z-axis coordinates of the red dashed circle on the Bloch sphere are
+ z0 = cos θ0 = (1 − k²)/(1 + k²), and the z-axis coordinates of the green
+ dashed circle on the Bloch sphere are z′0 = 0.
872
+ Comparing Fig. 10(b) and (d) with Fig. 10(a) and (c), it is easy to find that
+ the stronger the nonlinearity, the stronger the degree of the self-trapping
+ effect.
875
+ C.
876
+ Weak-coupling limit of the projective quantum states:
877
+ ∆ ≪ ω
878
+ When the weak-coupling limit is considered, transitions between the adiabatic
+ energy levels are difficult in the near-degenerate region. In this
+ approximation, we only assume |˜ag(t)|2 ∼ |˜ag(t0)|2 and |˜bg(t)|2 ∼ |˜bg(t0)|2,
+ where g = r, l. Assuming that the initial condition is
+ (˜ag(t0), ˜bg(t0)) = (0, 1), the quantum state can always be written in the
+ following form:
+ |ψg(t)⟩ = e^{µg(t)+iνg(t)} (0, 1)ᵀ,   (22)
891
+
892
954
+ FIG. 11. Time evolution of the projective population probability |˜a|2
955
+ for weak coupling in the in-phase tunneling case ∆1∆2 > 0, with
956
+ different nonlinearities: (a) c/ω = 0, k = 2, (b) c/ω = 0.5, k = 2 and
957
+ (c) c/ω = 1, k = 2. (d) c/ω = 0, k = 1/2, (e) c/ω = 0.5, k = 1/2
958
+ and (f) c/ω = 1, k = 1/2. The other parameters are A/ω = 10.5,
959
+ ∆/ω = 0.05, and ϵ0/ω = 3.
960
+ where g = r, l. By Eqs. (8), (17) and (22), we get
+ ˙µr(t) + i˙νr(t) + ˙µl(t) − i˙νl(t) = 0. This means
+ β1(t)β2*(t) − α1(t)α2*(t) ∼ β1(t0)β2*(t0) − α1(t0)α2*(t0).   (23)
968
+ Based on this approximation, we can transform the dynamics of the system from
+ the Schrödinger picture to the Dirac picture by introducing the gauge
+ transformation φr(t) = U(t)ϕr(t) [U(t) built from the phase
+ (ϵ0/2)t − A cos(ωt)/(2ω) + (c/2)(β1β2* − α1α2*), with ϕr(t) = [˜α1, ˜β1]ᵀ] [33].
+ Under the new basis, the nonlinear dynamic Eqs. (8) become
+ (assuming ∆1 > 0):
980
+ i ∂/∂t (˜α1, ˜β1)ᵀ = ( 0, kΩ ; (−1)^j Ω*/k, 0 ) (˜α1, ˜β1)ᵀ,   (24)
+ and
+ i ∂/∂t (˜α2, ˜β2)ᵀ = ( 0, (−1)^j Ω*/k ; kΩ, 0 ) (˜α2, ˜β2)ᵀ,   (25)
+ with
+ Ω = (∆/2) e^{iΦ(t)},   Φ(t) = ϵ0 t − A cos(ωt)/ω + c t,   (26)
1021
+ and j = 1, 2 corresponding to the anti-phase case ∆2 < 0
1022
+ and in-phase case ∆2 > 0, respectively. Ω denotes the field-
1023
+ induced Rabi frequency where Φ(t) is the relative phase of
1024
+ two diabatic energy levels. The nonreciprocity k in front of
1025
1052
+ FIG. 12. Time evolution of the projective quantum state population
1053
+ probability |˜a|2 for weak coupling in the anti-phase tunneling case
1054
+ ∆1∆2 < 0, with different nonlinearities: (a) c/ω = 0, k = 2 and (b)
1055
+ c/ω = 0.5, k = 2. (c) c/ω = 0, k = 1/2 and (d) c/ω = 0.5, k = 1/2.
1056
+ The other parameters are A/ω = 10.5, ∆/ω = 0.05, and ϵ0/ω = 3.
1057
+ Ω corresponds to the weight of the populations of the projective quantum
+ state. Thus, we can understand the fact that the maximum values of the
+ populations in the self-trapping regions change with k2 in the in-phase case
+ ∆1∆2 > 0. In a full cycle, Φ(t) can be approximately written as
1062
+ Φ(t) ≃ ∫_{t1}^{t3} (ϵ0 + c − nω) dt = (2π/ω)(ϵ0 + c − nω),   (27)
1068
+ with n = 0, ±1, ±2, .... When Φm = 2mπ, i.e., c + ϵ0 ≃ (n + m)ω = dω
+ (m, d = 0, ±1, ±2, ...), the patterns are constructive, while the patterns
+ are destructive when Φm = (2m + 1/2)π.
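+ A tiny helper (ours) that evaluates the constructive-interference condition implied
+ by Eq. (27), c + ϵ0 ≃ dω with integer d:
+
+ def is_constructive(c, eps0, omega, tol=1e-6):
+     """Constructive interference per Eq. (27): c + eps0 ~ d*omega, d integer."""
+     d = (c + eps0) / omega
+     return abs(d - round(d)) < tol
+
+ for c in (0.0, 0.5, 1.0, 1.5):
+     print(f"c = {c}: constructive = {is_constructive(c, eps0=3.0, omega=1.0)}")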
1072
+ By calculating the nonlinear equation (8) and the linear equation (24), we
+ can get the exact solution and the approximate solution, respectively. In
+ Fig. 11, we show multi-period LZSM interference fringes with different
+ characteristics in the in-phase tunneling case ∆2 > 0. When c = 0, 1, i.e.,
+ Φm = 2mπ, the patterns are constructive, and when c = 0.5, 1.5, i.e.,
+ Φm = (2m + 1/2)π, the patterns are destructive. In all nonlinear cases, the
+ exact and approximate solutions are consistent. In Fig. 12, we show the
+ anti-phase tunneling case ∆2 < 0. Like the in-phase tunneling case, the
+ constructive and destructive interference only depend on the phase Φm, and
+ the nonreciprocity k only affects the maximal value of the projective
+ population probability |˜a|2.
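+ To make the weak-coupling approximation concrete, the following sketch integrates the
+ linearized Eq. (24) with Ω(t) = (∆/2)e^{iΦ(t)} and prints the projective population
+ |˜a|²; the parameter values mirror Fig. 11, while the numerical choices are our own:
+
+ import numpy as np
+ from scipy.integrate import solve_ivp
+
+ # Parameters as in Fig. 11 (in-phase case, j = 2): A/w = 10.5, D/w = 0.05, eps0/w = 3.
+ w, A, D, eps0, c, k, j = 1.0, 10.5, 0.05, 3.0, 0.0, 2.0, 2
+
+ def Phi(t):                                   # Eq. (26)
+     return eps0 * t - A * np.cos(w * t) / w + c * t
+
+ def rhs(t, y):                                # Eq. (24) for (alpha1~, beta1~)
+     a, b = y[0] + 1j * y[1], y[2] + 1j * y[3]
+     Om = 0.5 * D * np.exp(1j * Phi(t))
+     da = -1j * (k * Om * b)
+     db = -1j * ((-1)**j * np.conj(Om) / k * a)
+     return [da.real, da.imag, db.real, db.imag]
+
+ sol = solve_ivp(rhs, (0, 10 * 2 * np.pi / w), [0, 0, 1, 0], max_step=0.01)
+ a = sol.y[0, -1] + 1j * sol.y[1, -1]
+ b = sol.y[2, -1] + 1j * sol.y[3, -1]
+ print("projective |a~|^2 at final time:", abs(a)**2 / (abs(a)**2 + abs(b)**2))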
1085
+ IV.
1086
+ CONCLUSION
1087
+ In this work, we have studied the non-Hermitian nonlin-
1088
+ ear LZSM interferometry in which the non-Hermiticity comes from
+ the nonreciprocal tunnelings between the bosons. By using
1090
+ the mean-field approximation and projective Hilbert space,
1091
+ the effect of nonreciprocity and nonlinearity on the energy
1092
+
1093
+ 8
1094
+ spectrum, the dynamics, and the formation of the interfer-
1095
+ ence fringes have been studied. The results show that dif-
1096
+ ferent types of nonreciprocity correspond to different types of
1097
+ symmetries of the system. For the in-phase tunneling case
1098
+ ∆1∆2 > 0, the system can be transformed into a Hermitian one
1099
+ with a nonunitary transformation. It has the same energy spec-
1100
+ trum and boundary between the Josephson region and the self-
1101
+ trapping region as the Hermitian one. While it is not a neces-
1102
+ sary result for the anti-phase case ∆1∆2 < 0. The EPs can only
1103
+ exist in its linear case c = 0 and the eigenvalues of one en-
1104
+ ergy state will be complex in its nonlinear case. There is only
1105
+ a self-trapping region in this case since the evolution of the
1106
+ projective states will always be above the boundary when the
1107
+ nonlinearity exists. For the LZSM interferometry, the strength
1108
+ of the nonreciprocity k is found to take an essential role in the
1109
+ population of the projective state and determine the maximal
1110
+ values and strengths of the interference patterns in the projec-
1111
+ tive space. Finally, under the weak-coupling approximation,
1112
+ we found that the types and strengths of the nonreciprocity do
1113
+ not affect the conditions of destructive and constructive inter-
1114
+ ference. It only depends on the strength of nonlinearity. Our
1115
+ result provides a possible way to study the parameters of a
1116
+ non-Hermitian nonlinear two-level system and its related ex-
1117
+ ternal fields by the LZSM interferometry.
1118
+ ACKNOWLEDGMENTS
1119
+ We thank S. C. Li and F. Q. Dou for their helpful discus-
1120
+ sions. This work is supported by the National Natural Sci-
1121
+ ence Foundation of China (NSFC) (Grants Nos. 11875103,
1122
+ 12147206, 11725417, 12088101, 12047548, and U1930403),
1123
+ and Science Challenge Project (Grant No. TZ2018005).
1124
+ Appendix A: Semi-classical Hamiltonian
1125
+ In the non-Hermitian system, let ˆH be a non-Hermitian Hamiltonian with a complete
+ biorthonormal eigenbasis {|ψr_n⟩, |ψl_n⟩}; the orthonormalization of the quantum states reads
+ ⟨ψr_n|ψl_m⟩ = δnm.   (A1)
1133
+ Similarly, for system (1), in the mean-field approximation, the coherent states should be written as
+ |Ψr_sc⟩ = (1/√(N!)) (α1 ˆa† + β1 ˆb†)^N |∅⟩,   (A2)
+ |Ψl_sc⟩ = (1/√(N!)) (α2 ˆa† + β2 ˆb†)^N |∅⟩.   (A3)
+ According to the normalization condition ⟨Ψl_sc|Ψr_sc⟩ = 1:
+ α1α2* + β1β2* = 1.   (A4)
1155
+ Then, applying the Hamiltonian of system (1) to the right quantum state |Ψr_sc⟩, one can obtain
+ ˆH|Ψr_sc⟩ = [ (γ/2)(ˆa†ˆa − ˆb†ˆb) + (∆2/2) ˆa†ˆb + (∆1/2) ˆaˆb† − (c/4N)(ˆa†ˆa − ˆb†ˆb)² ]
+             × (1/√(N!)) Σ_{r=0}^{N} C^r_N (α1ˆa†)^{N−r} (β1ˆb†)^r |∅⟩,   (A5)
1174
+ When calculating the expectation value of an observable, the quantum states of the system are
+ normalized. So in system (1), the expectation value of ˆH0 should be written as
+ ⟨Ψl_sc| ˆH0 |Ψr_sc⟩ = (Nγ/2) Σ_{r=0}^{N} [(N−1)!/((N−r−1)! r!)] (α1α2*)^{N−r−1}(β1β2*)^{r} α1α2*
+                      − (Nγ/2) Σ_{r=0}^{N} [(N−1)!/((N−r)!(r−1)!)] (α1α2*)^{N−r}(β1β2*)^{r−1} β1β2*
+                      + (∆2/2) Σ_{r=0}^{N} C^{r}_{N} (N−r) (α1α2*)^{N−r−1}(β1β2*)^{r} α2*β1
+                      + (∆1/2) Σ_{r=0}^{N} C^{r}_{N} r (α1α2*)^{N−r}(β1β2*)^{r−1} α1β2*
+                      − (cN/4)(β1β2* − α1α2*)²
+                    = (Nγ/2)(α1α2* − β1β2*) + (N∆2/2) α2*β1 + (N∆1/2) α1β2* − (cN/4)(β1β2* − α1α2*)².   (A6)
+ The expectation value per particle is
+ ˆHM = ⟨Ψl_sc| ˆH0 |Ψr_sc⟩ / N = −(c/4)(β1β2* − α1α2*)² + (∆2/2) α2*β1 + (∆1/2) α1β2* + (γ/2)(α1α2* − β1β2*).   (A7)
1257
+
1258
+ 9
1259
+ Appendix B: Derivation of the Energy level equation
1260
+ In the non-Hermitian system, the Hamiltonian ˆH has a complete biorthonormal eigenbasis
+ {|φr_n⟩, |φl_n⟩} satisfying
+ ˆH|φr_n⟩ = En|φr_n⟩,   (B1)
+ ˆH†|φl_n⟩ = En*|φl_n⟩,   (B2)
+ ⟨φl_m|φr_n⟩ = δmn,  (n = 1, 2, ...).   (B3)
1277
+ By equation (B1), we can naturally conclude that the adiabatic basis of the system (7) satisfies
+ F α1 + i(∆/2) β1 = E α1,   i(∆/2) α1 − F β1 = E β1,   (B4)
+ F* α2 − i(∆/2) β2 = E* α2,   −i(∆/2) α2 − F* β2 = E* β2,   (B5)
+ α1α2* + β1β2* = 1,   (B6)
1292
+ where F ≡ γ/2 + (c/2)(β1β2* − α1α2*). To derive non-trivial solutions of Eqs. (B1) and (B2),
+ we must ensure that |ˆH − E ˆI| = 0 and |ˆH† − E* ˆI| = 0 (ˆI is the identity matrix). Namely,
+ E² − F² + ∆²/4 = 0,   (B7)
+ E*² − F*² + ∆²/4 = 0.   (B8)
1304
+ By (B4) and the complex conjugate of Eq. (B5), we have
+ α1α2*/(β1β2*) = −4(E + F)²/∆².   (B9)
+ By the normalization (B6) and Eq. (B7), this becomes
+ β1β2* = (E − F)/(2E).   (B10)
+ Therefore,
+ F ≡ γ/2 + (c/2)(β1β2* − α1α2*) = γ/2 − cF/(2E).   (B11)
1328
+ Substituting Eq. (B11) into Eq. (B7), we finally have
+ E⁴ + cE³ + (1/4)(c² − γ² + ∆²)E² + (c∆²/4)E + ∆²c²/16 = 0.   (B12)
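+ As a sanity check of this derivation (our own sketch with arbitrary parameter values),
+ every root of Eq. (B12) should satisfy Eq. (B7) with F obtained from Eq. (B11),
+ i.e. F = γE/(2E + c):
+
+ import numpy as np
+
+ c, gamma, Delta = 1.05, 0.4, 1.0
+ coeffs = [1.0, c, (c**2 - gamma**2 + Delta**2) / 4.0,
+           c * Delta**2 / 4.0, Delta**2 * c**2 / 16.0]
+
+ for E in np.roots(coeffs):                        # roots of Eq. (B12)
+     E = complex(E)
+     F = gamma * E / (2 * E + c)                   # Eq. (B11) solved for F
+     residual = E**2 - F**2 + Delta**2 / 4.0       # Eq. (B7), should vanish
+     print(f"E = {E:.4f}, |residual of (B7)| = {abs(residual):.2e}")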
1335
+ Appendix C: The projective space for non-Hermitian quantum system
1336
+ Consider the following Schrödinger equation
+ i (d/dt)|ψ(t)⟩ = ˆH|ψ(t)⟩,   (C1)
1340
+
1341
+ where ˆH is generally a non-Hermitian Hamiltonian. Let us define |ψ(t)⟩ = e^{µ+iν}| ˜ψ(t)⟩ with the
+ normalization relation ⟨ ˜ψ(t)| ˜ψ(t)⟩ = 1 (µ and ν are two real parameters). From Eq. (C1) and its
+ Hermitian conjugate, one can get
+ ˙µ = −(i/2)⟨ ˜ψ| ˆH − ˆH†| ˜ψ⟩,   (C2)
+ and
+ ˙ν = −(1/2)⟨ ˜ψ| ˆH + ˆH†| ˜ψ⟩ + i⟨ ˜ψ| ˙˜ψ⟩.   (C3)
1351
+ One has to keep in mind that the above deduction is somewhat different from what would be obtained by using the adjoint equation of (C1).
1352
+ In quantum theory with Hermitian Hamiltonian systems, |ψ(t)⟩ and | ˜ψ(t)⟩ are equivalence, since the time evolution is unitary
1353
+ (probability preserving) and they are only different in a global phase. Under this equivalence, | ˜ψ(t)⟩ can be employed as a vector
1354
+ on so-called projective Hilbert space of the system. However, for a system with a non-Hermitian Hamiltonian, the time evolution
1355
+ is not unitary. Hence, though the state vectors only differ in norms, they may describe different system states. Nevertheless, we
1356
+ can still formally set up the projective Hilbert space for a non-Hermitian system by using | ˜ψ(t)⟩ as a state on it.
1357
+ Based on the above definition, from Eqs. (C2) and (C3), we can see that one can obtain the norm increment and the global
1358
+ phase that the state acquires in its time evolution only from the trajectory in the projective space; the latter is the same as for
+ Hermitian systems. The global phase and its relation with the projective Hilbert space play a significant role in the geometric
1360
+ (topology) properties of Hermitian quantum systems. Therefore, it may be interesting to study the geometric properties of a
1361
+ non-Hermitian system in such a point of view.
1362
+ In order to show such discussions clearly, we employ a two-level system, describing physics of two coupled sites with gain
1363
+ and loss, of which the counterpart Hermitian system also plays a role in illustrating the geometric properties of quantum systems.
1364
+ The time evolution of such a two-level system is described by a 2 × 2 matrix Hamiltonian through the following equation,
+ i (d/dt) (a, b)ᵀ = ( H11, H12 ; H21, H22 ) (a, b)ᵀ.   (C4)
1381
+ Then, following the definition |ψ(t)⟩ = e^{µ+iν}| ˜ψ(t)⟩, one can get
+ (d/dt)(iµ − ν) ˜a + i (d/dt) ˜a = H11 ˜a + H12 ˜b,   (C5)
+ (d/dt)(iµ − ν) ˜b + i (d/dt) ˜b = H21 ˜a + H22 ˜b.   (C6)
+ Combining these with their complex conjugates, and considering |˜a|2 + |˜b|2 = 1, we can easily verify equations (C2) and (C3).
1392
+ For convenience and without loss of generality, we then construct the vector in the projective space for a state
+ |ψ(t)⟩ = (a, b)ᵀ with | ˜ψ(t)⟩ = (˜a e^{iϕ}, ˜b)ᵀ, ˜a = a/√(|a|² + |b|²), ˜b = b/√(|a|² + |b|²), and ϕ = arg(a) − arg(b).
+ By denoting z = |b|² − |a|², which is just the relative population difference of the two levels, the state can be mapped to a
+ sphere, the so-called Bloch sphere, with the coordinates (ϕ, z).
1409
+ From Eq. (C3), we can obtain the evolution of the total phase,
+ (d/dt)β = −(1/2)⟨ ˜ψ| ˆH + ˆH†| ˜ψ⟩ + (1/2)(1 − z) dϕ/dt.   (C7)
+ This equation is the same as that obtained for Hermitian systems by Aharonov and Anandan, except that in the dynamic
+ part the Hermitian Hamiltonian ˆH is replaced by (ˆH + ˆH†)/2. The second part on the right-hand side of the above equation
+ is known as the geometric part. One can easily prove that, if the trace of the evolution is closed in the projective space, the
+ geometric phase equals half of the solid angle of the closed path on the Bloch sphere, which is just the so-called AA phase,
+ the geometric phase of a cyclic state.
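+ Equations (C2) and (C3) can be verified numerically for a randomly chosen non-Hermitian
+ 2 × 2 Hamiltonian: evolve Eq. (C1), set µ = ln‖ψ‖, and compare dµ/dt with
+ −(i/2)⟨ ˜ψ| ˆH − ˆH†| ˜ψ⟩ (the sketch below is our own):
+
+ import numpy as np
+ from scipy.integrate import solve_ivp
+
+ rng = np.random.default_rng(0)
+ H = rng.normal(size=(2, 2)) + 1j * rng.normal(size=(2, 2))   # generic non-Hermitian H
+
+ def rhs(t, y):
+     psi = y[:2] + 1j * y[2:]
+     dpsi = -1j * H @ psi                      # Eq. (C1)
+     return np.concatenate([dpsi.real, dpsi.imag])
+
+ psi0 = np.array([1.0, 1.0]) / np.sqrt(2)
+ sol = solve_ivp(rhs, (0, 1.0), np.concatenate([psi0, np.zeros(2)]),
+                 dense_output=True, rtol=1e-10)
+
+ def mu(t):                                    # mu = ln ||psi(t)||
+     psi = sol.sol(t)[:2] + 1j * sol.sol(t)[2:]
+     return np.log(np.linalg.norm(psi))
+
+ t, dt = 0.5, 1e-5
+ psi = sol.sol(t)[:2] + 1j * sol.sol(t)[2:]
+ psit = psi / np.linalg.norm(psi)              # |psi~> in the projective space
+ rhs_c2 = (-0.5j * psit.conj() @ (H - H.conj().T) @ psit).real   # Eq. (C2)
+ print("d(mu)/dt ~", (mu(t + dt) - mu(t - dt)) / (2 * dt))
+ print("Eq. (C2) =", rhs_c2)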
1419
+ [1] L. D. Landau, Phys. Z. Sowjetunion 2 , 46 (1932).
1420
+ [2] C. Zener and R. H. Fowler, Proc. R. Soc. Lond. A 137, 696
1421
+
1422
+ 11
1423
+ (1932).
1424
+ [3] E. C. G. Stueckelberg, Helv. Phys Acta 5, 369 (1932).
1425
+ [4] L. D. Landau, Phys. Z. Sowjetunion 1 , 88 (1932).
1426
+ [5] S. Shevchenko, S. Ashhab, and F. Nori, Physics Reports 492, 1
1427
+ (2010).
1428
+ [6] B. T. Torosov and N. V. Vitanov, Phys. Rev. A 96, 013845
1429
+ (2017).
1430
+ [7] G. Cao, H. O. Li, T. Tu, L. Wang, C. Zhou, M. Xiao, G. C. Guo,
1431
+ H. W. Jiang, and G. P. Guo, Nat. Commun. 4, 1401 (2013).
1432
+ [8] F. Forster, G. Petersen, S. Manus, P. H¨anggi, D. Schuh,
1433
+ W. Wegscheider, S. Kohler, and S. Ludwig, Phys. Rev. Lett.
1434
+ 112, 116803 (2014).
1435
+ [9] P. F¨oldi, M. G. Benedict, J. M. Pereira, and F. M. Peeters, Phys.
1436
+ Rev. B 75, 104430 (2007).
1437
+ [10] C. Calero, E. M. Chudnovsky, and D. A. Garanin, Phys. Rev.
1438
+ B 72, 024409 (2005).
1439
+ [11] B. K. Cooper and V. M. Yakovenko, Phys. Rev. Lett. 96, 037001
1440
+ (2006).
1441
+ [12] A. Banerjee and V. M. Yakovenko, Phys. Rev. B 78, 125404
1442
+ (2008).
1443
+ [13] M. Mark, T. Kraemer, P. Waldburger, J. Herbig, C. Chin, H.-C.
1444
+ N¨agerl, and R. Grimm, Phys. Rev. Lett. 99, 113201 (2007).
1445
+ [14] L. Du, M. Wang, and Y. Yu, Phys. Rev. B 82, 045128 (2010).
1446
+ [15] Q. Niu, X.-G. Zhao, G. A. Georgakis, and M. G. Raizen, Phys.
1447
+ Rev. Lett. 76, 4504 (1996).
1448
+ [16] O. Morsch, J. H. M¨uller, M. Cristiani, D. Ciampini, and E. Ari-
1449
+ mondo, Phys. Rev. Lett. 87, 140402 (2001).
1450
+ [17] Y. A. Chen, S. D. Huber, S. Trotzky, I. Bloch, and E. Altman,
1451
+ Nat. Phys. 7, 61 (2011).
1452
+ [18] M. Cristiani, O. Morsch, J. H. M¨uller, D. Ciampini, and E. Ari-
1453
+ mondo, Phys. Rev. A 65, 063612 (2002).
1454
+ [19] Q. Zhang, P. H¨anggi, and J. Gong, Phys. Rev. A 77, 053607
1455
+ (2008).
1456
+ [20] C. S. E. van Ditzhuijzen, A. Tauschinsky, and H. B. van Linden
1457
+ van den Heuvell, Phys. Rev. A 80, 063407 (2009).
1458
+ [21] J. Liu, L. Fu, B.-Y. Y. Ou, S.-G. G. Chen, D.-i. I. Choi, B. Wu,
1459
+ and Q. Niu, Phys. Rev. A 66, 1 (2002), 0105140.
1460
+ [22] S.-C. Li, L.-B. Fu, W.-S. Duan, and J. Liu, Phys. Rev. A 78,
1461
+ 063621 (2008).
1462
+ [23] L.-B. Fu, D.-F. Ye, C. Lee, W. Zhang, and J. Liu, Phys. Rev. A
1463
+ 80, 013619 (2009).
1464
+ [24] D.-F. Ye, L.-B. Fu, and J. Liu, Phys. Rev. A 77, 013402 (2008).
1465
+ [25] S.-C. Li, Journal of Physics B: Atomic, Molecular and Optical
1466
+ Physics 43, 205303 (2010).
1467
+ [26] S.-C. Li and L.-B. Fu, Phys. Rev. A 102, 033323 (2020); Phys.
1468
+ Rev. A 101, 023618 (2020); Phys. Rev. A 102, 033313 (2020).
1469
+ [27] J. Liu, L. Fu, B.-Y. Ou, S.-G. Chen, D.-I. Choi, B. Wu, and
1470
+ Q. Niu, Phys. Rev. A 66, 023404 (2002).
1471
+ [28] G. J. Milburn, J. Corney, E. M. Wright, and D. F. Walls, Phys.
1472
+ Rev. A 55, 4318 (1997).
1473
+ [29] A. Smerzi, S. Fantoni, S. Giovanazzi, and S. R. Shenoy, Phys.
1474
+ Rev. Lett. 79, 4950 (1997).
1475
+ [30] S. Kohler and F. Sols, Phys. Rev. Lett. 89, 060403 (2002).
1476
+ [31] O. V. Ivakhnenko, S. N. Shevchenko, and F. Nori, Physics Re-
1477
+ ports 995, 1 (2023).
1478
+ [32] B. Wu and Q. Niu, Phys. Rev. A 61, 023402 (2000).
1479
+ [33] S.-C. Li, L.-B. Fu, and J. Liu, Phys. Rev. A 98, 013601 (2018).
1480
+ [34] R. El-Ganainy, K. G. Makris, M. Khajavikhan, Z. H. Mussli-
1481
+ mani, S. Rotter, and D. N. Christodoulides, Nat. Phys. 14, 11
1482
+ (2018).
1483
+ [35] Y. Ashida, Z. Gong, and M. Ueda, Advances in Physics 69, 249
1484
+ (2020).
1485
+ [36] M. A. Miri and A. Al`u, Science 363, eaar7709 (2019).
1486
+ [37] W. Zhu, X. Fang, D. Li, Y. Sun, Y. Li, Y. Jing, and H. Chen,
1487
+ Phys. Rev. Lett. 121, 124501 (2018).
1488
+ [38] Y. Wu, W. Liu, J. Geng, X. Song, X. Ye, C.-K. Duan, X. Rong,
1489
+ and J. Du, Science 364, 878 (2019).
1490
+ [39] J. Li, A. K. Harter, J. Liu, L. de Melo, Y. N. Joglekar, and
1491
+ L. Luo, Nat. Commun. 10, 855 (2019), arXiv:1608.05061.
1492
+ [40] W. Xiong, Z. Li, Y. Song, J. Chen, G.-Q. Zhang, and M. Wang,
1493
+ Phys. Rev. A 104, 063508 (2021).
1494
+ [41] W. Xiong, Z. Li, G.-Q. Zhang, M. Wang, H.-C. Li, X.-Q. Luo,
1495
+ and J. Chen, Phys. Rev. A 106, 033518 (2022).
1496
+ [42] S. Yao and Z. Wang, Phys. Rev. Lett. 121, 086803 (2018).
1497
+ [43] C. Yin, H. Jiang, L. Li, R. L¨u, and S. Chen, Phys. Rev. A 97,
1498
+ 052115 (2018).
1499
+ [44] C. H. Lee and R. Thomale, Phys. Rev. B 99, 201103 (2019).
1500
+ [45] L. Li, C. H. Lee, and J. Gong, Phys. Rev. Lett. 124, 250402
1501
+ (2020).
1502
+ [46] X. Huang, C. Lu, C. Liang, H. Tao, and Y. C. Liu, Light Sci.
1503
+ Appl. 10 (2021), 10.1038/s41377-021-00464-2.
1504
+ [47] C. M. Bender and S. Boettcher, Phys. Rev. Lett. 80, 5243
1505
+ (1998).
1506
+ [48] J. Wong, J. Math. Phys. 8, 2039 (1967).
1507
+ [49] F. H. M. Faisal and J. V. Moloney, J. Phys. B 16, 3109 (1983).
1508
+ [50] A. Mostafazadeh, J. Math. Phys. 43, 205 (2002); J. Math. Phys.
1509
+ 43, 2814 (2002); J. Math. Phys. 43, 3944 (2002); J. Math. Phys.
1510
+ 43, 6343 (2002); J. Math. Phys. 44, 974 (2003); J. Math. Phys.
1511
+ 45, 932 (2004); Nuclear Physics B 640, 419 (2002).
1512
+ [51] C. M. Bender, K. A. Milton, and V. M. Savage, Phys. Rev. D
1513
+ 62, 085001 (2000).
1514
+ [52] C. M. Bender, S. Boettcher, H. Jones, P. N. Meisinger, and
1515
+ M. Simsek, Phys. Lett. A 291, 197 (2001).
1516
+ [53] C. M. Bender, D. C. Brody, and H. F. Jones, Phys. Rev. Lett.
1517
+ 93, 251601 (2004); Phys. Rev. D 70, 025001 (2004).
1518
+ [54] A. Mostafazadeh, Int. J. Mod. Phys. A 21, 2553 (2006).
1519
+ [55] C. M. Bender, V. Branchina, and E. Messina, Phys. Rev. D 85,
1520
+ 085001 (2012).
1521
+ [56] C. M. Bender and K. A. Milton, Phys. Rev. D 57, 3595 (1998).
1522
+ [57] P. Dorey, C. Dunning, and R. Tateo, J. Phys. A 34, L391 (2001).
1523
+ [58] Y.-G. Miao, H. J. M¨uller-Kirsten, and D. K. Park, Journal of
1524
+ High Energy Physics 2003, 038 (2003).
1525
+ [59] L. Jin and Z. Song, Phys. Rev. A 80, 052107 (2009); Phys. Rev.
1526
+ A 85, 012111 (2012); J. Phys. A 44, 375304 (2011).
1527
+ [60] F. Minganti, A. Miranowicz, R. W. Chhajlany,
1528
+ and F. Nori,
1529
+ Phys. Rev. A 100, 062131 (2019).
1530
+ [61] B. Longstaff and E.-M. Graefe, Phys. Rev. A 100, 052119
1531
+ (2019).
1532
+ [62] E. M. Graefe, H. J. Korsch, and A. E. Niederle, Phys. Rev. Lett.
1533
+ 101, 150408 (2008).
1534
+ [63] B. Longstaff and E.-M. Graefe, Phys. Rev. A 100, 052119
1535
+ (2019).
1536
+ [64] X. Shen, F. Wang, Z. Li, and Z. Wu, Phys. Rev. A 100, 062514
1537
+ (2019).
1538
+ [65] W.-Y. Wang, B. Sun, and J. Liu, Phys. Rev. A 106, 063708
1539
+ (2022).
1540
+ [66] S. Ib´a˜nez and J. G. Muga, Phys. Rev. A 89, 033403 (2014).
1541
+ [67] H.-D. Liu, J. Fang, and T.-Y. Zheng, Commun. Theor. Phys.
1542
+ 68, 439 (2017).
1543
+ [68] J. I. Cirac, M. Lewenstein, K. Mølmer, and P. Zoller, Phys. Rev.
1544
+ A 57, 1208 (1998).
1545
+ [69] W. Wang, L. B. Fu, and X. X. Yi, Phys. Rev. A 75, 045601
1546
+ (2007).
1547
+
1dAyT4oBgHgl3EQf1fkT/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
1tE4T4oBgHgl3EQfaQwc/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ac013d166c2dc030d8f7bd9bf82d690a5bf6753853593a747b5b1f1374c952e
3
+ size 177623
29E1T4oBgHgl3EQfAQI2/content/tmp_files/2301.02836v1.pdf.txt ADDED
@@ -0,0 +1,1606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Dynamic Local Feature Aggregation for Learning on Point Clouds
2
+ Zihao Lia, Pan Gaoa, Hui Yuanb, Ran Weic
3
+ aNanjing University of Aeronautics and Astronautics, Nanjing ,China
4
+ bShandong University, Jinan, China
5
+ cScience and Technology on Electro-optic Control Laboratory, Luoyang, China
6
+ Abstract
7
+ Existing point cloud learning methods aggregate features from neighbouring points relying on constructing graph in the
8
+ spatial domain, which results in feature update for each point based on spatially-fixed neighbours throughout layers.
9
+ In this paper, we propose a dynamic feature aggregation (DFA) method that can transfer information by constructing
10
+ local graphs in the feature domain without spatial constraints. By finding k-nearest neighbors in the feature domain,
11
+ we perform relative position encoding and semantic feature encoding to explore latent position and feature similarity
12
+ information, respectively, so that rich local features can be learned. At the same time, we also learn low-dimensional global
13
+ features from the original point cloud for enhancing feature representation. Between DFA layers, we dynamically update
14
+ the constructed local graph structure, so that we can learn richer information, which greatly improves adaptability
15
+ and efficiency.
16
+ We demonstrate the superiority of our method by conducting extensive experiments on point cloud
17
+ classification and segmentation tasks. Implementation code is available: https://github.com/jiamang/DFA.
18
+ Keywords:
19
+ dynamic feature aggregation, point cloud, relative position encoding, semantic feature encoding,
20
+ classification, segmentation
21
+ 1. Introduction
22
+ The collection of points that express the spatial distri-
23
+ bution and surface features of the target is called point
24
+ cloud data, which represents the 3D target in an unstruc-
25
+ tured form. The point cloud obtained by combining the
26
+ laser principle and the photography principle mainly con-
27
+ tains three-dimensional position coordinates (X, Y, Z),
28
+ laser reflection intensity and color information (R, G, B).
29
+ Common point cloud data formats include RGB-D dual-
30
+ modality format and Point Cloud space format.
31
+ RGB-
32
+ D dual-modality data records the color information and
33
+ depth information of the surface of the target object. The
34
+ Email addresses: pride_19@163.com (Zihao Li),
35
+ Pan.Gao@nuaa.edu.cn (Pan Gao), huiyuan@sdu.edu.cn (Hui Yuan),
36
+ 115946873@qq.com (Ran Wei)
37
+ Point Cloud space format records three-dimensional coor-
38
+ dinates of the sampling points on the surface of the object,
39
+ reflecting the spatial contour information.
40
+ Learning features from point clouds often requires a lot
41
+ of advanced processing.
42
+ Traditional methods proposed
43
+ to solve these problems include capturing the geometric
44
+ characteristics of point clouds by using the hand-crafted
45
+ features [1]. With the breakthrough of convolution neu-
46
+ ral network and deep learning, significantly better perfor-
47
+ mance is achieved in various tasks of point cloud process-
48
+ ing. However, standard deep neural networks need regu-
+ lar input data, whereas point cloud data is irregular, and
+ operations such as translation and rotation do not change
+ its nature. Some methods
52
+ consider converting to a normative 3D grid and then send
53
+ the grid into the network for training, but it will cause ad-
54
+ Preprint submitted to Journal of LATEX Templates
55
+ January 10, 2023
56
+ arXiv:2301.02836v1 [cs.CV] 7 Jan 2023
57
+
58
+ ditional memory occupation and information loss. Point-
59
+ net proposed by [2] creates a precedent for learning and
60
+ processing directly on the original point cloud, where the
61
+ multi-layer perceptron is applied to each point.
62
+ However, since Pointnet [2] cannot capture the contex-
63
+ tual information, many recent studies have introduced dif-
64
+ ferent modules to learn more abundant local structures,
65
+ which can be divided into the following categories:
66
+ 1)
67
+ Feature update based on constructing graph structure
68
+ [3][4][5][6][7]; 2) Feature pooling based on neighboring
69
+ points [8][9][10][11][12]; 3) Convolution based on a series
70
+ of kernels [13][14][15][16][17][15][18][19]; 4) Learning based
71
+ on attention mechanism [20][21][22][23]. These methods
72
+ have achieved good results in classification and segmen-
73
+ tation, but the construction of local feature learners and
74
+ the calculation of attention weights incur very expensive com-
+ putational cost and memory occupation. In addition, the fea-
76
+ ture extractors proposed by some methods are not efficient
77
+ enough, and there are many parts worth improving.
78
+ The goal of this paper is to design an efficient local
79
+ feature extractor without adding much complexity, and
80
+ then use the learned efficient features to represent objects,
81
+ which will improve the point cloud classification and seg-
82
+ mentation tasks.
83
+ So we propose a dynamic feature ag-
84
+ gregation (DFA) module, which extracts and learns latent
85
+ features by finding k-nearest neighbors in the feature do-
86
+ main, encoding location information and semantic feature
87
+ information simultaneously, and concatenating these two
88
+ parts.
89
+ In the classification and segmentation task, this
90
+ module is stacked to extract rich local features.
91
+ Using
+ a network structure similar to Pointnet [2], we extract low-
93
+ dimensional global features from the initial point cloud,
94
+ and then concatenate them with local features extracted
95
+ by multiple DFAs. Finally, high-dimensional global fea-
96
+ tures are obtained for classification and segmentation. For
97
+ segmentation, we concatenate the high-dimensional global
98
+ features again with local features, and perform the MLP
99
+ operation to predict the category of each point.
100
+ In general, we design an efficient local feature extrac-
101
+ tor that utilizes multi-level and multi-source features to
102
+ effectively characterize objects.
103
+ Multi-level features are
104
+ reflected in that by stacking several layers of DFA, we can
105
+ gradually obtain deeper contextual features. Multi-source
106
+ features are reflected in that we combine multiple types of
107
+ features of location information, feature differences, fea-
108
+ tures themselves, and low-dimensional global features to
109
+ perform deeper and higher-dimensional feature learning.
110
+ In order to test its efficiency, we have done relevant tests
111
+ on the ModelNet40 [24], ShapeNet [25] and S3DIS [26]
+ datasets. Furthermore, we also provide many visualization re-
113
+ sults and ablation experiments. Our main contributions
114
+ are summarized as follows:
115
+ • We propose a new operation DFA, which finds k-
116
+ nearest neighbors in the feature domain to construct
117
+ a local graph structure for feature aggregation at each
118
+ time. The graph between DFA layers is dynamically
119
+ updated, which is more adaptable.
120
+ • In each DFA layer, we can learn rich latent position
121
+ and feature difference information through proposed
122
+ relative position encoding and semantic feature en-
123
+ coding, respectively. To the best of our knowledge,
124
+ simultaneously aggregating the relative position and
125
+ feature information in the feature domain has not
126
+ been studied before.
127
+ • We make full use of the learned local features and low-
128
+ dimensional global features for point cloud classifica-
129
+ tion and segmentation tasks, and test on benchmark
130
+ datasets with outstanding quantitative and qualita-
131
+ tive results.
132
+ 2. Related work
133
+ 2.1. Voxel-based Network.
134
+ Converting point cloud data into regular voxel structure
135
+ can preserve and express spatial distribution. In 2016, Qi
136
+ 2
137
+
138
+ et al. [27] improved voxel CNN and proposed two differ-
139
+ ent voxel CNN network structures. Afterwards, Tchapmi
140
+ et al. [28] jointly proposed segcloud based on voxel-based
141
+ 3D full convolution neural network and point based con-
142
+ ditional random field. Wang et al. [29] proposed O-CNN.
143
+ Its core idea is to use octree to represent 3D shapes, and
144
+ only the sparse octree occupied by the shape boundary
145
+ is subject to CNN operation. In order to effectively en-
146
+ code the distribution of voxel midpoint, Meng et al. [30]
147
+ proposed the voxel variational self encoder network VV-
148
+ net, and the point distribution in each voxel is captured
149
+ by the self encoder. In 2020, Shao et al. [31] proposed
150
+ the data structure of opportunity space hash, designed
151
+ hash2col and col2hash, so that CNN operations such as
152
+ convolution and pooling can be parallelized.
153
+ 2.2. View-based Network.
154
+ Usually, the point cloud is projected into the 2D image
155
+ first, and then the 2D CNN is used to extract the image
156
+ features. Due to the limitations of the existing deep learn-
157
+ ing network, this kind of method can only recognize the
158
+ point cloud model from a specific angle. In 2017, Lawin et
159
+ al. [32] generated images with different pitch angles and
160
+ translation distances by controlling the equidistant angle.
161
+ Snapnet-r proposed by Gueery et al. [33] can use 2D im-
162
+ ages and 3D as spatial structure information at the same
163
+ time. The mvpnet proposed by Jaritz et al. [34] in 2019
164
+ can aggregate 2D image features into 3D. The relationship
165
+ network proposed by Yang et al. [35] comprehensively con-
166
+ siders the relationship between different views and regions,
167
+ and also uses the attention mechanism to generate scores
168
+ to reflect the relative discrimination ability of views.
169
+ 2.3. Point-based Network.
170
+ Direct processing of point clouds contains complete orig-
171
+ inal information. Qi et al. [2] proposed Pointnet network,
172
+ which is the first deep neural network to directly process
173
+ disordered point clouds. Since it does not consider local
174
+ features, they [36] further proposed Pointnet++ to extract
175
+ local features at multiple levels. Later Atzmon et al. [37]
176
+ proposed point convolution neural network, which uses ex-
177
+ pansion operator and constraint operator to generate con-
178
+ volution. In response to the problem of inflexibility of fixed
179
+ grids, Thomas et al. [19] proposed KPconv, which is lo-
180
+ cated in Euclidean space and is very effective in classifying
181
+ point clouds with different densities. In addition, Point-
182
+ Conv [15] and PointCNN [38] use 3D convolution kernels to
183
+ extract features instead of sharing MLP. The PointConv
184
+ [15] can be extended to deconvolution to achieve better
185
+ segmentation results. And PointCNN [38] introduced the
186
+ x-transform to rearrange the points into a potentially regu-
187
+ lar order, and then use convolution to extract local features
188
+ from the point cloud.
189
+ Graph-based Methods. By constructing a local or global
190
+ graph structure to update delivery messages and learn fea-
191
+ tures. In general, the graph structure of the spatial domain
192
+ relies on finding k-nearest neighbors for message passing,
193
+ and the graph structure of the spectral domain needs to
194
+ be realized by methods such as Laplace matrix spectral
195
+ decomposition and Chebyshev polynomial approximation.
196
+ KCNet [4] defines a point set kernel as a set of learnable
197
+ 3D points. It aggregates repetitive features at 3D locations
198
+ on the nearest neighbor graph based on geometric rela-
199
+ tionships and local high-dimensional features measured by
200
+ kernel correlations. Wang et al. [5] proposed DGCNN to
201
+ learn the embedding of edges by constructing local graphs.
202
+ Unlike DGCNN [5], 3DGCN [39] defines learnable ker-
203
+ nels using graph max pooling mechanism, and introduces
204
+ shift invariance and scale invariance into deep learning net-
205
+ works. DeepGCNs [40] uses residual connections and di-
206
+ lated convolutions to train deeper graph structures, and
207
+ experiments confirm the positive effect of depth.
208
+ Transformer-based Methods. Since the great success of
209
+ transformers in the NLP field, a lot of work has also in-
210
+ troduced attention mechanisms to related tasks in point
211
+ clouds recently. PCT [41] adopts a similar architecture to
212
+ 3
213
+
214
276
+ Figure 1: Illustration of feature extraction by DFA layer. The color closeness represents the adjacent points in the feature domain rather than
277
+ the spatial neighbors. Rich information is obtained through relative position encoding and semantic feature encoding. The edge features of
278
+ each adjacent point are obtained by sharing MLP, and finally the features of the central point are updated by maximum pooling operation.
279
+ The subscript j1 · · · j5 index the feature-domain neighbors for center xi.
280
+ pointnet [2], using neighbor information embedding, and
281
+ improved offset transformer for feature learning, so that it
282
+ has achieved good results in classification and segmenta-
283
+ tion tasks. Similarly, there are also some research works
284
+ based on the pointnet++ [36] network, such as PT [42]
285
+ and BL-Net [43] . The PT [42] proposed by Zhao et al.
286
+ is to add a layer of transformer to extract features after
287
+ each downsampling or upsampling. The transformer has
288
+ been modified to measure the difference between the cor-
289
+ responding channels between two eigenvectors (Q and K).
290
+ BL-Net [43] newly designed position feedback module to
291
+ perform feature-guided point shifting. In addition, Yan et
292
+ al. [44] also used the attention mechanism and proposed
293
+ PointASNL that can effectively process point clouds with
294
+ noise.
295
+ 3. Methodology
296
+ Extracting and utilizing effective features is crucial in
297
+ point cloud tasks. We construct a local graph structure
298
+ through dynamic updating, and the information can dif-
299
+ fuse nonlocally in the whole point cloud. Based on the
300
+ graph structure, we explore both the latent location and
301
+ semantic features of different layers. Further, we make full
302
+ use of global features and local features containing detailed
303
+ information. We describe the operation called Dynamic
304
+ Feature Aggregation (DFA) in Section 3.1, and then the
305
+ network structure is introduced in Section 3.2.
306
+ 3.1. Dynamic Feature Aggregation
307
+ We define the input point cloud as X = {xi | i = 1, 2, ..., N} ∈ R^{N×3}, with
+ the corresponding features defined as F = {fi | i = 1, 2, ..., N} ∈ R^{N×D}. Here
+ xi represents the three-dimensional coordinates (x, y, z)
+ of the i-th point. As the input point cloud only contains
+ three-dimensional coordinates, the geometry coordinates
+ can also be regarded as its initial features.
327
+ When extracting features at each layer, a local graph
328
+ needs to be dynamically constructed, which is defined
329
+ as G = (V, E), where V = {1, 2, ...n} and E ⊆ V × V
330
+ are the vertices and edges, respectively. We construct a
331
+ local graph structure by finding k-nearest neighbors in
332
+ the feature domain, including self-loops.
333
+ Suppose that
334
+ xi is the center point of the graph structure, and then
335
+ N(i) = {j : (i, j) ∈ E} denotes the neighboring points in the fea-
+ ture domain. Specifically, the similarity of features is cal-
337
+ culated and measured in the same way as Euclidean space
338
+ [Figure 2 diagram omitted: spatial transform (64), four stacked DFA(64) layers on the N×3 input, a Pointnet(64) global branch, concatenation to N×1024 followed by max pooling, and MLP heads (512, 256, c) for classification and (512, 256, p) for segmentation with a categorical vector.]
384
+ Figure 2: DFA based network architectures for classification and segmentation tasks. ⊕ stands for concatenated operations. The spatial
385
+ transformation is designed to compute a 3 × 3 matrix to align the input point cloud to the canonical space. By concatenating local features
386
+ and low-dimensional global features through MLP and max pooling, 1D global descriptors can be generated for classification tasks. For part
387
+ segmentation, we generate 1024-dimensional global features, fuse the category feature vectors, and then concatenate the detailed local features
388
+ again to output the category score of each point through MLP.
389
+ distance in each feature dimension, and the k points with the smallest distances are selected as the nearest
+ neighbors. We then retrieve the 3D coordinates of each nearest neigh-
392
+ bor. Given the input three-dimensional coordinates and
393
+ D-dimensional features, our purpose is to learn and output
394
+ M-dimensional features with the same number of points
395
+ through the DFA layer.
396
+ Because we establish the connection between the center
397
+ point and the surrounding k-nearest neighbors by build-
398
+ ing a local graph structure, so we define the feature of the
399
+ edge as eij = hΘ(fi, fj) , where hΘ : RD × RD → RM
400
+ is a nonlinear function with a set of learnable parameters
401
+ Θ. Finally, we aggregate the edge features of the k near-
402
+ est neighbors along each channel, and obtain the result
403
+ for each center point fi that enters the DFA layer feature
404
+ extraction, which is defined as follows:
405
+ f′i = Π_{j∈N(i)} hΘ(fi, fj)    (1)
411
+ Semantic Feature Encoding. We choose to find k-
412
+ nearest neighbors in the feature domain, which means that
413
+ the points sharing the same class will have high probabil-
414
+ ity to be connected. Then we concatenate the feature of
415
+ the center point and the feature differences with its neigh-
416
+ bors as semantic feature information. This encoding not only retains the features of the original center
+ points, but also propagates information to the surrounding points through the feature differences with their
+ neighbors. We define the encoding as follows:
422
+ hfj = fi ⊕ (fi − fj), j ∈ N(i)
423
+ (2)
424
+ Here, ⊕ is the concatenate operation. We calculate and
425
+ concatenate the feature differences and its own features
426
+ along each dimension, aiming to encode semantically sim-
427
+ ilar features and explore their latent information.
428
+ Relative Position Encoding. We first need to store
429
+ the original 3-dimensional position coordinate, and then
430
+ find the latent position information of the corresponding
431
+ nearest neighbors in the feature domain for each center
432
+ point.
433
+ We use the relative position information of the
434
+ neighboring points to encode as follows:
435
+ hxj = MLP(xi⊕xj⊕(xi−xj)⊕ ∥ xi−xj ∥), j ∈ N(i) (3)
436
+ where xi and xj represent the original three-dimensional
437
+ coordinates, (xi − xj) calculate the relative coordinates of
438
+ the center point and the k-nearest neighbors of the fea-
439
+ ture domain , ⊕ is the concatenate operation, and ∥ · ∥
440
442
+ calculates the Euclidean distance between the neighbours and the center point. Unlike finding nearest
+ neighbors in space, which is restricted by geometric distance, searching in the feature domain lets us discover
+ latent location information from points that have similar semantic features but a larger geometric distance.
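+ A minimal sketch of this relative position encoding, written in PyTorch under the assumption that xyz holds the coordinates, idx the feature-domain neighbor indices, and mlp is a shared MLP (all names are ours, not the authors'):
+     import torch
+ 
+     def relative_position_encoding(xyz, idx, mlp):
+         # xyz: (N, 3) coordinates; idx: (N, k) feature-domain neighbor indices
+         xi = xyz.unsqueeze(1).expand(-1, idx.shape[1], -1)   # (N, k, 3) center coordinates
+         xj = xyz[idx]                                        # (N, k, 3) neighbor coordinates
+         diff = xi - xj                                       # relative coordinates
+         dist = diff.norm(dim=-1, keepdim=True)               # Euclidean distance
+         enc = torch.cat([xi, xj, diff, dist], dim=-1)        # (N, k, 10), as in Eq. (3)
+         return mlp(enc)                                      # shared MLP over the last dimension
+ Here mlp could be, for example, torch.nn.Linear(10, 64); the output dimension is a free choice in this sketch.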
448
+ When obtaining the position and semantic embedding,
449
+ we can concatenate these two parts first and then extract
450
+ the edge features through the MLP operation:
451
+ hij = MLP(hxj ⊕ hfj), j ∈ N(i)
452
+ (4)
453
+ Finally, we need to consider how to aggregate the features of the neighboring edges, that is, the operation Π
+ in (1). We have three options for the aggregation Π. The first is to max-pool the edge features learned from
+ all nearest neighbors to obtain the features of the center point. The second is to sum all edge features. The
+ third is to apply a softmax over the neighbors to obtain weight coefficients Wij, multiply each edge feature
+ by its weight, i.e., Wij × hij, to obtain attentive edge features, and finally sum these to update the features
+ of the center point. The experimental results show that max pooling performs best, so we choose max
+ pooling to aggregate all edge features.
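+ As a concrete illustration, the following is a minimal PyTorch-style sketch of one DFA layer as described above (k-nearest neighbors in the feature domain including self-loops, relative position encoding, semantic feature encoding, a shared MLP over the concatenated edge features, and max pooling); the class and argument names are ours and batch normalization is omitted for brevity:
+     import torch
+     import torch.nn as nn
+ 
+     class DFALayer(nn.Module):
+         def __init__(self, in_dim, out_dim, k=20):
+             super().__init__()
+             self.k = k
+             # shared MLP over [position encoding (10 dims) ; semantic encoding (2*in_dim)]
+             self.mlp = nn.Sequential(nn.Linear(10 + 2 * in_dim, out_dim), nn.LeakyReLU(0.2))
+ 
+         def forward(self, xyz, feat):
+             # xyz: (N, 3) coordinates, feat: (N, D) current features
+             idx = torch.cdist(feat, feat).topk(self.k, largest=False).indices   # feature-domain kNN
+             xi, xj = xyz.unsqueeze(1).expand(-1, self.k, -1), xyz[idx]
+             fi, fj = feat.unsqueeze(1).expand(-1, self.k, -1), feat[idx]
+             h_x = torch.cat([xi, xj, xi - xj, (xi - xj).norm(dim=-1, keepdim=True)], dim=-1)
+             h_f = torch.cat([fi, fi - fj], dim=-1)
+             edge = self.mlp(torch.cat([h_x, h_f], dim=-1))   # (N, k, out_dim) edge features
+             return edge.max(dim=1).values                    # max-pool over the k neighbors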
466
+ 3.2. Network Architecture
467
+ We use the proposed DFA layer to design two network
468
+ architectures for the point cloud classification and segmen-
469
+ tation task as shown in Fig. 2. We send the initial point
470
+ cloud into a spatial transformation network similar to the
471
+ Pointnet [2] network. By learning the position information
472
+ of the point cloud itself, we can learn a rotation matrix
473
+ that is most conducive to the classification or segmenta-
474
+ tion.
475
+ The point cloud is then multiplied by this matrix and fed into our stacked DFA layers to extract features.
477
+ Local and Global Information Aggregation. Fo-
478
+ cusing only on the global features obtained by pooling on
479
+ each point ignores the local interactions between points, while focusing only on the local features of
+ surrounding points is equally one-sided. Therefore, we choose a combination of local
482
+ features and global features to comprehensively learn the
483
+ information contained in the point cloud, so that it can be
484
+ better used in classification and segmentation tasks. Our
485
+ local features are learned by several layers of DFA, and the
486
+ lower-dimensional global features are obtained, similarly to
487
+ Pointnet [2] by using shared MLP and max pooling. Our
488
+ ablation experiments have also confirmed that integration
489
+ with global feature is beneficial. On the other hand, we
490
+ set several local features and low-dimensional global fea-
491
+ tures to the same dimension (64) because we think they
492
+ are equally important, which is also confirmed in practice.
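+ A rough sketch of this fusion, assuming four 64-dimensional per-point local feature maps f1..f4 from the DFA layers and a 64-dimensional Pointnet-style global branch g (all names hypothetical):
+     import torch
+     import torch.nn as nn
+ 
+     def fuse_local_global(f1, f2, f3, f4, g, mlp):
+         # each input: (N, 64) per-point features; mlp lifts the 320-d concatenation
+         x = torch.cat([f1, f2, f3, f4, g], dim=-1)   # (N, 320)
+         x = mlp(x)                                   # (N, 1024)
+         return x.max(dim=0).values                   # 1024-d global descriptor via max pooling
+ 
+     mlp = nn.Sequential(nn.Linear(320, 1024), nn.LeakyReLU(0.2))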
493
+ Classification Network. Our classification network
495
+ is shown in the upper part of Fig. 2, and the point cloud
496
+ through the spatial transformation network is sequentially
497
+ passed through four DFA to extract local features. The
498
+ input of each layer is the output of the previous layer. We
499
+ concatenate these four local features and the global fea-
500
+ tures extracted from the initial point cloud, and then con-
501
+ vert them to higher dimensions through MLP operations.
502
+ Finally, global features are obtained by max pooling for
503
+ classification prediction.
504
+ Segmentation Network. Our segmentation network
505
+ is similar to the classification network, as shown in the
506
+ lower part of Fig. 2. We pass the transformed point
509
+ cloud through three DFA layers in sequence. The three
510
+ local features and low-dimensional global features are also
511
+ concatenated to obtain a 1024-dimensional global features
512
+ through MLP and max pooling. If it is part segmenta-
513
+ tion, then we add a category feature vector (64). If it is
514
+ semantic segmentation, it will not be added. Finally we
515
+ use the shared MLP to resize the features and predict the
516
+ semantic label for each point.
517
+ Dynamic Graph Update.
518
+ Depending on the spa-
519
+ tial interaction of the point cloud, locally adjacent parts
520
+ can form subsets. However, considering the spatial neigh-
521
+ bors for graph update sometimes leads to failure of fea-
522
+ ture aggregation. For example, for the point cloud of an airplane, the wing and the fuselage are adjacent in
+ space, but features mutually updated between them are useless. So we use the
527
+ point of finding k-nearest neighbors on the feature domain,
528
+ which means that these points can constitute meaningful
529
+ parts. Each time we find neighbors in the feature domain
530
+ to reconstruct the local graph structure. It can be said
531
+ that our graph is dynamically updated, so we can explore
532
+ more latent location information, which is also a limitation
533
+ that cannot be achieved by doing k-nearest neighbors in
534
+ space.
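+ The difference between the two neighborhood definitions can be made explicit with a small sketch (the tensor names are hypothetical):
+     import torch
+ 
+     def knn_indices(query, k):
+         # k nearest neighbors of each row of `query` under Euclidean distance
+         return torch.cdist(query, query).topk(k, largest=False).indices
+ 
+     xyz = torch.rand(1024, 3)             # coordinates
+     feat = torch.rand(1024, 64)           # current features
+     spatial_idx = knn_indices(xyz, 20)    # fixed graph: neighbors by geometric distance
+     feature_idx = knn_indices(feat, 20)   # dynamic graph: recomputed as the features evolve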
535
+ 4. Experiments
536
+ In this section, we evaluate our models using DFA for
537
+ point cloud classification and part segmentation tasks.
538
+ Methods | Input | Points | mAcc | OA
+ Pointnet[2] | xyz | 1k | 86.0 | 89.2
+ Pointnet++[36] | xyz | 1k | - | 90.7
+ Pointnet++[36] | xyz,normal | 5k | - | 91.9
+ SpiderCNN[45] | xyz,normal | 1k | - | 92.4
+ PointWeb[12] | xyz,normal | 1k | 89.4 | 92.3
+ PointCNN[38] | xyz | 1k | 88.1 | 92.2
+ DGCNN[5] | xyz | 1k | 90.2 | 92.2
+ Point2Sequence[46] | xyz | 1k | 90.4 | 92.6
+ FPConv[47] | xyz,normal | 1k | - | 92.5
+ PointConv[15] | xyz,normal | 1k | - | 92.5
+ KPConv[19] | xyz | 6k | - | 92.9
+ Point2Node[48] | xyz | 1k | - | 93.0
+ PointASNL[44] | xyz | 1k | - | 92.9
+ PointASNL[44] | xyz,normal | 1k | - | 93.2
+ PCT[41] | xyz | 1k | - | 93.2
+ SO-Net[8] | xyz,normal | 5k | 90.8 | 93.4
+ BL-Net[43] | xyz | 1k | - | 93.5
+ AG-conv[49] | xyz | 1k | 90.7 | 93.4
+ PointStack[50] | xyz | 1k | 89.6 | 93.3
+ Ours(1024 points) | xyz | 1k | 91.1 | 93.6
+ Ours(2048 points) | xyz | 2k | 91.6 | 94.0
648
+ Table 1: Classification results on ModelNet40.
649
+ 4.1. Classification
650
+ Data. We evaluate our point cloud classification model
651
+ on the ModelNet40 [24] dataset.
652
+ This dataset contains
653
+ 12311 mesh CAD models from 40 categories, where 9843
654
+ models are used for training and 2468 models are used for
655
+ testing. We follow the experimental setting of [2]. We uni-
656
+ formly sample 1024 or 2048 points for each model, each
657
+ using only 3D coordinates (x, y, z) as input.
658
+ Data aug-
659
+ mentation operations include point shifting, scaling and
660
+ perturbing of the points.
661
+ Network Configuration. The network architecture is
662
+ shown in Fig. 2. At each layer we recompute the graph
663
+ based on feature similarity. For the 1024 points we set the
664
+ number of nearest neighbors k value to 20, and to maintain
665
+ the same density, we set k to 40 for the 2048 points. We
666
+ use four DFA layers to extract local geometric features and
667
+ a Pointnet-like structure to extract low-dimensional global
668
+ features. These are implemented using fully connected lay-
669
+ ers (64). We connect the extracted multi-layer features to
670
+ obtain 64×5 = 320-dimensional features. Then the global
671
+ features are obtained, and then two fully connected layers
672
+ are used to transform the global features for classification.
673
+ All layers use LeakyReLU and batch normalization. We
674
+ use the SGD optimizer with momentum of 0.9. The initial
675
+ learning rate is 0.1, and the random drop rate of the fully
676
+ connected layer is 0.5 to prevent overfitting. The batch size
677
+ is set to 32. We use Pytorch implementation and train the
678
+ network on two RTX 2080Ti GPUs.
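+ For reference, the optimizer settings described above translate into roughly the following PyTorch setup (the tiny model is only a placeholder, and no learning-rate schedule is assumed since none is specified in the text):
+     import torch
+     import torch.nn as nn
+ 
+     model = nn.Linear(3, 40)      # placeholder for the classification network
+     optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
+     dropout = nn.Dropout(p=0.5)   # applied in the fully connected layers
+     batch_size = 32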
679
+ Results. Table 1 shows the results of the classification
680
+ task, and the evaluation metrics we use on this dataset
681
+ are the average class accuracy and overall accuracy. Our
682
+ network is trained with only 3D coordinates as input, which contain less raw information, yet it achieves the
+ best results on this dataset.
685
+ The test result of 2048 sampling
686
+ points is better than that of 1024 points, indicating that
687
+ when more original information is included, our network
688
+ can learn more features and have better performance.
689
+ [Figure 3 panels: PointNet, DGCNN, AG-conv, ours, ground truth.]
696
+ Figure 3: Visual comparison of four methods for part segmentation.
697
+ Methods | mIoU | air. | bag | cap | car | cha. | ear. | gui. | kni. | lam. | lap. | mot. | mug | pis. | roc. | ska. | tab.
+ NUM | - | 2690 | 76 | 55 | 898 | 3758 | 69 | 787 | 392 | 1547 | 451 | 202 | 184 | 283 | 66 | 152 | 5271
+ Pointnet[2] | 83.7 | 83.4 | 78.7 | 82.5 | 74.9 | 89.6 | 73.0 | 91.5 | 85.9 | 80.8 | 95.3 | 65.2 | 93.0 | 81.2 | 57.9 | 72.8 | 80.6
+ Pointnet++[36] | 85.1 | 82.4 | 79.0 | 87.7 | 77.3 | 90.8 | 71.8 | 91.0 | 85.9 | 83.7 | 95.3 | 71.6 | 94.1 | 81.3 | 58.7 | 76.4 | 82.6
+ SO-Net[8] | 84.9 | 82.8 | 77.8 | 88.0 | 77.3 | 90.6 | 73.5 | 90.7 | 83.9 | 82.8 | 94.8 | 69.1 | 94.2 | 80.9 | 53.1 | 72.9 | 83.0
+ RGCNN[51] | 84.3 | 80.2 | 82.8 | 92.6 | 75.3 | 89.2 | 73.7 | 91.3 | 88.4 | 83.3 | 96.0 | 63.9 | 95.7 | 60.9 | 44.6 | 72.9 | 80.4
+ DGCNN[5] | 85.2 | 84.0 | 83.4 | 86.7 | 77.8 | 90.6 | 74.7 | 91.2 | 87.5 | 82.8 | 95.7 | 66.3 | 94.9 | 81.1 | 63.5 | 74.5 | 82.6
+ PCNN[37] | 85.1 | 82.4 | 80.1 | 85.5 | 79.5 | 90.8 | 73.2 | 91.3 | 86.0 | 85.0 | 96.7 | 73.2 | 94.8 | 83.3 | 51.0 | 75.0 | 81.8
+ 3D-GCN[39] | 85.1 | 83.1 | 84.0 | 86.6 | 77.5 | 90.3 | 74.1 | 90.9 | 86.4 | 83.8 | 95.3 | 65.2 | 93.0 | 81.2 | 59.6 | 75.7 | 82.8
+ PointASNL[44] | 86.1 | 84.1 | 84.7 | 87.9 | 79.7 | 92.2 | 73.7 | 91.0 | 87.2 | 84.2 | 95.8 | 74.4 | 95.2 | 81.0 | 63.0 | 76.3 | 83.2
+ PRA-Net[52] | 86.3 | 84.4 | 86.8 | 89.5 | 78.4 | 91.4 | 76.4 | 91.5 | 88.2 | 85.3 | 95.7 | 73.4 | 94.8 | 82.1 | 62.3 | 75.5 | 84.0
+ Ours | 86.0 | 85.4 | 80.0 | 85.8 | 80.6 | 92.4 | 74.1 | 92.0 | 87.4 | 84.6 | 95.6 | 73.5 | 94.4 | 83.9 | 59.0 | 74.0 | 83.2
912
+ Table 2: Part segmentation results on ShapeNet dataset. Metric is mIoU(%).
913
+ 4.2. Part Segmentation
914
+ Data. We test our model on the ShapeNet dataset [25]
915
+ for point cloud part segmentation. This dataset contains
916
+ 16881 shapes in 16 categories, of which 14006 are used
917
+ for training and 2874 are used for testing. There are 50
918
+ parts tags in total, and each model includes 2-6 parts.
919
+ We follow the experimental setup of [2]. 2048 points are
920
+ sampled from each shape, and the input consists only of
921
+ the 3D coordinates.
922
+ Network Configuration. We use three DFA layers to
923
+ extract features, and operate the same as classification to
924
+ obtain 1024-dimensional global features. Following [5], we
925
+ also add a one-hot vector representing the category type
926
+ to each point. Then we concatenate global features and
927
+ category vectors as new global features with 1024 + 64 =
928
+ 1088-dimensions.
929
+ We re-concatenate the previous three
930
+ local features and convert them into the features of each
931
+ point through three fully connected layers (512, 256, 128)
932
+ for segmentation. The settings of our training parameters
933
+ are the same as in the classification task, except that the
934
+ batch size is changed to 16.
935
+ Results. We evaluate the performance of part segmen-
936
+ tation by the mIou metric. The Iou of a shape is computed
937
+ by averaging of each part. The mean Iou (mIou) is calcu-
938
+ lated by averaging the Ious of all testing instances. From
939
+ the experimental results in table 2, it can be seen that
940
942
+ Methods | mAcc | mIoU | ceiling | floor | wall | beam | column | windows | door | chair | table | bookcase | sofa | board | clutter
+ Pointnet[2] | 48.98 | 41.09 | 88.80 | 97.33 | 69.80 | 0.05 | 3.92 | 46.26 | 10.76 | 58.93 | 52.61 | 5.85 | 40.28 | 26.38 | 33.22
+ SEGCloud[28] | 57.35 | 48.92 | 90.06 | 96.05 | 69.86 | 0.00 | 18.37 | 38.35 | 23.12 | 70.40 | 75.89 | 40.88 | 58.42 | 12.96 | 41.60
+ PointCNN[38] | 63.86 | 57.26 | 92.31 | 98.24 | 79.41 | 0.00 | 17.60 | 22.77 | 62.09 | 74.39 | 80.59 | 31.67 | 66.67 | 62.05 | 56.74
+ PointWeb[12] | 66.64 | 60.28 | 91.95 | 98.48 | 79.39 | 0.00 | 21.11 | 59.72 | 34.81 | 76.33 | 88.27 | 46.89 | 69.30 | 64.91 | 52.46
+ SPG[53] | 66.50 | 58.04 | 89.35 | 96.87 | 78.12 | 0.00 | 42.81 | 48.93 | 61.58 | 84.66 | 75.41 | 69.84 | 52.60 | 2.10 | 52.22
+ PCNN[37] | 67.01 | 58.27 | 92.26 | 96.20 | 75.89 | 0.27 | 5.98 | 69.49 | 63.45 | 66.87 | 65.63 | 47.28 | 68.91 | 59.10 | 46.22
+ PCT[41] | 67.65 | 61.33 | 92.54 | 98.42 | 80.63 | 0.00 | 19.35 | 61.64 | 48.00 | 76.58 | 85.20 | 46.22 | 67.71 | 67.93 | 52.29
+ Ours | 67.96 | 62.18 | 92.68 | 98.50 | 79.12 | 0.05 | 36.72 | 67.45 | 65.18 | 75.36 | 86.77 | 71.52 | 52.59 | 65.02 | 57.12
1086
+ Table 3: Semantic segmentation results on S3DIS dataset.
1087
+ [Figure 4 panels: Pointnet, DGCNN, ours, ground truth.]
1091
+ Figure 4: Visual comparison of three methods for semantic segmentation.
1092
+ in some categories with a small number of samples, the
1093
+ segmentation effect is not good due to too few training
1094
+ samples. But overall, our method has better performance,
1095
+ especially with the highest mIou in many categories such
1096
+ as airplane, car, chair, etc. This benefits from these cat-
1097
+ egories having sufficient samples so that our network can
1098
+ learn rich features for part segmentation tasks.
1099
+ Fig. 3 shows the visual differences between us and several other
1102
+ mainstream methods on some categories. These methods
1103
+ are roughly capable of distinguishing different parts of an
1104
+ object, and the difference lies in the identification of de-
1105
+ tails. Looking closely at the tail section of the airplane,
1106
+ the fence section below the chair, the top of the car, and
1107
+ the connection between different parts in the guitar, our
1108
+ method is closer to the ground truth.
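+ As described at the start of this subsection, the IoU of a shape averages its per-part IoUs, and mIoU averages over all test instances; a small sketch of that computation (the arrays and the parts_of / test_instances names are hypothetical):
+     import numpy as np
+ 
+     def shape_iou(pred, label, parts):
+         # pred, label: (N,) per-point part ids for one shape; parts: part ids of its category
+         ious = []
+         for p in parts:
+             inter = np.sum((pred == p) & (label == p))
+             union = np.sum((pred == p) | (label == p))
+             ious.append(1.0 if union == 0 else inter / union)
+         return np.mean(ious)
+ 
+     # mIoU = np.mean([shape_iou(p, l, parts_of(c)) for p, l, c in test_instances])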
1109
+ 4.3. Semantic Segmentation
1110
+ Data. We further test our model on the Stanford Large-
1111
+ Scale 3D Indoor Spaces Dataset (S3DIS) dataset [26] for
1112
+ point cloud semantic scene segmentation.
1113
+ This dataset
1114
+ is taken from 271 rooms in 6 different areas in 3 differ-
1115
+ ent buildings.
1116
+ The point cloud data of each scene has
1117
+ 9-dimensional data including xyz three-dimensional coor-
1118
+ dinates, RGB color information, and the normalized posi-
1119
+ tion coordinates x′y′z′ of each point relative to the room
1120
+ where it is located. At the same time, each point cloud in
1121
+ the scene is assigned a semantic label from 13 categories
1122
1124
+ (such as ceiling, table, etc.).
1125
+ Network Configuration. Our semantic segmentation
1126
+ network configuration is the same as for part segmentation; the only difference is that no category feature
+ vector is added.
+ Results. We divide each room into 1m × 1m blocks and sample 4096 points from each block during
+ training, and we use Area 5 as the test set. For evaluation metrics, we use mean class accuracy (mAcc)
+ and mean class intersection over union (mIoU). The experimental results are shown in Table 3, and the
+ visualization in Fig. 4.
1134
+ 4.4. Ablation Studies
1135
+ In this subsection, we explore the effect of using different
1136
+ choices in the network. The effectiveness of our module
1137
+ and parameter selection is demonstrated in these ablation
1138
+ experiments.
1139
+ Number of neighbors. The k value of constructing
1140
+ the local graph structure has a great influence on the ex-
1141
+ tracted features. Therefore, it is very important to choose
1142
+ an appropriate value of k in the experiment. We conducted
1143
+ 4 sets of experiments to explore the impact of choosing dif-
1144
+ ferent k values on the classification results of 2048 points,
1145
+ which is also shown in the table 4. When the value of k is
1146
+ 10 and 20, the neighborhood of each center point is small and cannot fully interact with the neighboring
+ points. Increasing k appropriately brings further improvement, which also shows that DFA can effectively
+ use the features of neighborhood points to learn local features. However, increasing k indefinitely does not
+ keep improving the accuracy of the model, because when the value of k
1154
+ is too large, there will be many noise points that are very
1155
+ different from the center point features, which is useless or
1156
+ even burdensome for updating the center point features,
1157
+ and will also increase the amount of parameters and net-
1158
+ work training time. Choosing a neighbor k value of 40 can
1159
+ obtain the best average class accuracy and overall accu-
1160
+ racy.
1161
+ k | mAcc | OA
+ 10 | 90.2 | 93.3
+ 20 | 90.8 | 93.7
+ 40 | 91.6 | 94.0
+ 60 | 91.5 | 93.3
1176
+ Table 4: Number of neighbors(k)
1177
+ Selection of aggregate functions Π. It can be seen in
1178
+ many previous works[2][36][41] that some symmetric pool-
1179
+ ing functions such as max/sum/mean are often used to
1180
+ overcome the disordered characteristics of point clouds.
1181
+ In our DFA layer, we also need to aggregate edge features
1182
+ to update features for each center point. We experimented
1183
+ with different aggregation functions such as max, sum, or
1184
+ sum with attention weights which first do softmax on k-
1185
+ nearest neighbors dimension to get the attention weights
1186
+ and then multiply and accumulate them accordingly. The
1187
+ max function is to select the largest feature of points in
1188
+ the local neighborhood. The sum function is to add the
1189
+ features of all points in the neighborhood, and the mean
1190
+ function is to divide by the k value after the sum func-
1191
+ tion. Table 5 shows the results of our selection of differ-
1192
+ ent aggregation functions on a classification experiment of
1193
+ 2048 points. Although the maximum pooling function will
1194
+ lose the non-largest part of the features, it will retain the
1195
+ largest part of the most significant features, and the ex-
1196
+ perimental results show that it is the most effective. We
1197
+ finally choose the best-performing max function to aggre-
1198
+ gate the edge features.
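+ The three aggregation choices compared here can be written compactly as follows; this is a sketch, not the authors' code, and the attention variant is one plausible reading of the softmax-weighted sum described above (edge is a hypothetical (N, k, C) tensor of edge features):
+     import torch
+ 
+     def aggregate(edge, mode="max"):
+         # edge: (N, k, C) edge features of the k neighbors of each center point
+         if mode == "max":
+             return edge.max(dim=1).values
+         if mode == "sum":
+             return edge.sum(dim=1)
+         if mode == "attention_sum":
+             w = torch.softmax(edge, dim=1)     # softmax over the k-neighbor dimension
+             return (w * edge).sum(dim=1)
+         raise ValueError(mode)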
1199
+ Π | mAcc | OA
+ max | 91.6 | 94.0
+ sum | 90.5 | 93.4
+ mean | 90.3 | 93.2
+ attention sum | 91.0 | 93.5
1214
+ Table 5: Choice of different aggregation functions Π
1215
1217
+ Feature or space domains. Further, we explore in
1218
+ which domain is better to compute k-nearest neighbors,
1219
+ i.e., the feature domain or the spatial domain. If we choose
1220
+ to do k-nearest neighbors in the spatial domain, it means
1221
+ that the graph structure is fixed each time. On the one
1222
+ hand, the relative position coding will be the same, on
1223
+ the other hand, it is very limited to exchange information
1224
+ with fixed neighbor points each time. If we choose to do
1225
+ k-nearest neighbors on the feature domain, it means that
1226
+ the local graph structure is dynamically updated, and the
1227
+ neighbors of the graph are different each time but the fea-
1228
+ tures are similar. We can make better use of DFA layers to
1229
+ discover efficient features. We choose to compare the ex-
1230
+ perimental results in the classification task of 2048 points.
1231
+ As can be seen from the table 6, our way of exchanging
1232
+ information with neighbor updates in the feature domain
1233
+ is better. Because the k-nearest neighbors obtained in this
1234
+ way are more homogeneous. Especially for part segmen-
1235
+ tation, spatially adjacent points are not necessarily of the
1236
+ same class, so it is useless or even redundant to exchange
1237
+ information with these points.
1238
+ spatial or feature domain | mAcc | OA
+ feature | 91.6 | 94.0
+ spatial | 91.1 | 93.4
1247
+ Table 6: Comparison of k-nearest neighbors in feature domain and
1248
+ space.
1249
+ Relative position information. By computing the
1250
+ k-nearest neighbors of the feature domain, we are able to
1251
+ discover latent-location feature information that is not lim-
1252
+ ited by space. In this way, the relative position encoding
1253
+ in each DFA layer is different because the neighborhood
1254
+ points are changing. This allows us to connect points that
1255
+ may not be in close spatial locations. So we explore its ef-
1256
+ fectiveness by whether incorporating this part in the clas-
1257
+ sification task of 2048 points.
1258
+ The experimental results
1259
+ in table 7 show that adding location information encoding
1260
+ can have better performance. This also shows that the po-
1261
+ tential position information obtained by relative position
1262
+ encoding is crucial.
1263
+ Position information | mAcc | OA
+ w | 91.6 | 94.0
+ w/o | 90.1 | 93.3
1272
+ Table 7: Whether to add position information
1273
+ Low-dimensional global features. Inspired by
1277
+ Pointnet [2] and Pointnet++ [36], it is not advisable to
1278
+ only focus on global features or local features, so we adopt
1279
+ a fusion of both. Global features can provide overall direc-
1280
+ tion control, while local features can provide more detailed
1281
+ information. We believe that these are equally important
1282
+ in network learning, so after extracting local features of
1283
+ different depths, we concatenate these local features and
1284
+ low-dimensional global features together through MLP op-
1285
+ erations to upgrade to high-dimensional for subsequent
1286
+ tasks. To this end, we compare the classification results
1287
+ of 2048 points with or without adding low-dimensional
1288
+ global features. The table 8 confirms the effectiveness of
1289
+ our way of concatenating the learned local features and
1290
+ low-dimensional global features.
1291
+ Low-global features | mAcc | OA
+ w | 91.6 | 94.0
+ w/o | 89.9 | 93.1
1300
+ Table 8: Whether to add low-dimensional global features
1301
+ 4.5. Model Complexity
1302
+ We use the stat package for PyTorch to report quantitative measures of the network model: the total
+ number of parameters, the number of floating-point operations required for a forward pass, and the memory
+ occupied during inference. The
1307
+ experimental results are all tested based on the classifi-
1308
+ cation model on 1024 points. At the same time, we test
1309
1311
+ other mainstream methods for comparison as shown in the
1312
+ following table 9.
1313
+ It can be seen that our model has fewer parameters and
1314
+ does not occupy a large amount of memory, indicating that
1315
+ our network structure is lightweight, and not complicated
1316
+ and easy to implement. In networks based on graph meth-
1317
+ ods, the amount of computation is generally too large due
1318
+ to the need to interact with neighbors to update features.
1319
+ Compared with other methods of this type, our floating-
1320
+ point operations are also much less. At the same time the
1321
+ performance is still the best.
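+ The parameter counts in Table 9 can be checked approximately with a generic helper like the one below (a sketch; the FLOPs and memory figures come from the profiling tool mentioned above and are not re-derived here):
+     import torch.nn as nn
+ 
+     def count_parameters(model: nn.Module) -> float:
+         # total number of trainable parameters, in millions
+         return sum(p.numel() for p in model.parameters() if p.requires_grad) / 1e6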
1322
+ Method | Params | FLOPs | Memory | OA
+ Pointnet[2] | 0.7M | 0.5M | 10.5M | 89.2
+ Pointnet++[36] | 2.2M | 3.1M | 231.5M | 91.9
+ DGCNN[5] | 1.8M | 1.89G | 123.0M | 92.9
+ AG-conv[49] | 1.9M | 2.9G | 202.0M | 93.4
+ PCT[41] | 2.9M | 2.32G | 187.6M | 93.2
+ Ours | 1.1M | 2.17G | 154.5M | 93.6
1357
+ Table 9: Quantitative evaluation of classification on ModelNet40.
1358
+ 5. Conclusion
1359
+ This paper proposes a new operation for point cloud
1360
+ learning and also demonstrates its performance in differ-
1361
+ ent tasks. The main contribution of our method is to ag-
1362
+ gregate local feature in the feature domain, explore the la-
1363
+ tent relative position information and semantic feature in-
1364
+ formation, and learn to obtain higher-dimensional features
1365
+ by concatenating local features and low-dimensional global
1366
+ features. Our DFA can dynamically construct graphs that
1367
+ are not spatially correlated and exchange information be-
1368
+ tween points with semantically similar features.
1369
+ Exper-
1370
+ imental results show that our network outperforms the
1371
+ state-of-the-art on several public datasets. Further, our
1372
+ DFA module is simple and efficient, and can be seamlessly
1373
+ integrated into other network models.
1374
+ References
1375
+ [1] S. Biasotti, A. Cerri, A. Bronstein, M. Bronstein, Recent trends,
1376
+ applications, and perspectives in 3d shape similarity assessment,
1377
+ in: Computer graphics forum, Vol. 35, Wiley Online Library,
1378
+ 2016, pp. 87–119.
1379
+ [2] C. R. Qi, H. Su, K. Mo, L. J. Guibas, Pointnet: Deep learning on
1380
+ point sets for 3d classification and segmentation, in: Proceed-
1381
+ ings of the IEEE conference on computer vision and pattern
1382
+ recognition, 2017, pp. 652–660.
1383
+ [3] C. Wang, B. Samari, K. Siddiqi, Local spectral graph convolu-
1384
+ tion for point set feature learning, in: Proceedings of the Euro-
1385
+ pean conference on computer vision (ECCV), 2018, pp. 52–66.
1386
+ [4] Y. Shen, C. Feng, Y. Yang, D. Tian, Mining point cloud local
1387
+ structures by kernel correlation and graph pooling, in: Proceed-
1388
+ ings of the IEEE conference on computer vision and pattern
1389
+ recognition, 2018, pp. 4548–4557.
1390
+ [5] Y. Wang, Y. Sun, Z. Liu, S. E. Sarma, M. M. Bronstein, J. M.
1391
+ Solomon, Dynamic graph cnn for learning on point clouds, Acm
1392
+ Transactions On Graphics (tog) 38 (5) (2019) 1–12.
1393
+ [6] L. Wang, Y. Huang, Y. Hou, S. Zhang, J. Shan, Graph at-
1394
+ tention convolution for point cloud semantic segmentation, in:
1395
+ Proceedings of the IEEE/CVF conference on computer vision
1396
+ and pattern recognition, 2019, pp. 10296–10305.
1397
+ [7] J. Liu, B. Ni, C. Li, J. Yang, Q. Tian, Dynamic points agglom-
1398
+ eration for hierarchical point sets learning, in: Proceedings of
1399
+ the IEEE/CVF International Conference on Computer Vision,
1400
+ 2019, pp. 7546–7555.
1401
+ [8] J. Li, B. M. Chen, G. H. Lee, So-net: Self-organizing network for
1402
+ point cloud analysis, in: Proceedings of the IEEE conference on
1403
+ computer vision and pattern recognition, 2018, pp. 9397–9406.
1404
+ [9] A. Mnih, K. Gregor, Neural variational inference and learn-
1405
+ ing in belief networks, in: International Conference on Machine
1406
+ Learning, PMLR, 2014, pp. 1791–1799.
1407
+ [10] Q. Huang, W. Wang, U. Neumann, Recurrent slice networks for
1408
+ 3d segmentation of point clouds, in: Proceedings of the IEEE
1409
+ conference on computer vision and pattern recognition, 2018,
1410
+ pp. 2626–2635.
1411
+ [11] Z. Zhang, B.-S. Hua, S.-K. Yeung, Shellnet:
1412
+ Efficient point
1413
+ cloud convolutional neural networks using concentric shells
1414
+ statistics, in: Proceedings of the IEEE/CVF international con-
1415
+ ference on computer vision, 2019, pp. 1607–1616.
1416
+ [12] H. Zhao, L. Jiang, C.-W. Fu, J. Jia, Pointweb: Enhancing lo-
1417
+ cal neighborhood features for point cloud processing, in: Pro-
1418
+ ceedings of the IEEE/CVF conference on computer vision and
1419
+ pattern recognition, 2019, pp. 5565–5573.
1420
+ [13] H. Su, V. Jampani, D. Sun, S. Maji, E. Kalogerakis, M.-H.
1421
+ Yang, J. Kautz, Splatnet:
1422
+ Sparse lattice networks for point
1423
1425
+ cloud processing, in: Proceedings of the IEEE conference on
1426
+ computer vision and pattern recognition, 2018, pp. 2530–2539.
1427
+ [14] B.-S. Hua, M.-K. Tran, S.-K. Yeung, Pointwise convolutional
1428
+ neural networks, in: Proceedings of the IEEE conference on
1429
+ computer vision and pattern recognition, 2018, pp. 984–993.
1430
+ [15] W. Wu, Z. Qi, L. Fuxin, Pointconv: Deep convolutional net-
1431
+ works on 3d point clouds, in: Proceedings of the IEEE/CVF
1432
+ Conference on Computer Vision and Pattern Recognition, 2019,
1433
+ pp. 9621–9630.
1434
+ [16] S. Lan, R. Yu, G. Yu, L. S. Davis, Modeling local geometric
1435
+ structure of 3d point clouds using geo-cnn, in: Proceedings of
1436
+ the IEEE/cvf conference on computer vision and pattern recog-
1437
+ nition, 2019, pp. 998–1008.
1438
+ [17] A. Komarichev, Z. Zhong, J. Hua, A-cnn: Annularly convolu-
1439
+ tional neural networks on point clouds, in: Proceedings of the
1440
+ IEEE/CVF conference on computer vision and pattern recog-
1441
+ nition, 2019, pp. 7421–7430.
1442
+ [18] J. Mao, X. Wang, H. Li, Interpolated convolutional networks
1443
+ for 3d point cloud understanding, in:
1444
+ Proceedings of the
1445
+ IEEE/CVF international conference on computer vision, 2019,
1446
+ pp. 1578–1587.
1447
+ [19] H. Thomas, C. R. Qi, J.-E. Deschaud, B. Marcotegui,
1456
+ F. Goulette, L. J. Guibas, Kpconv: Flexible and deformable
1457
+ convolution for point clouds, in: Proceedings of the IEEE/CVF
1458
+ international conference on computer vision, 2019, pp. 6411–
1459
+ 6420.
1460
+ [20] A. Paigwar, O. Erkent, C. Wolf, C. Laugier, Attentional point-
1461
+ net for 3d-object detection in point clouds, in: Proceedings of
1462
+ the IEEE/CVF Conference on Computer Vision and Pattern
1463
+ Recognition Workshops, 2019, pp. 0–0.
1464
+ [21] S. Xie, S. Liu, Z. Chen, Z. Tu, Attentional shapecontextnet for
1465
+ point cloud recognition, in: Proceedings of the IEEE conference
1466
+ on computer vision and pattern recognition, 2018, pp. 4606–
1467
+ 4615.
1468
+ [22] W. Zhang, C. Xiao, Pcan:
1469
+ 3d attention map learning using
1470
+ contextual information for point cloud based retrieval, in: Pro-
1471
+ ceedings of the IEEE/CVF Conference on Computer Vision and
1472
+ Pattern Recognition, 2019, pp. 12436–12445.
1473
+ [23] J. Yang, Q. Zhang, B. Ni, L. Li, J. Liu, M. Zhou, Q. Tian,
1474
+ Modeling point clouds with self-attention and gumbel subset
1475
+ sampling, in: Proceedings of the IEEE/CVF conference on com-
1476
+ puter vision and pattern recognition, 2019, pp. 3323–3332.
1477
+ [24] Z. Wu, S. Song, A. Khosla, F. Yu, L. Zhang, X. Tang, J. Xiao,
1478
+ 3d shapenets: A deep representation for volumetric shapes, in:
1479
+ Proceedings of the IEEE conference on computer vision and
1480
+ pattern recognition, 2015, pp. 1912–1920.
1481
+ [25] L. Yi, V. G. Kim, D. Ceylan, I.-C. Shen, M. Yan, H. Su, C. Lu,
1482
+ Q. Huang, A. Sheffer, L. Guibas, A scalable active framework
1483
+ for region annotation in 3d shape collections, ACM Transactions
1484
+ on Graphics (ToG) 35 (6) (2016) 1–12.
1485
+ [26] I. Armeni, O. Sener, A. R. Zamir, H. Jiang, I. Brilakis, M. Fis-
1486
+ cher, S. Savarese, 3d semantic parsing of large-scale indoor
1487
+ spaces, in: Proceedings of the IEEE conference on computer
1488
+ vision and pattern recognition, 2016, pp. 1534–1543.
1489
+ [27] C. R. Qi, H. Su, M. Nießner, A. Dai, M. Yan, L. J. Guibas, Vol-
1490
+ umetric and multi-view cnns for object classification on 3d data,
1491
+ in: Proceedings of the IEEE conference on computer vision and
1492
+ pattern recognition, 2016, pp. 5648–5656.
1493
+ [28] L. Tchapmi, C. Choy, I. Armeni, J. Gwak, S. Savarese, Seg-
1494
+ cloud: Semantic segmentation of 3d point clouds, in: 2017 in-
1495
+ ternational conference on 3D vision (3DV), IEEE, 2017, pp.
1496
+ 537–547.
1497
+ [29] P.-S. Wang, Y. Liu, Y.-X. Guo, C.-Y. Sun, X. Tong, O-cnn:
1498
+ Octree-based convolutional neural networks for 3d shape analy-
1499
+ sis, ACM Transactions On Graphics (TOG) 36 (4) (2017) 1–11.
1500
+ [30] H.-Y. Meng, L. Gao, Y.-K. Lai, D. Manocha, Vv-net: Voxel
1501
+ vae net with group convolutions for point cloud segmentation,
1502
+ in: Proceedings of the IEEE/CVF international conference on
1503
+ computer vision, 2019, pp. 8500–8508.
1504
+ [31] T. Shao, Y. Yang, Y. Weng, Q. Hou, K. Zhou, H-cnn: Spatial
1505
+ hashing based cnn for 3d shape analysis, IEEE transactions on
1506
+ visualization and computer graphics 26 (7) (2018) 2403–2416.
1507
+ [32] F. J. Lawin, M. Danelljan, P. Tosteberg, G. Bhat, F. S. Khan,
1508
+ M. Felsberg, Deep projective 3d semantic segmentation, in: In-
1509
+ ternational Conference on Computer Analysis of Images and
1510
+ Patterns, Springer, 2017, pp. 95–107.
1511
+ [33] J. Guerry, A. Boulch, B. Le Saux, J. Moras, A. Plyer, D. Fil-
1512
+ liat, Snapnet-r: Consistent 3d multi-view semantic labeling for
1513
+ robotics, in: Proceedings of the IEEE international conference
1514
+ on computer vision workshops, 2017, pp. 669–678.
1515
+ [34] M. Jaritz, J. Gu, H. Su, Multi-view pointnet for 3d scene un-
1516
+ derstanding, in: Proceedings of the IEEE/CVF International
1517
+ Conference on Computer Vision Workshops, 2019, pp. 0–0.
1518
+ [35] Z. Yang, L. Wang, Learning relationships for multi-view 3d ob-
1519
+ ject recognition, in:
1520
+ Proceedings of the IEEE/CVF Interna-
1521
+ tional Conference on Computer Vision, 2019, pp. 7505–7514.
1522
+ [36] C. R. Qi, L. Yi, H. Su, L. J. Guibas, Pointnet++: Deep hierar-
1523
+ chical feature learning on point sets in a metric space, Advances
1524
+ in neural information processing systems 30.
1525
+ [37] M. Atzmon, H. Maron, Y. Lipman, Point convolutional neural networks by extension operators, arXiv preprint arXiv:1803.10091.
1541
+ [38] Y. Li, R. Bu, M. Sun, W. Wu, X. Di, B. Chen, Pointcnn: Convo-
1542
+ lution on x-transformed points, Advances in neural information
1543
+ processing systems 31.
1544
+ [39] Z.-H. Lin, S.-Y. Huang, Y.-C. F. Wang, Convolution in the
1545
1547
+ cloud:
1548
+ Learning deformable kernels in 3d graph convolution
1549
+ networks for point cloud analysis, in:
1550
+ Proceedings of the
1551
+ IEEE/CVF conference on computer vision and pattern recog-
1552
+ nition, 2020, pp. 1800–1809.
1553
+ [40] G. Li, M. Muller, A. Thabet, B. Ghanem, Deepgcns: Can gcns
1554
+ go as deep as cnns?, in: Proceedings of the IEEE/CVF inter-
1555
+ national conference on computer vision, 2019, pp. 9267–9276.
1556
+ [41] M.-H. Guo, J.-X. Cai, Z.-N. Liu, T.-J. Mu, R. R. Martin, S.-M.
1557
+ Hu, Pct: Point cloud transformer, Computational Visual Media
1558
+ 7 (2) (2021) 187–199.
1559
+ [42] H. Zhao, L. Jiang, J. Jia, P. H. Torr, V. Koltun, Point trans-
1560
+ former, in: Proceedings of the IEEE/CVF International Con-
1561
+ ference on Computer Vision, 2021, pp. 16259–16268.
1562
+ [43] W. Han, H. Wu, C. Wen, C. Wang, X. Li, Blnet: Bidirectional
1563
+ learning network for point clouds, Computational Visual Media
1564
+ (2022) 1–12.
1565
+ [44] X. Yan, C. Zheng, Z. Li, S. Wang, S. Cui, Pointasnl: Robust
1566
+ point clouds processing using nonlocal neural networks with
1567
+ adaptive sampling, in: Proceedings of the IEEE/CVF Confer-
1568
+ ence on Computer Vision and Pattern Recognition, 2020, pp.
1569
+ 5589–5598.
1570
+ [45] Y. Xu, T. Fan, M. Xu, L. Zeng, Y. Qiao, Spidercnn: Deep
1571
+ learning on point sets with parameterized convolutional filters,
1572
+ in: Proceedings of the European Conference on Computer Vi-
1573
+ sion (ECCV), 2018, pp. 87–102.
1574
+ [46] X. Liu, Z. Han, Y.-S. Liu, M. Zwicker, Point2sequence: Learn-
1575
+ ing the shape representation of 3d point clouds with an
1576
+ attention-based sequence to sequence network, in: Proceedings
1577
+ of the AAAI Conference on Artificial Intelligence, Vol. 33, 2019,
1578
+ pp. 8778–8785.
1579
+ [47] Y. Lin, Z. Yan, H. Huang, D. Du, L. Liu, S. Cui, X. Han, Fp-
1580
+ conv: Learning local flattening for point convolution, in: Pro-
1581
+ ceedings of the IEEE/CVF Conference on Computer Vision and
1582
+ Pattern Recognition, 2020, pp. 4293–4302.
1583
+ [48] W. Han, C. Wen, C. Wang, X. Li, Q. Li, Point2node: Correla-
1584
+ tion learning of dynamic-node for point cloud feature modeling,
1585
+ in: Proceedings of the AAAI Conference on Artificial Intelli-
1586
+ gence, Vol. 34, 2020, pp. 10925–10932.
1587
+ [49] H. Zhou, Y. Feng, M. Fang, M. Wei, J. Qin, T. Lu, Adaptive
1588
+ graph convolution for point cloud analysis, in: Proceedings of
1589
+ the IEEE/CVF International Conference on Computer Vision,
1590
+ 2021, pp. 4965–4974.
1591
+ [50] K. T. Wijaya, D.-H. Paek, S.-H. Kong, Advanced feature learn-
1592
+ ing on point clouds using multi-resolution features and learnable
1593
+ pooling, arXiv preprint arXiv:2205.09962.
1594
+ [51] G. Te, W. Hu, A. Zheng, Z. Guo, Rgcnn: Regularized graph cnn
1595
+ for point cloud segmentation, in: Proceedings of the 26th ACM
1596
+ international conference on Multimedia, 2018, pp. 746–754.
1597
+ [52] S. Cheng, X. Chen, X. He, Z. Liu, X. Bai, Pra-net:
1598
+ Point
1599
+ relation-aware network for 3d point cloud analysis, IEEE Trans-
1600
+ actions on Image Processing PP (99).
1601
+ [53] L. Landrieu, M. Simonovsky, Large-scale point cloud semantic
1602
+ segmentation with superpoint graphs, in: Proceedings of the
1603
+ IEEE conference on computer vision and pattern recognition,
1604
+ 2018, pp. 4558–4567.
1605
3NAyT4oBgHgl3EQfo_gV/content/2301.00515v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38f99e325325ec339db4458a18e9ad613c7b76c40ac403376024ab4f7e4464df
3
+ size 3587107
3NAyT4oBgHgl3EQfo_gV/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ea2041feb817da058c01223a475070b60a9c3eb3f8355742636c288ef1c3ea5
3
+ size 5505069
3NFST4oBgHgl3EQfYjhQ/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37f989248bf0e7d7631889667f317461d99ee6d1f09c4d31ff451edb81b5236b
3
+ size 3473453
3tAyT4oBgHgl3EQfb_f6/content/2301.00276v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:66a45f1bf1b8c0ce86f4c2698182a0db7fd8ea428831d4d3721b205967f1b3d6
3
+ size 397459
3tAyT4oBgHgl3EQfb_f6/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad3e59720d8a4c56546248e3fa00303564b64620f6fb76d6a3c812bffc166bf6
3
+ size 5046317
3tAzT4oBgHgl3EQfuf3H/content/tmp_files/2301.01693v1.pdf.txt ADDED
@@ -0,0 +1,940 @@
1
+ Mortality modeling at old-age: a mixture model approach
2
+ Silvio C. Patricio*
3
+ The Interdisciplinary Centre on Population Dynamics, University of Southern Denmark
4
+ Fredy Castellares
5
+ Departamento de Estat´ıstica, Universidade Federal de Minas Gerais
6
+ Bernardo Queiroz
7
+ Departamento de Demografia, Universidade Federal de Minas Gerais
8
+ January 5, 2023
9
+ Abstract
10
+ In this paper, we propose a mixture-based model for mortality modeling above age 70. The proposed
11
+ model is compared with 4 other widely used models: the Beard, Gompertz, Makeham, and Perks models.
12
+ Our model captures well the mortality rate’s behavior at all the ages. We applied the method to a country
13
+ with high quality data, Japan, and one with lower data quality, Brazil. In the comparative study for the
14
+ Japanese population, the model presented a better fit to the data, obtaining an absolute mean percentage
15
+ error of less than 7%, while the other models presented values greater than 30%.
16
+ Keywords: mixture model, old-age, mortality modeling
17
+ 1
18
+ Introduction
19
+ In the past centuries, much has been done to model the process of mortality in populations and its con-
20
+ sequences (Graunt, 1662; Gompertz, 1825a; Wilmoth, 2000; van Raalte, 2021). One of humanity’s most
21
+ outstanding achievements in the last century, perhaps the last millennium, has been the four-decade increase
22
+ in human life expectancy over the past 160 years (Vaupel et al., 2021; Wilmoth, 2000) and the improvement
23
+ in human mortality. All these changes in human longevity directly affect pension, welfare, and health care
24
+ systems (Cutler et al., 2006).
25
+ *silca@sam.sdu.dk
26
+ arXiv:2301.01693v1 [stat.AP] 4 Jan 2023
28
+
29
+ Despite pioneering work by Graunt and Gompertz, understanding of mortality for older ages remains
30
+ a challenge, especially in developing countries with more defective data. In general, mortality estimates at
31
+ older ages are limited by small numbers both in the exposure, death count and problems with age declaration
32
+ (Feehan, 2018; Wrigley-Field, 2014; Nepomuceno et al., 2019). There is an important and ongoing debate
33
+ about the levels of mortality at older ages. In general terms, the debate is whether mortality at older ages
34
+ is declining or continues to increase (Gavrilov & Gavrilova, 2019; Feehan, 2018). In some settings, such
35
+ as Brazil, there is also an important question on the crossover of mortality at older ages when comparing
36
+ different population sub-groups (Nepomuceno et al., 2019; Pinheiro & Queiroz, 2019; Gomes & Turra,
37
+ 2009).
38
+ In addition to the problem of data quality, there is a debate on hypotheses about selectivity and about
+ the biological limit of mortality in human populations that, in different ways, would impact the behavior
+ of mortality rates at more advanced ages. One of the consequences of the mortality selectivity hypothesis
+ would be a greater rate of deceleration of mortality rates at more advanced ages. In this context, there is a
+ series of models to explain mortality behavior at older ages. The choice of the appropriate model depends
+ on the hypotheses assumed, whether in relation to the quality of the data or in relation to the impacts
+ produced by selectivity.
45
+ There are several possible explanations for the observed results and estimates. First one is related to
46
+ data quality in different areas of a country, across sub-population groups and age. For instance, it could be a
47
+ consequence of different age misreporting patterns or issues with quality of vital registration systems (Black
48
+ et al., 2017). Preston et al (2000) investigated how different types of age misreporting can affect estimates
49
+ of mortality rates at older ages, by analyzing the effects of three patterns of age misreporting: net age
50
+ overstatement, net age understatement, and symmetric age misreporting. It is also possible that mortality
51
+ selection plays a role in the observed levels of mortality at older ages (Barbi et al., 2018; Wachter, 2018).
52
+ In the context of higher mortality rates at young ages, survivors to older ages would be physiologically
53
+ stronger and then live longer than others.
54
+ Unfortunately, data quality at older ages limits the understanding of mortality and the evolution of
55
+ survivorship at older ages. Feehan (2018) uses alternative methods to cohort mortality above age 80. He
56
+ finds that no model can be universally applied to estimate old-age mortality, but he argues that Log-Quad
57
+ (Wilmoth et al., 2012) provides a good fit. However, the log-quad method is based on standard mortality
58
+ changes from the Human Mortality Database that is constructed from a series of countries in the Northern
59
+ Hemisphere and might be limited to low and middle income countries.
60
+ In this paper, we suggest a model that captures decline in mortality rates at older ages, which is a
61
+ characteristic observed in some populations. Based on the proposed model, we perform a comparative study
62
+ using established mortality laws alongside our proposed approach. The analysis is split into two parts. First,
+ we compare the four widely used models with the proposed model, studying their behavior on two databases:
+ one with good-quality mortality data for Japan in 2015 (obtained from the Human Mortality Database), and
+ one with limited mortality data for Brazil in 2010. The models are evaluated by the Mean Absolute Percentage
+ Error (MAPE) of the log-hazard using leave-one-out cross-validation, and the model with the smallest MAPE
+ is taken to be the best. Moreover, as some models are complex, a genetic algorithm was used to obtain the
+ maximum likelihood estimates; using this algorithm ensures convergence to the global maximum. The second
+ part applies the proposed model to different databases and aims to understand its behavior and verify its
+ potential for application to real data. The proposed model presented a better fit to the data, obtaining a
+ mean absolute percentage error of less than 7%, while the other models presented values greater than 30%.
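+ A sketch of this evaluation criterion, the MAPE of the log-hazard (the fitting step of the leave-one-out loop is left as a comment because it depends on the estimation routines of Section 2):
+     import numpy as np
+ 
+     def mape_log_hazard(observed_mx, fitted_mx):
+         # mean absolute percentage error between observed and fitted log-hazards
+         obs, fit = np.log(observed_mx), np.log(fitted_mx)
+         return 100.0 * np.mean(np.abs((obs - fit) / obs))
+ 
+     # leave-one-out: for each age k, refit the model on all other ages and
+     # compare the predicted log-hazard at k with the observed one.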
76
+ 2
77
+ Models specification’s and parameter estimation
78
+ Considering a non-negative random variable (r.v.) T defined on a probability space (R+, B, Pθ), representing
+ the individual life-span, the r.v. T can be characterized by the survival function
+ S(x|θ) = Pθ(T > x),
+ which is associated with the density
+ f(x|θ) = −∂S(x|θ)/∂x.
84
+ If S is a continuous survival function associated with a f density function, then the function µ defined in
85
+ R+ by
+ µ(x|θ) = lim_{ε↓0} Pθ(x < T < x + ε | T > x)/ε = f(x|θ)/S(x|θ)
+ is called the force of mortality of T. This function is usually used to describe the force of mortality for a
+ group of people or a population.
94
+ The inferences in the model are based on the assumption that the number of deaths has a Poisson
+ distribution. Therefore, let D = (D0, D1, . . . , Dm)′ be a random sample with Poisson distribution, with Dk
+ representing the number of deaths between ages [k, k + 1), with k = 0, . . . , m, i.e. the number of deaths of
+ people aged k years.
98
+ For this approach it is considered that E(Dk) = µ(k|θ)Ek, with µ(k|θ) representing the mortality force
99
+ at age k, where θ = (θ1, θ2, . . . , θp)′ is the parameter vector that characterizes the mortality rate, and Ek the
100
+ population at age k exposed to risk, that are assumptions widely used by demographers (Brillinger et al.,
101
103
+ 1986). Also, since it is the Poisson distribution, we have V(Dk) = µ(k|θ)Ek, the same value as the expectation.
+ Let D = (D0, . . . , Dm)′ and E = (E0, . . . , Em)′. The log-likelihood function of θ is given by
105
+ ℓ(θ|D) = Σ_{k=1}^{m} [ Dk log λ(θ, k) − λ(θ, k) ],    (1)
111
+ with λ(θ, x) = µ(x|θ)E(x). The likelihood estimate �θ is obtained from maximizing the log-likelihood
112
+ function with in equation 1, with respect to θ. Obtaining the partial derivative vector of the equation 1, with
113
+ respect to θi, i = 1, . . . , p, we have
114
+ ∂ℓ(θ|D)/∂θi = Σ_{k=1}^{m} [ Dk/µ(k|θ) − Ek ] ∂µ(k|θ)/∂θi.    (2)
127
+ The likelihood estimation can also be obtained by equating the partial derivative vector to zero and simul-
128
+ taneously solving the system of equations. The explicit form of the gradient vector is explained for each of
129
+ the models considered in this article. The Newton-Raphson method can be applied to solve the likelihood
130
+ equation to obtain the estimate �θ.
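+ As an illustration of this estimation scheme, the sketch below maximizes the Poisson log-likelihood (1) numerically for the Gompertz hazard of Section 2.2 (scipy's general-purpose optimizer is used here instead of the Newton–Raphson iteration or the genetic algorithm mentioned in the text):
+     import numpy as np
+     from scipy.optimize import minimize
+ 
+     def neg_loglik(theta, D, E):
+         # theta = (a, b) for the Gompertz hazard mu(k|theta) = a * exp(b * k)
+         a, b = theta
+         k = np.arange(len(D))
+         lam = a * np.exp(b * k) * E          # lambda(theta, k) = mu(k|theta) * E_k
+         return -np.sum(D * np.log(lam) - lam)
+ 
+     def fit_gompertz(D, E, start=(1e-4, 0.1)):
+         res = minimize(neg_loglik, x0=np.array(start), args=(D, E), method="Nelder-Mead")
+         return res.x                          # maximum likelihood estimate of (a, b)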
131
+ 2.1
132
+ Beard model
133
+ In this model introduced in Beard (1959), we have that the force of mortality is given by
134
+ µ(k|θ) = a e^{bk} / (1 + δ e^{bk}),
+ with θ = (a, b, δ)′ ∈ R³₊, from which we calculate the partial derivatives with respect to a, b and δ. Equation
+ (2) gives us a general expression for the gradient vector, which depends only on the mortality rate and its
+ partial derivative with respect to each parameter. Hence we get
141
+ ∂ℓ(θ|D)/∂a = Σ_{k=1}^{m} [ Dk (1 + δe^{bk})/(a e^{bk}) − Ek ] e^{bk}/(1 + δe^{bk})
+ ∂ℓ(θ|D)/∂b = Σ_{k=1}^{m} [ Dk (1 + δe^{bk})/(a e^{bk}) − Ek ] a k e^{bk}/(1 + δe^{bk})²
+ ∂ℓ(θ|D)/∂δ = Σ_{k=1}^{m} [ Dk (1 + δe^{bk})/(a e^{bk}) − Ek ] a e^{2bk}/(1 + δe^{bk})²
188
+ representing the gradient vector.
189
+ 2.2
190
+ Gompertz model
191
+ In this model introduced in Gompertz (1825b), we have that the force of mortality is given by
192
+ µ(k|θ) = a e^{bk},
+ with θ = (a, b)′ ∈ R²₊. So for the gradient vector we have
195
+ ∂ℓ(θ|D)
196
+ ∂a
197
+ =
198
+ m
199
+
200
+ k=1
201
+ � Dk
202
+ aebk − Ek
203
+
204
+ ebk
205
+ ∂ℓ(θ|D)
206
+ ∂b
207
+ =
208
+ m
209
+
210
+ k=1
211
+ � Dk
212
+ aebk − Ek
213
+
214
+ akebk
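+ As an illustration, here is a short Python sketch of the Gompertz score vector above, which could be plugged
+ into a Newton–Raphson or gradient-based routine; variable names and the age index k = 0, . . . , m are assumptions.
+ import numpy as np
+ def gompertz_score(theta, D, E):
+     # Gradient of (1) for mu(k|theta) = a*exp(b*k): each component is sum_k (Dk/mu_k - Ek) * dmu_k/dtheta_i
+     a, b = theta
+     k = np.arange(len(D))
+     ebk = np.exp(b * k)
+     resid = D / (a * ebk) - E
+     return np.array([np.sum(resid * ebk), np.sum(resid * a * k * ebk)])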
215
+ 2.3 Makeham model
+ In this model, introduced in Makeham (1860), the force of mortality is given by
+ µ(k|θ) = a e^{bk} + c,
+ with θ = (a, b, c)′ ∈ R^3_+. So for the gradient vector we have
+ ∂ℓ(θ|D)/∂a = Σ_{k=1}^m [ Dk/(a e^{bk} + c) − Ek ] e^{bk}
+ ∂ℓ(θ|D)/∂b = Σ_{k=1}^m [ Dk/(a e^{bk} + c) − Ek ] a k e^{bk}
+ ∂ℓ(θ|D)/∂c = Σ_{k=1}^m [ Dk/(a e^{bk} + c) − Ek ]
254
+
255
+ 2.4 Perks model
+ In this model, introduced in Perks (1932), the force of mortality is given by
+ µ(k|θ) = (γ + a e^{bk}) / (1 + δ e^{bk}),
+ with θ = (a, b, γ, δ)′. So for the gradient vector we have
+ ∂ℓ(θ|D)/∂a = Σ_{k=1}^m [ Dk (1 + δe^{bk})/(γ + a e^{bk}) − Ek ] · e^{bk}/(1 + δe^{bk})
+ ∂ℓ(θ|D)/∂b = Σ_{k=1}^m [ Dk (1 + δe^{bk})/(γ + a e^{bk}) − Ek ] · k(a − δγ)e^{bk}/(1 + δe^{bk})^2
+ ∂ℓ(θ|D)/∂γ = Σ_{k=1}^m [ Dk (1 + δe^{bk})/(γ + a e^{bk}) − Ek ] · 1/(1 + δe^{bk})
+ ∂ℓ(θ|D)/∂δ = −Σ_{k=1}^m [ Dk (1 + δe^{bk})/(γ + a e^{bk}) − Ek ] · e^{bk}(a e^{bk} + γ)/(1 + δe^{bk})^2
321
+ 2.5 Mixture model
+ As with Makeham, we seek to decompose mortality into two components, premature and senescent
+ mortality, respectively modeled by an exponential and a Gompertz component. However, Makeham dis-
+ tinguishes these components through the force of mortality, and here we propose to distinguish them through
+ the distribution. Therefore, we consider that the r.v. T introduced at the beginning of this section is
+ associated with a probability density function f, which is defined as
328
+ f(x|θ) = p λe^{−λx} + (1 − p) ab exp{−a(e^{bx} − 1) + bx},   (3)
+ with θ = (a, b, λ, p)′.
343
+ The density f is a mixture of a Gompertz and an exponential distribution. The Gompertz component fits
+ the count of senescent deaths, and the exponential component fits premature deaths, such as those due to accidents
+ and disease. Briefly, this model assumes the existence of two subpopulations in the death counts, one
+ Gompertz and the other exponential, and the parameters p and q = 1 − p represent the proportion of each
+ one.
350
+ Since the random variable T is associated with a density function, we can also associate it with a hazard
+ function. In this case the force of mortality is defined by
+ µ(x|θ) = f(x|θ)/S(x|θ) = [ p λe^{−λx} + (1 − p) ab exp{−a(e^{bx} − 1) + bx} ] / [ p e^{−λx} + (1 − p) exp{−a(e^{bx} − 1)} ],   (4)
+ for which there is no straightforward interpretation. This loss of interpretability is the price paid for the ease of
+ deriving quantities such as statistical moments and the expected residual life (for more details, see Finkelstein (2009)).
+ From this we can obtain the gradient vector, which, for this model, is given by
374
+ ∂ℓ(θ|D)/∂θi = Σ_{k=1}^m [ Dk ( p e^{−λk} + (1 − p) exp{−a(e^{bk} − 1)} ) / ( p λe^{−λk} + (1 − p) ab exp{−a(e^{bk} − 1) + bk} ) − Ek ] ∂µ(k|θ)/∂θi,
+ for each θi ∈ {a, b, λ, p}, where the partial derivatives ∂µ(k|θ)/∂θi follow from applying the quotient rule to equation (4).
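+ A minimal Python sketch of the mixture hazard in equation (4), which also makes the premature and senescent
+ components explicit (function and variable names are illustrative):
+ import numpy as np
+ def mixture_hazard(x, a, b, lam, p):
+     # f(x|theta)/S(x|theta) for the exponential-Gompertz mixture
+     f_exp = lam * np.exp(-lam * x)                                 # premature component density
+     f_gom = a * b * np.exp(-a * (np.exp(b * x) - 1.0) + b * x)     # senescent (Gompertz) component density
+     s_exp = np.exp(-lam * x)                                       # exponential survival
+     s_gom = np.exp(-a * (np.exp(b * x) - 1.0))                     # Gompertz survival
+     return (p * f_exp + (1 - p) * f_gom) / (p * s_exp + (1 - p) * s_gom)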
490
+ 3 Data and empirical results
+ In order to evaluate the proposed model, we compare its performance on high- and low-quality data.
+ For this, we evaluate its performance against four other models, using the Mean Absolute Percentage
+ Error (MAPE) combined with the leave-one-out cross-validation method, which measures the average
+ distance between the log-hazard and the log-mortality rate. Moreover, as some models are highly nonlinear,
+ a Genetic Algorithm (Scrucca, 2013; Mirjalili, 2019) is used to maximize the likelihood function.
+ This algorithm ensures convergence to the global maximum value.
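+ A simplified Python reading of this evaluation protocol follows; the fitting routine, the hazard function and the
+ use of Dk/Ek as the observed mortality rate are assumptions, not the authors' exact implementation.
+ import numpy as np
+ def mape(pred, obs):
+     # Mean Absolute Percentage Error between fitted log-hazard and observed log-mortality rate
+     return 100.0 * np.mean(np.abs((pred - obs) / obs))
+ def loo_mape(ages, D, E, fit, hazard):
+     # Leave-one-out cross-validation: drop one age, refit, score the held-out age
+     errs = []
+     for i in range(len(ages)):
+         keep = np.arange(len(ages)) != i
+         theta = fit(D[keep], E[keep])
+         obs = np.log(D[i] / E[i])
+         errs.append(abs((np.log(hazard(ages[i], theta)) - obs) / obs))
+     return 100.0 * np.mean(errs)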
500
+ 3.1 Models comparison
+ In a high-quality data setting
+ In this scenario, we use mortality data from Japan in 2015 obtained from the Human Mortality
+ Database (HMD). The observed value of log µ increases linearly up to a certain age and then shows a sharp
+ drop. This behavior has also been observed in this country over the last three decades. However, it is not restricted
+ to Japan; other countries such as Sweden, Germany, the USA and Korea show the same mortality behavior.
+ Figure 1 shows the estimated log-hazard function. We can clearly see that the Beard, Gompertz,
+ Makeham and Perks models were not able to properly fit the mortality rate after age 100.
509
+ [Figure 1: Japan 2015 modeling — estimated log(µ) by age (70–110) for the Beard, Gompertz, Makeham, Perks and mixture models.]
529
+ The Gompertz model considers the force of mortality to be log-linear, but clearly this behavior does not
+ describe the entire observed curve. For this model the estimated parameter is θ̂ = (0.0179, 0.1094)′, and
+ the model has a MAPE of 34.0127, i.e., its predictions are on average 34.0127% away from the
+ observed value. A similar result is obtained for the Makeham model, which has estimated parameter
+ θ̂ = (0.0174, 0.1103, 0.0008)′ and MAPE 33.0288.
+ The Beard model can be seen as the ratio of a Gompertz and a Makeham model with c = 1, with the pa-
+ rameters estimated by ML as θ̂ = (0.0165, 0.1216, 0.0073)′. Despite Beard's combination of the Makeham and
+ Gompertz models, this model provided the worst fit, reaching a MAPE of 55.6189.
+ The Perks model has a construction similar to Beard's: it is the ratio of two Makeham models.
+ For this model we estimate θ̂ = (0.0135, 0.1313, 0.0040, 0.0075)′. As expected, this model behaved very
+ similarly to the previous one, including a MAPE of 51.3591, suggesting that it does not
+ fit the data well.
+ Finally, for the proposed mixture-based model, we estimated θ̂ = (0.1155, 0.0163, 0.2061, 0.0126)′ and
+ a MAPE of 6.9193, the best of the models presented in this study. In addition, this model was the only one
+ able to capture the sharp drop in the mortality rate. With the estimated parameters we can infer
+ that non-senescent deaths represent 1.2599% of total deaths after age 70.
547
+ In a low-quality data setting
+ We observed that the model works well on good-quality data, and now we aim to understand how
+ the model behaves when the data have limitations. In this case we use data from Brazil from
+ 2010 (Queiroz et al., 2020; Gonzaga & Schmertmann, 2016). Previous studies show a mortality crossover
+ above age 60 when comparing more and less developed states in Brazil using the Topals model (Queiroz
+ et al., 2020; Gonzaga & Schmertmann, 2016). It is argued that this result is related to the level of complete-
+ ness of death counts, age misreporting and mortality selection. Thus, it is an important and relevant case
+ study for the application of our proposed mixture model. For this, as before, we compare the performance
+ of the 5 models through MAPE.
556
+ [Figure 2: Brazil 2010 modeling — estimated log(µ) by age (70–100) for the Beard, Gompertz, Makeham, Perks and mixture models.]
578
+ For the first model (Beard) we estimated θ̂ = (0.0375, 0.0942, 5.5625 × 10^{−8})′ and a MAPE of 20.4629,
+ i.e., on average this model deviated by about 20% from the mortality rate. We reached a similar conclusion for
+ the Gompertz model, estimating θ̂ = (0.0375, 0.0943)′ with a MAPE of about 20.4499.
+ The Makeham and Perks models also obtained similar results. For Makeham the estimate was θ̂ =
+ (0.01481, 0.1338, 0.03131)′, resulting in a MAPE of 14.5473, and for the Perks model the estimate was θ̂ =
+ (0.0163, 0.0129, 0.0290, 3.4272 × 10^{−7})′, which results in a MAPE of 14.9002.
+ Finally, for the proposed model we estimated θ̂ = (0.1036, 0.0315, 0.2389, 0.0692)′ and a MAPE of
+ 18.0038%, which indicates that the model is not able to capture mortality well in these data. Therefore,
+ the results found in this application are in line with the discussion in Feehan (2018) about the limited ability of
+ any single model to capture mortality at advanced ages universally.
590
+ 3.2 Model applications
+ As we have seen, the proposed model has a high capacity to fit mortality at older ages. Therefore, we
+ illustrate the power of this model by applying it to mortality data from Japan (1993 and 2002), Sweden
+ (2011), Germany (2016), the USA (1990 and 1992), Spain (2012) and Italy (2011). Table 1 reports the
+ estimates for each dataset, and Figure 4 shows their respective decomposed distributions of deaths.
596
+ Table 1: Parameters estimated.
+ Country   Year   â        b̂        ĉ        p̂        MAPE
+ Japan     1993   0.10911  0.02916  0.21615  0.00250   8.86459
+ Japan     2002   0.10897  0.02425  0.30152  0.03276   7.49451
+ Sweden    2011   0.12390  0.01520  0.26448  0.01559  12.27019
+ Germany   2016   0.11046  0.02090  0.22283  0.00397  10.68258
+ USA       1990   0.08845  0.03569  0.20360  0.02569   3.80694
+ USA       1992   0.09057  0.03404  0.20575  0.03217   2.91887
+ Spain     2012   0.12372  0.01544  0.22751  0.01307  12.38755
+ Italy     2011   0.11606  0.01768  0.21710  0.01999  13.24385
660
+ In Table 1 it can be seen that the estimated values for p are small, less than 0.04, which indicates that the
+ proportion of premature deaths above age 70 does not exceed 4%. This result was expected since,
+ by truncating the mortality data at age 70, we exclude infant mortality and the mortality hump (Remund
+ et al., 2018), and we only observe the tail of the distribution of premature mortality. Furthermore, our
+ result is also in agreement with Horiuchi & Wilmoth's finding that above age 75 mortality decelerates for
+ most causes of death (Horiuchi & Wilmoth, 1997).
+ The estimated values of the c parameter are similar and concentrated around 0.23. This suggests that,
+ despite having different proportions, the distributions of premature death are similar, as can be seen on the
+ left in Figure 3. Such similarity was not observed in the senescent death distributions, which show marked
+ differences, as can be seen on the right in Figure 3. Despite this, it is clear that the modal age at death is
+ between 80 and 90, which is consistent with previous studies (Horiuchi et al., 2013).
671
+ [Figure 3: Estimates of mortality components — premature (left) and senescent (right) mortality distributions by age (70–110) for JPN 1993, JPN 2002, SWE 2011, DEUTNP 2016, USA 1990, USA 1992, ESP 2012 and ITA 2011.]
717
+ [Figure 4: Estimations — death distributions dx by age (70–110), decomposed into senescent, premature and overall deaths, for JPN 1993, JPN 2002, SWE 2011, DEUTNP 2016, USA 1990, USA 1992, ESP 2012 and ITA 2011.]
818
+ Figure 4 shows the estimated distribution of deaths broken down into premature and senescent
+ deaths. In it, we can observe the quality of fit of the estimated model (black line). In addition, it is possible
+ to see that for Japan in 1993 and Germany in 2016 there were practically no premature deaths after age 70;
+ this could also be inferred from Table 1, where the estimated values of p are small.
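+ Given the fitted parameters, the decomposition plotted in Figure 4 can be reproduced with a sketch of this
+ kind (the rescaling of densities to death counts on the age grid is an assumption about how the curves are drawn):
+ import numpy as np
+ def decompose_deaths(ages, a, b, lam, p, total_deaths):
+     # Split the fitted death distribution into premature (exponential) and senescent (Gompertz) parts
+     f_exp = lam * np.exp(-lam * ages)
+     f_gom = a * b * np.exp(-a * (np.exp(b * ages) - 1.0) + b * ages)
+     premature = p * f_exp
+     senescent = (1 - p) * f_gom
+     overall = premature + senescent
+     scale = total_deaths / overall.sum()
+     return premature * scale, senescent * scale, overall * scale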
824
+ 4 Conclusions and future works
+ Robust estimation of mortality rates at advanced ages is a challenge for demographers for various reasons.
+ Even in populations with good records of deaths and population counts there are disturbances due to
+ the low number of events and/or limitations in the information on the age at death. In countries
+ where data-quality problems are present, the challenges are greater.
+ For some centuries there has been an ambition to decompose mortality into interpretable components.
+ The best known proposals are those of Makeham (1860) and Heligman & Pollard (1980), and in recent
+ years researchers have returned to this problem (Remund et al., 2017; Mazzuco et al., 2021). Therefore,
+ this paper aims to contribute to this discussion by delivering a new parametric model capable of
+ decomposing mortality through mixture models in a frequentist framework. Mazzuco et al. (2021) propose
+ an approach similar to the one proposed in this paper; however, those authors use a Bayesian framework.
+ As we have seen, the proposed model fits the mortality curve well, especially above age 100, and it
+ does so without the overparametrization of Heligman & Pollard (1980). Furthermore, as it is a mixture
+ model, it is flexible enough to reduce to the Gompertz model (p = 0) or to the exponential model (p = 1).
+ When 0 < p < 1, the model fits a mortality curve with an inflexion point (mortality deceleration) and a
+ plateau (mortality plateau).
+ The use of Brazilian mortality data shed light on the performance of the model on a low-quality database.
+ We could see that the mixture-based model captures the dynamics of mortality well only when there is a
+ drop in mortality rates, serving as an alternative to models that do not have this characteristic.
+ Although the present work presents a model capable of capturing the specific dynamics of the force of
+ mortality in certain populations, it also sheds light on other problems to be solved. Since the model is based
+ on mixtures of distributions, we are interested in deriving hypothesis tests on the estimated parameters. One
+ of the main ones is to test whether p = 0, i.e. whether the model can be reduced to a Gompertz model; an
+ interest similar to that studied in Böhnstedt & Gampe (2019), where a hypothesis test for Gamma heterogeneity is
+ derived and important statistical properties are studied.
+ Finally, in a recently published paper, Vaupel et al. (2022) point out that estimating senescent mor-
+ tality is of fundamental importance to understand the pace of human aging, human longevity and how long
+ we can live. In this sense, this work provides a method capable of identifying and estimating senescent mor-
+ tality without the large computational cost often seen in Bayesian analyses (see Barber et al. (2015))
+ or the overparameterization seen in Heligman & Pollard (1980).
856
+
857
+ Bibliography
858
+ Barber, S., Voss, J., & Webster, M. (2015). The rate of convergence for approximate bayesian computation.
859
+ Electronic Journal of Statistics, 9(1), 80–105.
860
+ Barbi, E., Lagona, F., Marsili, M., Vaupel, J. W., & Wachter, K. W. (2018). The plateau of human mortality:
861
+ Demography of longevity pioneers. Science, 360(6396), 1459–1461.
862
+ Beard, R. E. (1959). Note on some mathematical mortality models. In Ciba Foundation Symposium-The
863
+ Lifespan of Animals (Colloquia on Ageing), volume 5 (pp. 302–311).: Wiley Online Library.
864
+ Black, D. A., Hsu, Y.-C., Sanders, S. G., Schofield, L. S., & Taylor, L. J. (2017). The methuselah effect: The
865
+ pernicious impact of unreported deaths on old-age mortality estimates. Demography, 54(6), 2001–2024.
866
+ Böhnstedt, M. & Gampe, J. (2019). Detecting mortality deceleration: Likelihood inference and model
867
+ selection in the gamma-gompertz model. Statistics & Probability Letters, 150, 68–73.
868
+ Brillinger, D. R. et al. (1986). The natural variability of vital rates and associated statistics. Biometrics,
869
+ 42(4), 693–734.
870
+ Cutler, D., Deaton, A., & Lleras-Muney, A. (2006). The determinants of mortality. Journal of economic
871
+ perspectives, 20(3), 97–120.
872
+ Feehan, D. M. (2018). Separating the signal from the noise: evidence for deceleration in old-age death
873
+ rates. Demography, 55(6), 2025–2044.
874
+ Finkelstein, M. (2009). Understanding the shape of the mixture failure rate (with engineering and demo-
875
+ graphic applications). Applied Stochastic Models in Business and Industry, 25(6), 643–663.
876
+ Gavrilov, L. A. & Gavrilova, N. S. (2019). Late-life mortality is underestimated because of data errors.
877
+ PLoS biology, 17(2), e3000148.
878
+ Gomes, M. M. F. & Turra, C. M. (2009). The number of centenarians in brazil: indirect estimates based on
879
+ death certificates. Demographic Research, 20, 495–502.
880
+ Gompertz, B. (1825a). Xxiv. on the nature of the function expressive of the law of human mortality, and
881
+ on a new mode of determining the value of life contingencies. in a letter to francis baily, esq. frs &c.
882
+ Philosophical transactions of the Royal Society of London, (115), 513–583.
883
884
+
885
+ Gompertz, B. (1825b). Xxiv. on the nature of the function expressive of the law of human mortality, and
886
+ on a new mode of determining the value of life contingencies. in a letter to francis baily, esq. frs &c.
887
+ Philosophical transactions of the Royal Society of London, 0(115), 513–583.
888
+ Gonzaga, M. R. & Schmertmann, C. P. (2016). Estimating age-and sex-specific mortality rates for small
889
+ areas with topals regression: an application to Brazil in 2010. Revista Brasileira de Estudos de População,
890
+ 33, 629–652.
891
+ Graunt, J. (1662). Natural and political observations mentioned in a following index, and made upon the
892
+ bills of mortality. In Mathematical Demography (pp. 11–20). Springer.
893
+ Heligman, L. & Pollard, J. H. (1980). The age pattern of mortality. Journal of the Institute of Actuaries,
894
+ 107(1), 49–80.
895
+ Horiuchi, S., Ouellette, N., Cheung, S. L. K., & Robine, J.-M. (2013). Modal age at death: lifespan indicator
896
+ in the era of longevity extension. Vienna Yearbook of Population Research, (pp. 37–69).
897
+ Horiuchi, S. & Wilmoth, J. R. (1997). Age patterns of the life table aging rate for major causes of death
898
+ in japan, 1951–1990. The Journals of Gerontology Series A: Biological Sciences and Medical Sciences,
899
+ 52(1), B67–B77.
900
+ Makeham, W. M. (1860). On the law of mortality and the construction of annuity tables. Journal of the
901
+ Institute of Actuaries, 8(6), 301–310.
902
+ Mazzuco, S. S., Suhrcke, M. M., & Zanotto, L. L. (2021). How to measure premature mortality? a proposal
903
+ combining “relative” and “absolute” approaches. Population health metrics, 19(1), 1–14.
904
+ Mirjalili, S. (2019). Genetic algorithm. In Evolutionary algorithms and neural networks (pp. 43–55).
905
+ Springer.
906
+ Nepomuceno, M., Turra, C., et al. (2019). The population of centenarians in Brazil: historical estimates
907
+ from 1900 to 2000. Technical report, Max Planck Institute for Demographic Research, Rostock, Ger-
908
+ many.
909
+ Perks, W. (1932). On some experiments in the graduation of mortality statistics. Journal of the Institute of
910
+ Actuaries, 63(1), 12–57.
911
+ Pinheiro, P. C. & Queiroz, B. L. (2019). Regional disparities in brazilian adult mortality: an analysis using
912
+ modal age at death (m) and compression of mortality (iqr). Anais, (pp. 1–20).
913
914
+
915
+ Queiroz, B. L., Gonzaga, M. R., Vasconcelos, A., Lopes, B. T., & Abreu, D. M. (2020). Comparative
916
+ analysis of completeness of death registration, adult mortality and life expectancy at birth in brazil at the
917
+ subnational level. Population health metrics, 18(1), 1–15.
918
+ Remund, A., Camarda, C. G., & Riffe, T. (2017). Analyzing the young adult mortality hump in r with
919
+ morthump. Rostock: Max Planck Institute for Demographic Research (MPIDR Technical Report TR-
920
+ 2018-003).
921
+ Remund, A., Camarda, C. G., & Riffe, T. (2018). A cause-of-death decomposition of young adult excess
922
+ mortality. Demography, 55(3), 957–978.
923
+ Scrucca, L. (2013). GA: A package for genetic algorithms in R. Journal of Statistical Software, 53(4),
924
+ 1–37.
925
+ van Raalte, A. A. (2021). What have we learned about mortality patterns over the past 25 years? Population
926
+ Studies, 75(sup1), 105–132.
927
+ Vaupel, J. W. et al. (2022). The Pull of the Plateau and the Sway of the Mode: Formal Relationships to
928
+ Estimate the Pace of Senescence. Technical report, Center for Open Science.
929
+ Vaupel, J. W., Villavicencio, F., & Bergeron-Boucher, M.-P. (2021). Demographic perspectives on the rise
930
+ of longevity. Proceedings of the National Academy of Sciences, 118(9).
931
+ Wachter, K. W. (2018). Hypothetical errors and plateaus: A response to newman. PLoS biology, 16(12),
932
+ e3000076.
933
+ Wilmoth, J., Zureick, S., Canudas-Romo, V., Inoue, M., & Sawyer, C. (2012). A flexible two-dimensional
934
+ mortality model for use in indirect estimation. Population studies, 66(1), 1–28.
935
+ Wilmoth, J. R. (2000). Demography of longevity: past, present, and future trends. Experimental gerontol-
936
+ ogy, 35(9-10), 1111–1129.
937
+ Wrigley-Field, E. (2014). Mortality deceleration and mortality selection: three unexpected implications of
938
+ a simple model. Demography, 51(1), 51–71.
939
940
+
3tAzT4oBgHgl3EQfuf3H/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
5NFIT4oBgHgl3EQf7itm/content/2301.11398v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f0af6ac12e85736f6ec662252891b06a672016d4ccd614c5506042172bf99333
3
+ size 167907
5NFIT4oBgHgl3EQf7itm/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58c273ab7d0f1f50c47cf0bded92781c6b77e1552f222e318279db3eeab0605f
3
+ size 2162733
5NFIT4oBgHgl3EQf7itm/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e61d1b452ae3b3789cbfe10d69aefd71423cc69edc5d32d107dc6f8671a99325
3
+ size 76066
5NFKT4oBgHgl3EQf-C45/content/tmp_files/2301.11956v1.pdf.txt ADDED
@@ -0,0 +1,2309 @@
1
+ On the Connection Between MPNN and Graph Transformer
2
+ Chen Cai 1 Truong Son Hy 1 Rose Yu 1 Yusu Wang 1
3
+ Abstract
4
+ Graph Transformer (GT) recently has emerged
5
+ as a new paradigm of graph learning algorithms,
6
+ outperforming the previously popular Message
7
+ Passing Neural Network (MPNN) on multiple
8
+ benchmarks. Previous work (Kim et al., 2022)
9
+ shows that with proper position embedding, GT
10
+ can approximate MPNN arbitrarily well, implying
11
+ that GT is at least as powerful as MPNN. In this
12
+ paper, we study the inverse connection and show
13
+ that MPNN with virtual node (VN), a commonly
14
+ used heuristic with little theoretical understand-
15
+ ing, is powerful enough to arbitrarily approximate
16
+ the self-attention layer of GT.
17
+ In particular, we first show that if we consider
18
+ one type of linear transformer, the so-called Per-
19
+ former/Linear Transformer (Choromanski et al.,
20
+ 2020; Katharopoulos et al., 2020b), then MPNN
21
+ + VN with only O(1) depth and O(1) width
22
+ can approximate a self-attention layer in Per-
23
+ former/Linear Transformer. Next, via a connec-
24
+ tion between MPNN + VN and DeepSets, we
25
+ prove that MPNN + VN with O(n^d) width and
26
+ O(1) depth can approximate the self-attention
27
+ layer arbitrarily well, where d is the input fea-
28
+ ture dimension. Lastly, under some assumptions,
29
+ we provide an explicit construction of MPNN +
30
+ VN with O(1) width and O(n) depth approxi-
31
+ mating the self-attention layer in GT arbitrarily
32
+ well. On the empirical side, we demonstrate that
33
+ 1) MPNN + VN is a surprisingly strong baseline,
34
+ outperforming GT on the recently proposed Long
35
+ Range Graph Benchmark (LRGB) dataset, 2) our
36
+ MPNN + VN improves over early implementation
37
+ on a wide range of OGB datasets and 3) MPNN +
38
+ VN outperforms Linear Transformer and MPNN
39
+ on the climate modeling task.
40
+ 1University of California San Diego, San Diego, USA. Corre-
41
+ spondence to: Chen Cai <c1cai@ucsd.edu>.
42
+ Copyright 2023 by the author(s).
43
+ [Figure 1: (a) MPNN + VN and (b) Graph Transformers.]
48
+ 1. Introduction
49
+ MPNN (Message Passing Neural Network) (Gilmer et al.,
50
+ 2017) has been the leading architecture for processing graph-
51
+ structured data. Recently, transformers in natural language
52
+ processing (Vaswani et al., 2017; Kalyan et al., 2021) and
53
+ vision (d’Ascoli et al., 2021; Han et al., 2022) have extended
54
+ their success to the domain of graphs. There have been
55
+ several pieces of work (Ying et al., 2021; Wu et al., 2021;
56
+ Kreuzer et al., 2021; Rampášek et al., 2022; Kim et al., 2022)
57
+ showing that with careful position embedding (Lim et al.,
58
+ 2022), graph transformers (GT) can achieve compelling
59
+ empirical performances on large-scale datasets and start to
60
+ challenge the dominance of MPNN.
61
+ MPNN imposes a sparsity pattern on the computation graph
62
+ and therefore enjoys linear complexity. It however suffers
63
+ from well-known over-smoothing (Li et al., 2018; Oono
64
+ & Suzuki, 2019; Cai & Wang, 2020) and over-squashing
65
+ (Alon & Yahav, 2020; Topping et al., 2021) issues, limiting
66
+ its usage on long-range modeling tasks where the label of
67
+ one node depends on features of nodes far away. GT relies
68
+ purely on position embedding to encode the graph structure
69
+ and uses vanilla transformers on top. 1 It models all pairwise
70
+ interactions directly in one layer, making it computationally
71
+ more expensive. Compared to MPNN, GT shows promising
72
+ results on tasks where modeling long-range interaction is
73
+ the key, but the quadratic complexity of self-attention in GT
74
+ 1GT in this paper refers to the practice of tokenizing graph
75
+ nodes and applying standard transformers on top (Ying et al., 2021;
76
+ Kim et al., 2022). There exists a more sophisticated GT (Kreuzer
77
+ et al., 2021) that further conditions attention on edge types but it is
78
+ not considered in this paper.
79
+ arXiv:2301.11956v1 [cs.LG] 27 Jan 2023
80
+
81
82
+ limits its usage to graphs of medium size. Scaling up GT
83
+ to large graphs remains an active research area (Wu et al.,
84
+ 2022).
85
+ Theoretically, it has been shown that graph transformers can
86
+ be powerful graph learners (Kim et al., 2022), i.e., graph
87
+ transformers with appropriate choice of token embeddings
88
+ have the capacity of approximating linear permutation equiv-
89
+ ariant basis, and therefore can approximate 2-IGN (Invariant
90
+ Graph Network), a powerful architecture that is at least as
91
+ expressive as MPNN (Maron et al., 2018). This raises an
+ important question: is GT strictly more powerful
+ than MPNN? Can we approximate GT with MPNN?
94
+ One common intuition of the advantage of GT over MPNN
95
+ is its ability to model long-range interaction more effectively.
96
+ However, from the MPNN side, one can resort to a simple
97
+ trick to escape locality constraints for effective long-range
98
+ modeling: the use of an additional virtual node (VN) that
99
+ connects to all input graph nodes. On a high level, MPNN
100
+ + VN augments the existing graph with one virtual node,
101
+ which acts like global memory for every node exchanging
102
+ messages with other nodes. Empirically this simple trick has
103
+ been observed to improve the MPNN and has been widely
104
+ adopted (Gilmer et al., 2017; Hu et al., 2020; 2021) since
105
+ the early beginning of MPNN (Gilmer et al., 2017; Battaglia
106
+ et al., 2018). However, there is very little theoretical study
107
+ of MPNN + VN (Hwang et al., 2022).
108
+ In this work, we study the theoretical property of MPNN
109
+ + VN, and its connection to GT. We systematically study
110
+ the representation power of MPNN + VN, both for certain
111
+ approximate self-attention and for the full self-attention
112
+ layer, and provide a depth-width trade-off, summarized in
113
+ Table 1. In particular,
114
+ • With O(1) depth and O(1) width, MPNN + VN
115
+ can approximate one self-attention layer of Performer
116
+ (Choromanski et al., 2020) and Linear Transformer
117
+ (Katharopoulos et al., 2020b), a type of linear trans-
118
+ formers (Tay et al., 2020).
119
+ • Via a link between MPNN + VN with DeepSets (Za-
120
+ heer et al., 2017), we prove MPNN + VN with O(1)
121
+ depth and O(n^d) width (d is the input feature dimen-
122
+ sion) is permutation equivariant universal, implying
123
+ it can approximate self-attention layer and even full-
124
+ transformers.
125
+ • Under certain assumptions on node features, we prove
126
+ an explicit construction of O(n) depth O(1) width
127
+ MPNN + VN approximating 1 self-attention layer ar-
128
+ bitrarily well on graphs of size n. Unfortunately, the
129
+ assumptions on node features are rather strong, and
130
+ whether we can alleviate them will be an interesting
131
+ future direction to explore.
132
+ • Empirically, we show 1) that MPNN + VN works sur-
133
+ prisingly well on the recently proposed LRGB (long-
134
+ range graph benchmarks) datasets (Dwivedi et al.,
135
+ 2022), which arguably require long-range interaction
136
+ reasoning to achieve strong performance 2) our imple-
137
+ mentation of MPNN + VN is able to further improve
138
+ the early implementation of MPNN + VN on OGB
139
+ datasets and 3) MPNN + VN outperforms Linear Trans-
140
+ former (Katharopoulos et al., 2020b) and MPNN on
141
+ the climate modeling task.
142
+ 2. Related Work
143
+ Virtual node in MPNN. The virtual node augments the
144
+ graph with an additional node to facilitate the information
145
+ exchange among all pairs of nodes. It is a heuristic proposed
146
+ in (Gilmer et al., 2017) and has been observed to improve
147
+ the performance in different tasks (Hu et al., 2021; 2020).
148
+ Surprisingly, its theoretical properties have received little
149
+ study. To the best of our knowledge, only a recent paper
150
+ (Hwang et al., 2022) analyzed the role of the virtual node in
151
+ the link prediction setting in terms of 1) expressiveness of
152
+ the learned link representation and 2) the potential impact
153
+ on under-reaching and over-smoothing.
154
+ Graph transformer.
155
+ Because of the great successes
156
+ of Transformers in natural language processing (NLP)
157
+ (Vaswani et al., 2017; Wolf et al., 2020) and recently in
158
+ computer vision (Dosovitskiy et al., 2020; d’Ascoli et al.,
159
+ 2021; Liu et al., 2021), there is great interest in extending
160
+ transformers for graphs. One common belief of advantage
161
+ of graph transformer over MPNN is its capacity in capturing
162
+ long-range interactions while alleviating over-smoothing (Li
163
+ et al., 2018; Oono & Suzuki, 2019; Cai & Wang, 2020) and
164
+ over-squashing in MPNN (Alon & Yahav, 2020; Topping
165
+ et al., 2021).
166
+ Fully-connected Graph transformer (Dwivedi & Bresson,
167
+ 2020) was introduced with eigenvectors of graph Laplacian
168
+ as the node positional encoding (PE). Various follow-up
169
+ works proposed different ways of PE to improve GT, ranging
170
+ from an invariant aggregation of Laplacian’s eigenvectors
171
+ in SAN (Kreuzer et al., 2021), pair-wise graph distances in
172
+ Graphormer (Ying et al., 2021), relative PE derived from dif-
173
+ fusion kernels in GraphiT (Mialon et al., 2021), and recently
174
+ Sign and Basis Net (Lim et al., 2022) with a principled way
175
+ of handling sign and basis invariance. Other lines of re-
176
+ search in GT include combining MPNN and GT (Wu et al.,
177
+ 2021; Rampášek et al., 2022), encoding the substructures
178
+ (Chen et al., 2022), and efficient graph transformers for
179
+ large graphs (Wu et al., 2022).
180
+
181
182
+ Table 1: Summary of approximation result of MPNN + VN on self-attention layer. n is the number of nodes and d is the
+ feature dimension of node features. The dependency on d is hidden.
+                     Depth   Width    Self-Attention   Note
+ Theorem 4.1         O(1)    O(1)     Approximate      Approximate self-attention in Performer (Choromanski et al., 2020)
+ Theorem 5.5         O(1)    O(n^d)   Full             Leverage the universality of equivariant DeepSets
+ Theorem 6.3         O(n)    O(1)     Full             Explicit construction, strong assumption on X
+ Proposition B.10    O(n)    O(1)     Full             Explicit construction, more relaxed (but still strong) assumption on X
208
+ 3. Preliminaries
209
+ We denote X ∈ Rn×d the concatenation of graph node
210
+ features and positional encodings, where node i has feature
211
+ xi ∈ R^d. When necessary, we use x_j^(l) to denote node
+ j’s feature at depth l. Let M be the space of multisets of
215
+ vectors in Rd. We use X ⊆ Rn×d to denote the space of
216
+ node features and the Xi be the projection of X on i-th
217
+ coordinate. ∥ · ∥ denotes the 2-norm. [x, y, z] denotes the
218
+ concatenation of x, y, z. [n] stands for the set {1, 2, ..., n}.
219
+ Definition 3.1 (attention). We denote the key and query matrices as WK, WQ ∈ R^{d×d′}, and the value
+ matrix as WV ∈ R^{d×d} (see footnote 2). The attention score between two vectors u, v ∈ R^{d×1} is
+ defined as α(u, v) = softmax(u^T WQ (WK)^T v). We denote A as the space of attentions α for different
+ WQ, WK, WV. We also define the unnormalized attention score α′(·, ·) to be α′(u, v) = u^T WQ (WK)^T v.
+ A self-attention layer is a matrix function L : R^{n×d} → R^{n×d} of the following form:
+ L(X) = softmax(XWQ(XWK)^T )XWV .
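+ A direct NumPy sketch of the self-attention layer L in Definition 3.1 (row-wise softmax; variable names are ours):
+ import numpy as np
+ def self_attention(X, WQ, WK, WV):
+     # L(X) = softmax(X WQ (X WK)^T) X WV, with the softmax applied row-wise
+     scores = (X @ WQ) @ (X @ WK).T
+     scores -= scores.max(axis=1, keepdims=True)   # numerical stability
+     A = np.exp(scores)
+     A /= A.sum(axis=1, keepdims=True)
+     return A @ (X @ WV)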
228
+ 3.1. MPNN Layer
229
+ Definition 3.2 (MPNN layer (Gilmer et al., 2017)). An MPNN layer on a graph G with node features x^(k) at
+ the k-th layer and edge features e is of the following form
+ x_i^(k) = γ^(k)( x_i^(k−1), τ_{j∈N(i)} φ^(k)( x_i^(k−1), x_j^(k−1), e_{j,i} ) )
244
+ Here γ : Rd × Rd′ → Rd is update function, φ : Rd ×
245
+ Rd × Rde → Rd′ is message function where de is the edge
246
+ feature dimension, τ : M → Rd is permutation invariant
247
+ aggregation function and N(i) is the neighbors of node i
248
+ in G. Update/message/aggregation functions are usually
249
+ parametrized by neural networks. For graphs of different
250
+ types of edges and nodes, one can further extend MPNN to
251
+ the heterogeneous setting. We use 1, ..., n to index graph
252
+ nodes and vn to denote the virtual node.
253
+ Definition 3.3 (heterogeneous MPNN + VN layer). The heterogeneous MPNN + VN layer operates on two
+ types of nodes: 1) the virtual node and 2) graph nodes, denoted vn and gn, and three types of edges:
+ 1) vn-gn edges, 2) gn-gn edges and 3) gn-vn edges. It has the following form
+ x_vn^(k) = γ_vn^(k)( x_i^(k−1), τ_{j∈[n]} φ_vn-gn^(k)( x_i^(k−1), x_j^(k−1), e_{j,i} ) )   (1)
+ for the virtual node, and
+ x_i^(k) = γ_gn^(k)( x_i^(k−1), τ_{j∈N1(i)} φ_gn-vn^(k)( x_i^(k−1), x_j^(k−1), e_{j,i} ) + τ_{j∈N2(i)} φ_gn-gn^(k)( x_i^(k−1), x_j^(k−1), e_{j,i} ) )   (2)
+ for graph nodes. Here N1(i) for graph node i is the virtual node and N2(i) is the set of neighboring graph nodes.
+ (Footnote 2: For simplicity, we assume the output dimension of self-attention is the same as the input dimension.
+ All theoretical results can be extended to the case where the output dimension is different from d.)
305
+ Our proof of approximating self-attention layer L with
306
+ MPNN layers does not use the graph topology. Next, we
307
+ introduce a simplified heterogeneous MPNN + VN layer,
308
+ which will be used in the proof. It is easy to see that set-
309
+ ting φ^(k)_gn-gn to be 0 in Definition 3.3 recovers the simplified
311
+ heterogeneous MPNN + VN layer.
312
+ Definition 3.4 (simplified heterogeneous MPNN + VN layer). A simplified heterogeneous MPNN + VN layer
+ is the same as a heterogeneous MPNN + VN layer in Definition 3.3 except we set φ_gn-gn to be 0. I.e., we have
+ x_vn^(k) = γ_vn^(k)( x_i^(k−1), τ_{j∈[n]} φ_vn-gn^(k)( x_i^(k−1), x_j^(k−1), e_{j,i} ) )
+ for the virtual node, and
+ x_i^(k) = γ_gn^(k)( x_i^(k−1), τ_{j∈N1(i)} φ_gn-vn^(k)( x_i^(k−1), x_j^(k−1), e_{j,i} ) )
+ for graph nodes.
349
+ Intuitively, adding the virtual node (VN) to MPNN makes it
350
+ easy to compute certain quantities, for example, the mean
351
+ of node features (which is hard for standard MPNN unless
352
+ the depth is proportional to the diameter of the graph). Us-
353
+ ing VN thus makes it easy to implement for example the
354
+ mean subtraction, which helps reduce over-smoothing and
355
+ improves the performance of GNN. (Yang et al., 2020; Zhao
356
+ & Akoglu, 2019)
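+ To make the message flow explicit, here is a toy sketch of one simplified MPNN + VN layer (Definition 3.4),
+ using mean aggregation, tanh updates and illustrative weight matrices; it is not the paper's exact parametrization.
+ import numpy as np
+ def mpnn_vn_layer(X, W_vn, W_gn):
+     # VN update: aggregate messages from all graph nodes (here: their mean), then transform
+     x_vn = np.tanh(X.mean(axis=0) @ W_vn)
+     # Graph-node update: combine each node's own feature with the broadcast VN message
+     X_new = np.tanh(X @ W_gn + x_vn)
+     return X_new, x_vn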
357
+
358
359
+ 3.2. Assumptions
360
+ We have two mild assumptions on feature space X ⊂ Rn×d
361
+ and the regularity of target function L.
362
+ AS1. ∀i ∈ [n], xi ∈ Xi, ∥xi∥ < C1. This implies X is
363
+ compact.
364
+ AS2. ∥WQ∥ < C2, ∥WK∥ < C2, ∥WV ∥ < C2 for target
365
+ layer L. Combined with AS1 on X, this means α′(xi, xj)
366
+ is both upper and lower bounded, which further implies
+ that Σ_j e^{α′(xi,xj)} is both upper and lower bounded.
369
+ 4. O(1)-depth O(1)-width MPNN + VN for
370
+ unbiased approximation of attention
371
+ The standard self-attention takes O(n2) computational time,
372
+ therefore not scalable for large graphs. Reducing the compu-
373
+ tational complexity of self-attention in Transformer is active
374
+ research (Tay et al., 2020). In this section, we consider
375
+ self-attention in a specific type of efficient transformers, Per-
376
+ former (Choromanski et al., 2020) and Linear Transformer
377
+ (Katharopoulos et al., 2020b).
378
+ One full self-attention layer L is of the following form
+ x_i^(l+1) = Σ_{j=1}^n [ κ( W_Q^(l) x_i^(l), W_K^(l) x_j^(l) ) / Σ_{k=1}^n κ( W_Q^(l) x_i^(l), W_K^(l) x_k^(l) ) ] · ( W_V^(l) x_j^(l) ),   (3)
+ where κ : R^d × R^d → R is the softmax kernel κ(x, y) := exp(x^T y). The kernel function can be ap-
+ proximated via κ(x, y) = ⟨Φ(x), Φ(y)⟩_V ≈ φ(x)^T φ(y), where the first equation is by Mercer’s theorem and
+ φ(·) : R^d → R^m is a low-dimensional feature map with random transformation. For Performer (Choroman-
+ ski et al., 2020), the choice of φ is taken as φ(x) = (exp(−∥x∥_2^2/2)/√m) [ exp(w_1^T x), · · · , exp(w_m^T x) ],
+ where w_k ∼ N(0, I_d) are i.i.d. sampled random variables. For Linear Transformer (Katharopoulos et al.,
+ 2020b), φ(x) = elu(x) + 1.
+ By switching κ(x, y) to φ(x)^T φ(y), and denoting q_i = W_Q^(l) x_i^(l), k_i = W_K^(l) x_i^(l) and v_i = W_V^(l) x_i^(l), the approx-
+ imated version of Equation (3) by Performer and Linear Transformer becomes
+ x_i^(l+1) = Σ_{j=1}^n [ φ(q_i)^T φ(k_j) / Σ_{k=1}^n φ(q_i)^T φ(k_k) ] · v_j = ( φ(q_i)^T Σ_{j=1}^n φ(k_j) ⊗ v_j )^T / ( φ(q_i)^T Σ_{k=1}^n φ(k_k) ).   (4)
470
+ where we use the matrix multiplication association rule to
471
+ derive the second equality.
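+ A compact NumPy sketch of the linearized attention in Equation (4); the two sums shared across all nodes are
+ exactly the quantities the virtual node will carry. Function names and the feature-map wrapper are ours.
+ import numpy as np
+ def linear_attention(Q, K, V, phi):
+     # Eq. (4): row i is (phi(q_i)^T sum_j phi(k_j) (x) v_j) / (phi(q_i)^T sum_j phi(k_j))
+     PQ, PK = phi(Q), phi(K)              # feature maps of queries and keys, shape (n, m)
+     S = PK.T @ V                         # sum_j phi(k_j) (x) v_j, shape (m, d)
+     z = PK.sum(axis=0)                   # sum_j phi(k_j), shape (m,)
+     return (PQ @ S) / (PQ @ z)[:, None]
+ # Feature map of the Linear Transformer: phi(x) = elu(x) + 1
+ elu_plus_one = lambda X: np.where(X > 0, X + 1.0, np.exp(np.minimum(X, 0.0)))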
472
+ The key advantage of Equation (4) is that Σ_{j=1}^n φ(k_j) and
+ Σ_{j=1}^n φ(k_j) ⊗ v_j can be approximated by the virtual node,
476
+ and shared for all graph nodes, using only O(1) layers of
477
+ MPNNs. We denote the self-attention layer of this form
478
+ in Equation (4) as LPerformer. Linear Transformer differs
479
+ from Performer by choosing a different form of φ(x) =
480
+ Relu(x) + 1 in its self-attention layer LLinear-Transformer.
481
+ In particular, the VN will approximate Σ_{j=1}^n φ(k_j) and
+ Σ_{j=1}^n φ(k_j) ⊗ v_j, and represent them as its feature. Both
485
+ φ (kj) and φ (kj)⊗vj can be approximated arbitrarily well
486
+ by an MLP with constant width (constant in n but can be
487
+ exponential in d) and depth. Note that φ(kj) ⊗ vj ∈ Rdm
488
+ but can be reshaped to 1 dimensional feature vector.
489
+ More specifically, the initial feature for the virtual node is
490
+ 1(d+1)m, where d is the dimension of node features and m
491
+ is the number of random projections ωi. Message function
492
+ + aggregation function for virtual node τφvn-gn : R(d+1)m ×
493
+ M → R(d+1)m is
494
+ τ_{j∈[n]} φ_vn-gn^(k)(·, {x_i}_i) = [ Σ_{j=1}^n φ(k_j), ReshapeTo1D( Σ_{j=1}^n φ(k_j) ⊗ v_j ) ]   (5)
506
+ where ReshapeTo1D(·) flattens a 2D matrix to a 1D vec-
507
+ tor in raster order. This function can be arbitrarily approxi-
508
+ mated by MLP. Note that the virtual node’s feature dimen-
509
+ sion is (d + 1)m (where recall m is the dimension of the
510
+ feature map φ used in the linear transformer/Performer),
511
+ which is larger than the dimension of the graph node
512
+ d. This is consistent with the early intuition that the vir-
513
+ tual node might be overloaded when passing information
514
+ among nodes. The update function for virtual node γvn :
515
+ R(d+1)m × R(d+1)m → R(d+1)m is just coping the second
516
+ argument, which can be exactly implemented by MLP.
517
+ VN then sends its message back to all other nodes, where each graph node i applies the update function
+ γ_gn : R^{(d+1)m} × R^d → R^d of the form
+ γ_gn( x_i, [ Σ_{j=1}^n φ(k_j), ReshapeTo1D( Σ_{j=1}^n φ(k_j) ⊗ v_j ) ] ) = ( φ(q_i)^T Σ_{j=1}^n φ(k_j) ⊗ v_j )^T / ( φ(q_i)^T Σ_{k=1}^n φ(k_k) )   (6)
538
+ to update the graph node feature.
539
+ As the update function γgn can not be computed exactly in
540
+ MLP, what is left is to show that error induced by using
541
+ MLP to approximate τφvn-gn and γgn in Equation (5) and
542
+ Equation (6) can be made arbitrarily small.
543
+ Theorem 4.1. Under the AS1 and AS2, MPNN + VN of
544
+ O(1) width and O(1) depth can approximate LPerformer and
545
+ LLinear-Transformer arbitrarily well.
546
+
547
548
+ Proof. We first prove the case of LPerformer. We can decom-
549
+ pose our target function as the composition of τj∈[n]φ(k)
550
+ vn-gn,
551
+ γgn and φ. By the uniform continuity of the functions,
552
+ it suffices to show that 1) we can approximate φ, 2) we
553
+ can approximate operations in γgn and τφvn-gn arbitrar-
554
+ ily well on the compact domain, and 3) the denominator
555
+ φ (qi)T �n
556
+ k=1 φ (kk) is uniformly lower bounded by a pos-
557
+ itive number for any node features in X.
558
+ For 1), each component of φ is continuous and all inputs
559
+ kj, qj lie in the compact domain so φ can be approximated
560
+ arbitrarily well by MLP with O(1) width and O(1) depth
561
+ (Cybenko, 1989).
562
+ For 2), we need to approximate the operations in γgn and
563
+ τφvn-gn, i.e., approximate multiplication, and vector-scalar
564
+ division arbitrarily well. As all those operations are con-
565
+ tinuous, it boils down to showing that all operands lie
566
+ in a compact domain. By assumption AS1 and AS2 on
567
+ WQ, WK, WV and input feature X, we know that qi, ki, vi
568
+ lies in a compact domain for all graph nodes i. As φ is con-
569
+ tinuous, this implies that φ(q_i), Σ_{j=1}^n φ(k_j) ⊗ v_j lies in a
571
+ compact domain (n is fixed), therefore the numerator lies
572
+ in a compact domain. Lastly, since all operations do not
573
+ involve n, the depth and width are constant in n.
574
+ For 3), it is easy to see that φ(q_i)^T Σ_{k=1}^n φ(k_k) is always
+ positive. We just need to show that the denominator is bounded
577
+ from below by a positive constant. For Performer, φ(x) = (exp(−∥x∥_2^2/2)/√m) [ exp(w_1^T x), · · · , exp(w_m^T x) ],
+ where w_k ∼ N(0, I_d). As the norm of any input x to φ is upper bounded by AS1, exp(−∥x∥_2^2/2) is lower
+ bounded. As m is fixed, we know that ∥w_i^T x∥ ≤ ∥w_i∥∥x∥, which implies that w_i^T x is lower bounded by
+ −∥w_i∥∥x∥, which further implies that exp(w_i^T x) is lower bounded. This means that
+ φ(q_i)^T Σ_{k=1}^n φ(k_k) is lower bounded.
609
+ k=1 φ (kk) is lower bounded.
610
+ For Linear Transformer, the proof is essentially the same
611
+ as above. We only need to show that φ(x) = elu(x) + 1 is
612
+ continuous and positive, which is indeed the case.
613
+ Besides Performers, there are many other different ways of
614
+ obtaining linear complexity. In Appendix C.2, we discuss
615
+ the limitation of MPNN + VN on approximating other types
616
+ of efficient transformers such as Linformer (Wang et al.,
617
+ 2020b) and Sparse Transformer (Child et al., 2019).
618
+ 5. O(1) depth O(n^d) width MPNN + VN
619
+ We have shown that the MPNN + VN can approximate self-
620
+ attention in Performer and Linear Transformer using only
621
+ O(1) depth and O(1) width. One may naturally wonder
622
+ whether MPNN + VN can approximate the self-attention
623
+ layer in the full transformer. In this section, we show that
624
+ MPNN + VN with O(1) depth (number of layers), but with
625
+ O(n^d) width, can approximate 1 self-attention layer (and
626
+ full transformer) arbitrarily well.
627
+ The main observation is that MPNN + VN is able to ex-
628
+ actly simulate (not just approximate) equivariant DeepSets
629
+ (Zaheer et al., 2017), which is proved to be universal in
630
+ approximating any permutation invariant/equivariant maps
631
+ (Zaheer et al., 2017; Segol & Lipman, 2019). Since the
632
+ self-attention layer is permutation equivariant, this implies
633
+ that MPNN + VN can approximate the self-attention layer
634
+ (and full transformer) with O(1) depth and O(nd) width fol-
635
+ lowing a result on DeepSets from Segol & Lipman (2019).
636
+ We first introduce the permutation equivariant map, equiv-
637
+ ariant DeepSets, and permutation equivariant universality.
638
+ Definition 5.1 (permutation equivariant map). A map F :
639
+ Rn×k → Rn×l satisfying F (σ · X) = σ · F (X) for all
640
+ σ ∈ Sn and X ∈ Rn×d is called permutation equivariant.
641
+ Definition 5.2 (equivariant DeepSets of Zaheer et al. (2017)). Equivariant DeepSets has the following form
+ F(X) = L^ds_m ∘ ν ∘ · · · ∘ ν ∘ L^ds_1(X), where L^ds_i is a linear permutation equivariant layer and ν is a
+ nonlinear layer such as ReLU. The linear permutation equivariant layer in DeepSets has the following form
+ L^ds_i(X) = XA + (1/n)11^T XB + 1c^T,
+ where A, B ∈ R^{d_i×d_{i+1}}, c ∈ R^{d_{i+1}} are the weights and bias
+ in layer i, and ν is ReLU.
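+ The linear permutation equivariant layer of Definition 5.2 in a few lines of NumPy (a sketch; shapes follow the definition):
+ import numpy as np
+ def deepsets_linear_layer(X, A, B, c):
+     # L(X) = X A + (1/n) 1 1^T X B + 1 c^T
+     mean_row = X.mean(axis=0, keepdims=True)                       # (1/n) 1^T X, shape (1, d_i)
+     return X @ A + np.ones((X.shape[0], 1)) @ (mean_row @ B) + c[None, :]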
654
+ Definition 5.3 (permutation equivariant universality).
655
+ Given a compact domain X of Rn×din, permutation equiv-
656
+ ariant universality of a model F : Rn×din → Rn×dout means
657
+ that for every permutation equivariant continuous function
658
+ H : Rn×din → Rn×dout defined over X, and any ϵ > 0,
659
+ there exists a choice of m (i.e., network depth), di (i.e., net-
660
+ work width at layer i) and the trainable parameters of F so
661
+ that ∥H(X) − F (X)∥∞ < ϵ for all X ∈ X.
662
+ The universality of equivariant DeepSets is stated as follows.
663
+ Theorem 5.4 (Segol & Lipman (2019)). DeepSets with con-
664
+ stant layer is universal. Using ReLU activation the width
665
+ ω := maxidi (di is the width for i-th layer of DeepSets)
666
+ required for universal permutation equivariant network sat-
667
+ isfies ω ≤ d_out + d_in + (n + d_in choose d_in) = O(n^{d_in}).
672
+ We are now ready to state our main theorem.
673
+ Theorem 5.5. MPNN + VN can simulate (not just approx-
674
+ imate) equivariant DeepSets: Rn×d → Rn×d. The depth
675
+ and width of MPNN + VN needed to simulate DeepSets is up
676
+ to a constant factor of the depth and width of DeepSets. This
677
+ implies that MPNN + VN of O(1) depth and O(nd) width
678
+ is permutation equivariant universal, and can approximate
679
+ self-attention layer and transformers arbitrarily well.
680
Table 2: Baselines for Peptides-func (graph classification) and Peptides-struct (graph regression). The performance metric is Average Precision (AP) for classification and MAE for regression. Bold: Best score.

Model               # Params   Test AP before VN   Test AP after VN ↑   Test MAE before VN   Test MAE after VN ↓
GCN                 508k       0.5930±0.0023       0.6623±0.0038        0.3496±0.0013        0.2488±0.0021
GINE                476k       0.5498±0.0079       0.6346±0.0071        0.3547±0.0045        0.2584±0.0011
GatedGCN            509k       0.5864±0.0077       0.6635±0.0024        0.3420±0.0013        0.2523±0.0016
GatedGCN+RWSE       506k       0.6069±0.0035       0.6685±0.0062        0.3357±0.0006        0.2529±0.0009
Transformer+LapPE   488k       0.6326±0.0126       -                    0.2529±0.0016        -
SAN+LapPE           493k       0.6384±0.0121       -                    0.2683±0.0043        -
SAN+RWSE            500k       0.6439±0.0075       -                    0.2545±0.0012        -
Proof. Equivariant DeepSets has the form F(X) = L^ds_m ∘ ν ∘ · · · ∘ ν ∘ L^ds_1(X), where L^ds_i is a linear permutation equivariant layer and ν is an entrywise nonlinear activation layer. Recall that the linear equivariant layer has the form L^ds_i(X) = XA + (1/n)11^T XB + 1c^T. As one can use the same entrywise nonlinear activation layer ν in MPNN + VN, it suffices to prove that MPNN + VN can compute the linear permutation equivariant layer L^ds. We now show that 2 layers of MPNN + VN can exactly simulate any given linear permutation equivariant layer L^ds.

Specifically, at layer 0, we initialize the node features as follows: the VN feature is set to 0, while the feature of the i-th graph node is set to x_i ∈ R^d.

At layer 1: the VN feature is (1/n)11^T X, the average of the node features, and the collection of features over the n graph nodes is XA. We only need to transform the graph node features by a linear map and set the VN feature to the average of the graph node features from the previous iteration. Both can be exactly implemented in Definition 3.4 of the simplified heterogeneous MPNN + VN.

At layer 2: the VN feature is set to 0, and the graph node features become XA + (1/n)11^T XB + 1c^T. Here we only need to multiply the VN feature by B and add a bias c, which can be done by implementing a linear function for γ_gn.

It is easy to see that the width required for MPNN + VN to simulate DeepSets is constant. Thus, one can use 2 layers of MPNN + VN to compute the linear permutation equivariant layer L^ds_i, which implies that MPNN + VN can simulate 1 layer of DeepSets exactly with constant depth and constant width (independent of n). Then, by the universality of DeepSets stated in Theorem 5.4, we conclude that MPNN + VN is also permutation equivariant universal, which implies that a constant number of MPNN + VN layers of O(n^d) width can approximate any continuous equivariant map. As the self-attention layer L and the full transformer are both continuous and equivariant, they can be approximated by MPNN + VN arbitrarily well.
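The two-round simulation used in this proof can be checked numerically. The sketch below is ours (matrix names and sizes are arbitrary): it builds one linear permutation equivariant DeepSets layer L^ds(X) = XA + (1/n)11^T XB + 1c^T and reproduces it with a graph-nodes-to-VN averaging round followed by a VN-to-graph-nodes broadcast round.

```python
import numpy as np

rng = np.random.default_rng(0)
n, d_in, d_out = 5, 3, 4
X = rng.normal(size=(n, d_in))
A, B = rng.normal(size=(d_in, d_out)), rng.normal(size=(d_in, d_out))
c = rng.normal(size=d_out)

# Target: one linear permutation equivariant DeepSets layer.
target = X @ A + np.ones((n, 1)) @ (X.mean(axis=0, keepdims=True) @ B) + c

# Round 1 (graph nodes -> VN): the VN stores the mean of the node features,
# while each graph node applies the linear map A locally.
vn = X.mean(axis=0)        # (1/n) 1^T X
h = X @ A                  # XA, computed node-wise

# Round 2 (VN -> graph nodes): the VN broadcasts, each node adds vn B + c.
out = h + vn @ B + c

assert np.allclose(out, target)
```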
Thanks to the connection between MPNN + VN and DeepSets, there is no extra assumption on X except compactness. The drawback, on the other hand, is that the upper bound on the computational complexity needed to approximate self-attention with wide MPNN + VN is worse than directly computing self-attention when d > 2.
6. O(n) depth O(1) width MPNN + VN

The previous section shows that we can approximate a full attention layer of the Transformer using MPNN + VN with O(1) depth but O(n^d) width, where n is the number of nodes and d is the dimension of the node features. In practice, it is not desirable to have the width depend on the graph size.

In this section, we study MPNN + VN with O(1) width and its ability to approximate a self-attention layer of the Transformer. This appears to be much more challenging. Our result in this section only shows that for a rather restrictive family of input graphs (see Assumption 3 below), we can approximate a full self-attention layer of the transformer with an MPNN + VN of O(1) width and O(n) depth. We leave the question of MPNN + VN's ability to approximate transformers for more general families of graphs to future investigation.

We first introduce the notion of (V, δ) separable node features. This is needed to ensure that the VN can approximately select one node feature to process at each iteration with attention α_vn, the self-attention in the virtual node.

Definition 6.1 ((V, δ) separable by ᾱ). Given a graph G of size n, a fixed V = [v_1, ..., v_n] ∈ R^{n×d}, and ᾱ ∈ A, we say a node feature X ∈ R^{n×d} of G is (V, δ) separable by ᾱ if the following holds: for any node feature x_i, there exist weights W^ᾱ_K, W^ᾱ_Q in the attention score ᾱ such that ᾱ(x_i, v_i) > max_{j≠i} ᾱ(x_j, v_i) + δ. We say a set X is (V, δ) separable by ᾱ if every element X ∈ X is (V, δ) separable by ᾱ.

The use of (V, δ) separability is to approximate a hard selection function arbitrarily well, which is stated below and proved in Appendix B.1.
Table 3: Test performance in graph-level OGB benchmarks (Hu et al., 2020). Shown is the mean ± s.d. of 10 runs.

Model                       ogbg-molhiv       ogbg-molpcba       ogbg-ppa          ogbg-code2
                            AUROC ↑           Avg. Precision ↑   Accuracy ↑        F1 score ↑
GCN                         0.7606 ± 0.0097   0.2020 ± 0.0024    0.6839 ± 0.0084   0.1507 ± 0.0018
GCN+virtual node            0.7599 ± 0.0119   0.2424 ± 0.0034    0.6857 ± 0.0061   0.1595 ± 0.0018
GIN                         0.7558 ± 0.0140   0.2266 ± 0.0028    0.6892 ± 0.0100   0.1495 ± 0.0023
GIN+virtual node            0.7707 ± 0.0149   0.2703 ± 0.0023    0.7037 ± 0.0107   0.1581 ± 0.0026
SAN                         0.7785 ± 0.2470   0.2765 ± 0.0042    -                 -
GraphTrans (GCN-Virtual)    -                 0.2761 ± 0.0029    -                 0.1830 ± 0.0024
K-Subtree SAT               -                 -                  0.7522 ± 0.0056   0.1937 ± 0.0028
GPS                         0.7880 ± 0.0101   0.2907 ± 0.0028    0.8015 ± 0.0033   0.1894 ± 0.0024
MPNN + VN + NoPE            0.7676 ± 0.0172   0.2823 ± 0.0026    0.8055 ± 0.0038   0.1727 ± 0.0017
MPNN + VN + PE              0.7687 ± 0.0136   0.2848 ± 0.0026    0.8027 ± 0.0026   0.1719 ± 0.0013
Lemma 6.2 (approximate hard selection). Suppose X is (V, δ) separable by ᾱ for some fixed V ∈ R^{n×d}, ᾱ ∈ A and δ > 0. Then for any ϵ > 0 and i ∈ [n], there exists a set of attention weights W_{i,Q}, W_{i,K} in the i-th layer of MPNN + VN such that α_vn(x_i, v_i) > 1 − ϵ for any x_i ∈ X_i. In other words, by setting α_vn = ᾱ we can approximate the hard selection function f_i(x_1, ..., x_n) = x_i arbitrarily well on X.

With the notation set up, we now state an extra assumption needed for the deep MPNN + VN case, followed by the main theorem.

AS3. X is (V, δ) separable by ᾱ for some fixed V ∈ R^{n×d}, ᾱ ∈ A and δ > 0.

Theorem 6.3. Assume AS 1-3 hold for the compact set X and L. Given any graph G of size n with node features X ∈ X, and a self-attention layer L on G (fix W_K, W_Q, W_V in α), there exists an O(n)-layer heterogeneous MPNN + VN with specific aggregate/update/message functions that can approximate L on X arbitrarily well.

The proof is presented in Appendix B. At a high level, we design an MPNN + VN whose i-th layer selects x̃_i, an approximation of x_i, via the attention mechanism (enabled by Lemma 6.2) and sends x̃_i to the virtual node. The virtual node then passes x̃_i to all graph nodes, which compute approximations of e^{α(x_i, x_j)} for all j ∈ [n]. Repeating this procedure n times for all graph nodes, the last layer finally performs the attention normalization. A slight relaxation of AS3 is also provided in the appendix.
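A small numerical illustration of Lemma 6.2 (ours; the feature construction below is a deliberately simple assumption): once x_i has a margin over the other nodes with respect to the query v_i, rescaling the attention weights by a constant c drives the softmax toward a hard selection of x_i.

```python
import numpy as np

def softmax(z):
    z = z - z.max()
    return np.exp(z) / np.exp(z).sum()

rng = np.random.default_rng(0)
n = 6
# Clearly (V, delta)-separable features: x_i aligns best with v_i = e_i.
V = np.eye(n)
X = np.eye(n) + 0.1 * rng.normal(size=(n, n))
i = 2

for c in [1.0, 10.0, 100.0]:              # amplify the attention weights by c
    scores = c * (X @ V[i])               # unnormalized scores of every x_j against v_i
    attn = softmax(scores)
    x_tilde = attn @ X                    # soft selection; approaches x_i as c grows
    print(f"c={c:6.1f}  attn_i={attn[i]:.4f}  err={np.linalg.norm(x_tilde - X[i]):.4f}")
```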
7. Experiments

7.1. MPNN + VN for LRGB Datasets

We experiment with MPNN + VN on the Long Range Graph Benchmark (LRGB) datasets. The original paper (Dwivedi et al., 2022) observes that GT outperforms MPNN on 4 out of 5 datasets; in particular, GT shows significant improvement over all MPNNs on Peptides-func and Peptides-struct. To test the effectiveness of the virtual node, we take the original code and modify the graph topology by adding a virtual node, keeping the hyperparameters of all models unchanged.

Results are shown in Table 2. Interestingly, such a simple change boosts MPNN + VN by a large margin on Peptides-func and Peptides-struct. Notably, with the addition of VN, GatedGCN + RWSE (random-walk structural encoding) outperforms all transformers on Peptides-func, and GCN outperforms transformers on Peptides-struct.
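Concretely, the topology modification amounts to appending one extra node and wiring it to every existing node. The sketch below is ours (array conventions are assumptions, not the authors' code):

```python
import numpy as np

def add_virtual_node(x, edge_index):
    """Append a virtual node connected to every graph node in both directions.
    x: (n, d) node features; edge_index: (2, m) COO edge list."""
    n, d = x.shape
    x_aug = np.vstack([x, np.zeros((1, d))])        # VN feature initialized to zeros; VN gets index n
    nodes, vn = np.arange(n), np.full(n, n)
    new_edges = np.concatenate([np.stack([nodes, vn]),           # node -> VN
                                np.stack([vn, nodes])], axis=1)  # VN -> node
    return x_aug, np.concatenate([edge_index, new_edges], axis=1)
```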
7.2. Stronger MPNN + VN Implementation

Next, leveraging the modularized implementation of GraphGPS (Rampášek et al., 2022), we implement a version of MPNN + VN with and without extra positional embedding. Our goal is not to achieve SOTA but to push the limit of MPNN + VN and better understand the source of the performance gain of GT. In particular, we replace the GlobalAttention module in GraphGPS with DeepSets, which is equivalent to one specific version of MPNN + VN; see the sketch after this paragraph for the flavor of this substitution. We test this version of MPNN + VN on 4 OGB datasets, both with and without positional embedding. The results are reported in Table 3. Interestingly, even without the extra positional embedding, our MPNN + VN further improves over the previous GCN + VN & GIN + VN implementations. The improvement on ogbg-ppa is particularly impressive, from 0.7037 to 0.8055. Furthermore, while MPNN + VN does not necessarily outperform GraphGPS, a state-of-the-art architecture using MPNN, positional/structural encoding, and Transformer together, the difference is quite small, and it is achieved by a simple MPNN + VN architecture.
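The flavor of this substitution is sketched below (ours; the real GraphGPS module interfaces differ, and the MLPs here are placeholders): the global attention block becomes a DeepSets-style pool-transform-broadcast, i.e., one virtual-node round.

```python
import numpy as np

def mlp(x, W1, b1, W2, b2):
    # Tiny 2-layer MLP standing in for the learned maps.
    return np.maximum(x @ W1 + b1, 0) @ W2 + b2

def deepsets_global_block(H, p_pool, p_update):
    """DeepSets-style replacement for a global attention block: pool all node
    states into one vector (the VN), transform it, and add it back to every node."""
    g = mlp(H.mean(axis=0), *p_pool)      # graph nodes -> VN, then VN update
    return H + mlp(g, *p_update)          # VN -> graph nodes (broadcast over rows)

rng = np.random.default_rng(0)
n, d = 8, 16
H = rng.normal(size=(n, d))
p_pool = (rng.normal(size=(d, d)), np.zeros(d), rng.normal(size=(d, d)), np.zeros(d))
p_upd = (rng.normal(size=(d, d)), np.zeros(d), rng.normal(size=(d, d)), np.zeros(d))
print(deepsets_global_block(H, p_pool, p_upd).shape)   # (8, 16), permutation equivariant
```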
+ + VN architecture.
947
+ We also test MPNN + VN on large-scale molecule datasets
948
+ PCQMv2, which has 529,434 molecule graphs. We fol-
949
+ lowed (Rampášek et al., 2022) and used the original vali-
950
+ dation set as the test set, while we left out random 150K
951
+ molecules for our validation set. As we can see from Table 4,
952
+ MPNN + VN + NoPE performs significantly better than the
953
+ early MPNN + VN implementation: GIN + VN and GCN +
954
+
955
+ On the Connection Between MPNN and Graph Transformer
956
+ Table 4: Evaluation on PCQM4Mv2 (Hu et al., 2021) dataset. For GPS evaluation, we treated the validation set of the
957
+ dataset as a test set, since the test-dev set labels are private.
958
+ Model
959
+ PCQM4Mv2
960
+ Test-dev MAE ↓
961
+ Validation MAE ↓
962
+ Training MAE
963
+ # Param.
964
+ GCN
965
+ 0.1398
966
+ 0.1379
967
+ n/a
968
+ 2.0M
969
+ GCN-virtual
970
+ 0.1152
971
+ 0.1153
972
+ n/a
973
+ 4.9M
974
+ GIN
975
+ 0.1218
976
+ 0.1195
977
+ n/a
978
+ 3.8M
979
+ GIN-virtual
980
+ 0.1084
981
+ 0.1083
982
+ n/a
983
+ 6.7M
984
+ GRPE (Park et al., 2022)
985
+ 0.0898
986
+ 0.0890
987
+ n/a
988
+ 46.2M
989
+ EGT (Hussain et al., 2022)
990
+ 0.0872
991
+ 0.0869
992
+ n/a
993
+ 89.3M
994
+ Graphormer (Shi et al., 2022)
995
+ n/a
996
+ 0.0864
997
+ 0.0348
998
+ 48.3M
999
+ GPS-small
1000
+ n/a
1001
+ 0.0938
1002
+ 0.0653
1003
+ 6.2M
1004
+ GPS-medium
1005
+ n/a
1006
+ 0.0858
1007
+ 0.0726
1008
+ 19.4M
1009
+ MPNN + VN + PE (small)
1010
+ n/a
1011
+ 0.0942
1012
+ 0.0617
1013
+ 5.2M
1014
+ MPNN + VN + PE (medium)
1015
+ n/a
1016
+ 0.0867
1017
+ 0.0703
1018
+ 16.4M
1019
+ MPNN + VN + NoPE (small)
1020
+ n/a
1021
+ 0.0967
1022
+ 0.0576
1023
+ 5.2M
1024
+ MPNN + VN + NoPE (medium)
1025
+ n/a
1026
+ 0.0889
1027
+ 0.0693
1028
+ 16.4M
1029
The performance gap with GPS, on the other hand, is rather small: 0.0938 (GPS) vs. 0.0942 (MPNN + VN + PE) for the small model and 0.0858 (GPS) vs. 0.0867 (MPNN + VN + PE) for the medium model.

7.3. Forecasting Sea Surface Temperature

In this experiment, we apply our MPNN + VN model to forecast sea surface temperature (SST). We are particularly interested in the empirical comparison between MPNN + VN and Linear Transformer (Katharopoulos et al., 2020a), since, according to Section 4, MPNN + VN can theoretically approximate Linear Transformer.

In particular, from the DOISST data proposed by Huang et al. (2021), we construct a dataset of daily SST in the Pacific Ocean from 1982 to 2021, in the region of longitudes from 180.125°E to 269.875°E and latitudes from −14.875°N to 14.875°N. Following the procedure of (de Bezenac et al., 2018; de Bézenac et al., 2019) and Wang et al. (2022), we divide the region into 11 batches of equal size with 30 longitudes and 30 latitudes at 0.5° resolution, each of which can be represented as a graph of 900 nodes. The tasks are to predict the next 4 weeks, 2 weeks, and 1 week of SST at each location, given 6 weeks of historical data. We train on data from 1982–2018, validate on data from 2019, and test on data from 2020–2021. The numbers of training, validation, and testing examples are roughly 150K, 3K, and 7K. See details of the dataset construction, model architectures, and training scheme in Appendix D.4.
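The exact graph construction is given in Appendix D.4, which is not reproduced here; a plausible minimal version (our assumption, not the authors' construction) wires each 30×30 patch as a 4-neighbor grid of 900 nodes:

```python
import numpy as np

def grid_graph(rows=30, cols=30):
    """4-neighbor grid over a rows x cols patch of SST cells; edges in both directions."""
    idx = np.arange(rows * cols).reshape(rows, cols)
    right = np.stack([idx[:, :-1].ravel(), idx[:, 1:].ravel()])
    down = np.stack([idx[:-1, :].ravel(), idx[1:, :].ravel()])
    edges = np.concatenate([right, down], axis=1)
    return np.concatenate([edges, edges[::-1]], axis=1)

print(grid_graph().shape)   # (2, 3480) for a 30 x 30 patch
```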
We compare our model against several baselines: TF-Net (Wang et al., 2020a), a SOTA method for spatiotemporal forecasting; Linear Transformer (Katharopoulos et al., 2020a; Wang et al., 2020b) with Laplacian positional encoding (LapPE); and a Multilayer Perceptron (MLP). We use Mean Square Error (MSE) as the metric and report the errors on the test set in Table 5. We observe that the virtual node (VN) alone improves upon MPNN by 3.8%, 6.6% and 4.5% in the 4-, 2- and 1-week settings, respectively.
Table 5: Results of SST prediction (test MSE).

Model                         4 weeks   2 weeks   1 week
MLP                           0.3302    0.2710    0.2121
TF-Net                        0.2833    0.2036    0.1462
Linear Transformer + LapPE    0.2818    0.2191    0.1610
MPNN                          0.2917    0.2281    0.1613
MPNN + VN                     0.2806    0.2130    0.1540

Furthermore, aligned with our theory in Section 4, MPNN + VN indeed achieves results comparable with Linear Transformer, outperforming it by a margin of 0.4%, 2.8% and 4.3% in the 4-, 2- and 1-week settings, respectively.
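The quoted percentages follow directly from Table 5 as relative MSE reductions:

```python
mpnn = [0.2917, 0.2281, 0.1613]      # 4-, 2-, 1-week MSE
mpnn_vn = [0.2806, 0.2130, 0.1540]
lin_tr = [0.2818, 0.2191, 0.1610]

print([round(100 * (a - b) / a, 1) for a, b in zip(mpnn, mpnn_vn)])    # [3.8, 6.6, 4.5] vs. MPNN
print([round(100 * (a - b) / a, 1) for a, b in zip(lin_tr, mpnn_vn)])  # [0.4, 2.8, 4.3] vs. Linear Transformer
```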
8. Concluding Remarks

In this paper, we study the expressive power of MPNN + VN under the lens of GT. If we target the self-attention layer in Performer and Linear Transformer, one only needs O(1) depth and O(1) width for arbitrary approximation error. For self-attention in the full transformer, we prove that heterogeneous MPNN + VN with either O(1) depth and O(n^d) width, or O(n) depth and O(1) width (under some assumptions), can approximate 1 self-attention layer arbitrarily well. Compared to earlier results (Kim et al., 2022) showing that GT can approximate MPNN, our theoretical result draws the connection from the inverse direction.

On the empirical side, we demonstrate that MPNN + VN remains a surprisingly strong baseline. Despite recent efforts, we still lack good benchmark datasets where GT can outperform MPNN by a large margin. Understanding the inductive biases of MPNN and GT remains challenging. For example, can we mathematically characterize tasks that require effective long-range interaction modeling, and provide a theoretical justification for using GT over MPNN (or vice versa) for certain classes of functions on the space of graphs? We believe making progress towards answering such questions is an important future direction for the graph learning community.
References

Alon, U. and Yahav, E. On the bottleneck of graph neural networks and its practical implications. arXiv preprint arXiv:2006.05205, 2020.
Battaglia, P. W., Hamrick, J. B., Bapst, V., Sanchez-Gonzalez, A., Zambaldi, V., Malinowski, M., Tacchetti, A., Raposo, D., Santoro, A., Faulkner, R., et al. Relational inductive biases, deep learning, and graph networks. arXiv preprint arXiv:1806.01261, 2018.
Brody, S., Alon, U., and Yahav, E. How attentive are graph attention networks? arXiv preprint arXiv:2105.14491, 2021.
Cai, C. and Wang, Y. A note on over-smoothing for graph neural networks. arXiv preprint arXiv:2006.13318, 2020.
Chen, D., O'Bray, L., and Borgwardt, K. Structure-aware transformer for graph representation learning. In International Conference on Machine Learning, pp. 3469–3489. PMLR, 2022.
Child, R., Gray, S., Radford, A., and Sutskever, I. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509, 2019.
Choromanski, K., Likhosherstov, V., Dohan, D., Song, X., Gane, A., Sarlos, T., Hawkins, P., Davis, J., Mohiuddin, A., Kaiser, L., et al. Rethinking attention with performers. arXiv preprint arXiv:2009.14794, 2020.
Cybenko, G. Approximation by superpositions of a sigmoidal function. Mathematics of Control, Signals and Systems, 2(4):303–314, 1989.
de Bezenac, E., Pajot, A., and Gallinari, P. Deep learning for physical processes: Incorporating prior scientific knowledge. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=By4HsfWAZ.
de Bézenac, E., Pajot, A., and Gallinari, P. Deep learning for physical processes: incorporating prior scientific knowledge. Journal of Statistical Mechanics: Theory and Experiment, 2019(12):124009, 2019. doi: 10.1088/1742-5468/ab3195. URL https://dx.doi.org/10.1088/1742-5468/ab3195.
Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.
Dwivedi, V. P. and Bresson, X. A generalization of transformer networks to graphs. arXiv preprint arXiv:2012.09699, 2020.
Dwivedi, V. P., Rampášek, L., Galkin, M., Parviz, A., Wolf, G., Luu, A. T., and Beaini, D. Long range graph benchmark. arXiv preprint arXiv:2206.08164, 2022.
d'Ascoli, S., Touvron, H., Leavitt, M. L., Morcos, A. S., Biroli, G., and Sagun, L. ConViT: Improving vision transformers with soft convolutional inductive biases. In International Conference on Machine Learning, pp. 2286–2296. PMLR, 2021.
Gilmer, J., Schoenholz, S. S., Riley, P. F., Vinyals, O., and Dahl, G. E. Neural message passing for quantum chemistry. In International Conference on Machine Learning, pp. 1263–1272. PMLR, 2017.
Han, K., Wang, Y., Chen, H., Chen, X., Guo, J., Liu, Z., Tang, Y., Xiao, A., Xu, C., Xu, Y., et al. A survey on vision transformer. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022.
Hu, W., Fey, M., Zitnik, M., Dong, Y., Ren, H., Liu, B., Catasta, M., and Leskovec, J. Open graph benchmark: Datasets for machine learning on graphs. Advances in Neural Information Processing Systems, 33:22118–22133, 2020.
Hu, W., Fey, M., Ren, H., Nakata, M., Dong, Y., and Leskovec, J. OGB-LSC: A large-scale challenge for machine learning on graphs. arXiv preprint arXiv:2103.09430, 2021.
Huang, B., Liu, C., Banzon, V., Freeman, E., Graham, G., Hankins, B., Smith, T., and Zhang, H.-M. Improvements of the daily optimum interpolation sea surface temperature (DOISST) version 2.1. Journal of Climate, 34(8):2923–2939, 2021. doi: 10.1175/JCLI-D-20-0166.1. URL https://journals.ametsoc.org/view/journals/clim/34/8/JCLI-D-20-0166.1.xml.
Hussain, M. S., Zaki, M. J., and Subramanian, D. Global self-attention as a replacement for graph convolution. In Proceedings of the 28th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pp. 655–665, 2022.
Hwang, E., Thost, V., Dasgupta, S. S., and Ma, T. An analysis of virtual nodes in graph neural networks for link prediction. In Learning on Graphs Conference, 2022.
Kalyan, K. S., Rajasekharan, A., and Sangeetha, S. AMMUS: A survey of transformer-based pretrained models in natural language processing. arXiv preprint arXiv:2108.05542, 2021.
Katharopoulos, A., Vyas, A., Pappas, N., and Fleuret, F. Transformers are RNNs: Fast autoregressive transformers with linear attention. In Proceedings of the International Conference on Machine Learning (ICML), 2020a. URL https://arxiv.org/abs/2006.16236.
Katharopoulos, A., Vyas, A., Pappas, N., and Fleuret, F. Transformers are RNNs: Fast autoregressive transformers with linear attention. In International Conference on Machine Learning, pp. 5156–5165. PMLR, 2020b.
Kim, J., Nguyen, T. D., Min, S., Cho, S., Lee, M., Lee, H., and Hong, S. Pure transformers are powerful graph learners. arXiv preprint arXiv:2207.02505, 2022.
Kingma, D. and Ba, J. Adam: A method for stochastic optimization. International Conference on Learning Representations, 2014.
Kreuzer, D., Beaini, D., Hamilton, W., Létourneau, V., and Tossou, P. Rethinking graph transformers with spectral attention. Advances in Neural Information Processing Systems, 34:21618–21629, 2021.
Li, Q., Han, Z., and Wu, X.-M. Deeper insights into graph convolutional networks for semi-supervised learning. In Thirty-Second AAAI Conference on Artificial Intelligence, 2018.
Lim, D., Robinson, J., Zhao, L., Smidt, T., Sra, S., Maron, H., and Jegelka, S. Sign and basis invariant networks for spectral graph representation learning. arXiv preprint arXiv:2202.13013, 2022.
Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., and Guo, B. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 10012–10022, 2021.
Maron, H., Ben-Hamu, H., Shamir, N., and Lipman, Y. Invariant and equivariant graph networks. arXiv preprint arXiv:1812.09902, 2018.
Mialon, G., Chen, D., Selosse, M., and Mairal, J. GraphiT: Encoding graph structure in transformers. arXiv preprint arXiv:2106.05667, 2021.
Oono, K. and Suzuki, T. Graph neural networks exponentially lose expressive power for node classification. arXiv preprint arXiv:1905.10947, 2019.
Park, W., Chang, W.-G., Lee, D., Kim, J., et al. GRPE: Relative positional encoding for graph transformer. In ICLR 2022 Machine Learning for Drug Discovery, 2022.
Rampášek, L., Galkin, M., Dwivedi, V. P., Luu, A. T., Wolf, G., and Beaini, D. Recipe for a general, powerful, scalable graph transformer. arXiv preprint arXiv:2205.12454, 2022.
Reynolds, R. W., Smith, T. M., Liu, C., Chelton, D. B., Casey, K. S., and Schlax, M. G. Daily high-resolution blended analyses for sea surface temperature. Journal of Climate, 20:5473–5496, 2007.
Santoro, A., Raposo, D., Barrett, D. G., Malinowski, M., Pascanu, R., Battaglia, P., and Lillicrap, T. A simple neural network module for relational reasoning. Advances in Neural Information Processing Systems, 30, 2017.
Segol, N. and Lipman, Y. On universal equivariant set networks. arXiv preprint arXiv:1910.02421, 2019.
Shi, Y., Zheng, S., Ke, G., Shen, Y., You, J., He, J., Luo, S., Liu, C., He, D., and Liu, T.-Y. Benchmarking Graphormer on large-scale molecular modeling datasets. arXiv preprint arXiv:2203.04810, 2022.
Tay, Y., Dehghani, M., Bahri, D., and Metzler, D. Efficient transformers: A survey. ACM Computing Surveys (CSUR), 2020.
Topping, J., Di Giovanni, F., Chamberlain, B. P., Dong, X., and Bronstein, M. M. Understanding over-squashing and bottlenecks on graphs via curvature. arXiv preprint arXiv:2111.14522, 2021.
Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, Ł., and Polosukhin, I. Attention is all you need. Advances in Neural Information Processing Systems, 30, 2017.
Veličković, P., Cucurull, G., Casanova, A., Romero, A., Lio, P., and Bengio, Y. Graph attention networks. arXiv preprint arXiv:1710.10903, 2017.
Wang, R., Kashinath, K., Mustafa, M., Albert, A., and Yu, R. Towards physics-informed deep learning for turbulent flow prediction. pp. 1457–1466, 2020a. doi: 10.1145/3394486.3403198.
Wang, R., Walters, R., and Yu, R. Meta-learning dynamics forecasting using task inference. In Oh, A. H., Agarwal, A., Belgrave, D., and Cho, K. (eds.), Advances in Neural Information Processing Systems, 2022. URL https://openreview.net/forum?id=BsSP7pZGFQO.
Wang, S., Li, B. Z., Khabsa, M., Fang, H., and Ma, H. Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768, 2020b.
Wolf, T., Debut, L., Sanh, V., Chaumond, J., Delangue, C., Moi, A., Cistac, P., Rault, T., Louf, R., Funtowicz, M., et al. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pp. 38–45, 2020.
Wu, Q., Zhao, W., Li, Z., Wipf, D., and Yan, J. NodeFormer: A scalable graph structure learning transformer for node classification. In Advances in Neural Information Processing Systems, 2022.
Wu, Z., Jain, P., Wright, M., Mirhoseini, A., Gonzalez, J. E., and Stoica, I. Representing long-range context for graph neural networks with global attention. Advances in Neural Information Processing Systems, 34:13266–13279, 2021.
Yang, C., Wang, R., Yao, S., Liu, S., and Abdelzaher, T. Revisiting over-smoothing in deep GCNs. arXiv preprint arXiv:2003.13663, 2020.
Ying, C., Cai, T., Luo, S., Zheng, S., Ke, G., He, D., Shen, Y., and Liu, T.-Y. Do transformers really perform badly for graph representation? Advances in Neural Information Processing Systems, 34:28877–28888, 2021.
Zaheer, M., Kottur, S., Ravanbakhsh, S., Poczos, B., Salakhutdinov, R. R., and Smola, A. J. Deep sets. Advances in Neural Information Processing Systems, 30, 2017.
Zhao, L. and Akoglu, L. PairNorm: Tackling oversmoothing in GNNs. arXiv preprint arXiv:1909.12223, 2019.
Zweig, A. and Bruna, J. Exponential separations in symmetric neural networks. arXiv preprint arXiv:2206.01266, 2022.
A. Notations

We provide a notation table for reference.

Table 6: Summary of important notations.

Symbol                                     Meaning
X ∈ X ⊂ R^{n×d}                            graph node features
x_i ∈ R^{1×d}                              graph node i's feature
x̃_i ∈ R^{1×d}                              approximated graph node i's feature via attention selection
M                                          a multiset of vectors in R^d
W_Q^(l), W_K^(l), W_V^(l) ∈ R^{d×d'}       attention matrices of the l-th self-attention layer in the graph transformer
X                                          feature space
X_i                                        projection of the feature space onto the i-th coordinate
L^ds_i                                     i-th linear permutation equivariant layer in DeepSets
L, L'                                      full self-attention layer; approximate self-attention layer in Performer
z_vn^(l), z_i^(l)                          virtual/graph node feature at layer l of heterogeneous MPNN + VN
α_vn                                       attention score in MPNN + VN
α(·, ·)                                    normalized attention score
α_GATv2(·, ·)                              normalized attention score with GATv2
α'(·, ·)                                   unnormalized attention score, α'(u, v) = uW_Q(W_K)^T v^T
α'_GATv2(·, ·)                             unnormalized attention score with GATv2, α'_GATv2(u, v) := a^T LeakyReLU(W · [u∥v] + b)
A                                          space of attentions; each α ∈ A has the form α(u, v) = softmax(uW_Q(W_K)^T v^T)
C_1                                        upper bound on the norm of all node features ∥x_i∥
C_2                                        upper bound on the norm of W_Q, W_K, W_V in the target L
C_3                                        upper bound on the norm of the attention weights of α_vn when selecting x_i
γ^(k)(·, ·)                                update function
θ^(k)(·, ·)                                message function
τ(·)                                       aggregation function
B. O(n) Heterogeneous MPNN + VN Layers with O(1) Width Can Approximate 1 Self-Attention Layer Arbitrarily Well

B.1. Assumptions

Definition B.1 ((V, δ) separable by ᾱ). Given a graph G of size n, a fixed V = [v_1, ..., v_n] ∈ R^{n×d} and ᾱ ∈ A, we say a node feature X ∈ R^{n×d} of G is (V, δ) separable by ᾱ if the following holds: for any node feature x_i, there exist weights W^ᾱ_K, W^ᾱ_Q in the attention score ᾱ such that ᾱ(x_i, v_i) > max_{j≠i} ᾱ(x_j, v_i) + δ. We say a set X is (V, δ) separable by ᾱ if every element X ∈ X is (V, δ) separable by ᾱ.

A special case of (V, δ) separability is δ = 0, i.e., ᾱ(x_i, v_i) > max_{j≠i} ᾱ(x_j, v_i) for all i. We provide a geometric characterization of X being (V, 0) separable.

Lemma B.2. Given ᾱ and V, X is (V, 0) separable by ᾱ ⟺ x_i is not in the convex hull spanned by {x_j}_{j≠i} ⟺ no node feature lies in the convex hull of the remaining node features {x_j}_{j≠i}, for every i ∈ [n].

Proof. The second equivalence is trivial, so we only prove the first equivalence. By definition, X is (V, 0) separable by ᾱ ⟺ ᾱ(x_i, v_i) > max_{j≠i} ᾱ(x_j, v_i) for all i ∈ [n] ⟺ ⟨x_i, W^ᾱ_Q W^{ᾱ,T}_K v_i⟩ > max_{j≠i} ⟨x_j, W^ᾱ_Q W^{ᾱ,T}_K v_i⟩ for all i ∈ [n]. Denoting v'_i := W^ᾱ_Q W^{ᾱ,T}_K v_i ∈ R^d, we have ⟨x_i, v'_i⟩ > max_{j≠i} ⟨x_j, v'_i⟩ for all i ∈ [n], which means that for every i ∈ [n], x_i can be linearly separated from {x_j}_{j≠i} ⟺ x_i is not in the convex hull spanned by {x_j}_{j≠i}, which concludes the proof.

Lemma B.3 (approximate hard selection). Suppose X is (V, δ) separable by ᾱ for some fixed V ∈ R^{n×d}, ᾱ ∈ A and δ > 0. Then for any ϵ > 0 and i ∈ [n], there exists a set of attention weights W_{i,Q}, W_{i,K} in the i-th layer of MPNN + VN such that α_vn(x_i, v_i) > 1 − ϵ for any x_i ∈ X_i. In other words, by setting α_vn = ᾱ we can approximate the hard selection function f_i(x_1, ..., x_n) = x_i arbitrarily well on X.

Proof. Denote by ᾱ' the unnormalized ᾱ. As X is (V, δ) separable by ᾱ, by definition ᾱ(x_i, v_i) > max_{j≠i} ᾱ(x_j, v_i) + δ holds for any i ∈ [n] and x_i ∈ M. We can amplify this by multiplying the weight matrix in ᾱ by a constant factor c so that ᾱ'(x_i, v_i) > max_{j≠i} ᾱ'(x_j, v_i) + cδ. This implies that e^{ᾱ'(x_i, v_i)} > e^{cδ} max_{j≠i} e^{ᾱ'(x_j, v_i)}, so after the softmax the attention score ᾱ(x_i, v_i) is at least e^{cδ}/(e^{cδ} + n − 1). We can pick c(δ, ϵ) large enough such that ᾱ(x_i, v_i) > 1 − ϵ for any x_i ∈ X_i and ϵ > 0.
Proof Intuition and Outline. At a high level, the i-th MPNN + VN layer selects x̃_i, an approximation of the i-th node feature x_i, via the attention mechanism (enabled by Lemma 6.2), and sends x̃_i to the virtual node. The virtual node then passes x̃_i to all graph nodes, which compute an approximation of e^{α(x_i, x_j)} for all j ∈ [n]. Repeating this procedure n times for all graph nodes, the last layer finally performs the attention normalization.

The main challenge of the proof is to 1) come up with message/update/aggregation functions for the heterogeneous MPNN + VN layers, which is done in Appendix B.2, and 2) ensure that the approximation error, both from approximating the Aggregate/Message/Update functions with MLPs and from the noisy input, can be well controlled, which is proved in Appendix B.4. We first instantiate the Aggregate/Message/Update functions for virtual/graph nodes in Appendix B.2 and prove that each component can be either exactly computed or approximated to an arbitrary degree by an MLP. We then go through an example in Appendix B.3 of approximating the self-attention layer L with O(n) MPNN + VN layers. The main proof is presented in Appendix B.4, where we show that the approximation error introduced in the different steps is well controlled. Lastly, in Appendix B.5 we show that the assumption on the node features can be relaxed if a more powerful attention mechanism, GATv2 (Brody et al., 2021), is allowed in MPNN + VN.
B.2. Aggregate/Message/Update Functions

Let M be a multiset of vectors in R^d. The specific forms of Aggregate/Message/Update for the virtual and graph nodes are listed below. Note that the ideal forms will be implemented as MLPs, which incurs an approximation error that can be controlled to an arbitrary degree. We use z^(k)_vn to denote the virtual node's feature at the k-th layer, and z^(k)_i to denote graph node i's feature. The iteration index k starts at 0 and the node index starts at 1.

B.2.1. VIRTUAL NODE

At the k-th iteration, the virtual node's feature z^(k)_vn is a concatenation of three components [x̃_k, v_{k+1}, 0], where the first component is the approximately selected node feature x̃_k ∈ R^d, the second component is the vector v_{k+1} ∈ R^d used to select the node feature at the next iteration, and the last component is a placeholder that ensures the virtual node and the graph nodes have the same dimension; it is introduced only to simplify notation.

The initial feature is z^(0)_vn = [0_d, v_1, 0].

Message function + Aggregation function. τ_{j∈[n]} φ^(k)_{vn-gn} : R^{2d+1} × M → R^{2d+1} has two cases, depending on the value of k:

τ_{j∈[n]} φ^(k)_{vn-gn}(z^(k−1)_vn, {z^(k−1)_i}_i) = Σ_i α_vn(z^(k−1)_vn, z^(k−1)_i) z^(k−1)_i    for k = 1, 2, ..., n,
τ_{j∈[n]} φ^(k)_{vn-gn}(z^(k−1)_vn, {z^(k−1)_i}_i) = 1_{2d+1}                                     for k = n+1, n+2,        (7)

where z^(k−1)_vn = [x̃_{k−1}, v_k, 0] and z^(k−1)_i = [x_i, ·, ·] ∈ R^{2d+1} is node i's feature, whose first d coordinates remain fixed across iterations k. τ_{j∈[n]} φ^(k)_{vn-gn} uses the attention α_vn to approximately select the k-th node feature [x_k, ·, ·]. Note that the particular form of the attention α_vn needed for soft selection is not important as long as we can approximate hard selection arbitrarily well. As z^(k−1)_vn contains v_k and z^(k−1)_i contains x_i (see the definition of the graph node feature in Appendix B.2.2), this step can be made as close to hard selection as desired, according to Lemma B.7.

In the case of k = n + 1, n + 2, τ_{j∈[n]} φ^(k)_{vn-gn} simply returns 1_{2d+1}, which can be exactly implemented by an MLP.

Update function. γ^(k)_vn : R^{2d+1} (vn) × R^{2d+1} (gn) → R^{2d+1}: given the virtual node's feature from the last iteration and the feature y = [x_k, ·, ·] selected with α_vn,

γ^(k)_vn(·, y) = [y_{0:d}, v_{k+1}, 0]    for k = 1, ..., n−1,
γ^(k)_vn(·, y) = [y_{0:d}, 0_d, 0]        for k = n,
γ^(k)_vn(·, y) = 1_{2d+1}                 for k = n+1, n+2,        (8)

where y_{0:d} denotes the first d channels of y ∈ R^{2d+1} and y is the feature of the node selected by the Message/Aggregation function. γ^(k)_vn can be exactly implemented by an MLP for every k = 1, ..., n + 2.
B.2.2. GRAPH NODE

Graph node i's feature z_i ∈ R^{2d+1} can be thought of as a concatenation of three components [x_i, tmp, partialsum], where x_i ∈ R^d, tmp ∈ R^d (technically, tmp has the dimension of the feature projected by W_V; we use R^d to reduce notational clutter), and partialsum ∈ R. In particular, x_i is the initial node feature; the first d channels stay the same until layer n + 2. tmp = Σ_{j ∈ subset of [n]} e^{α'_{ij}} W_V x_j stands for the unnormalized attention contribution accumulated up to the current iteration. partialsum ∈ R is the partial sum of the unnormalized attention scores, which will be used for normalization at the (n + 2)-th iteration.

The initial feature is z^(0)_gn = [x_i, 0_d, 0].

Message function + Aggregate function. τ_{j∈[n]} φ^(k)_{gn-vn} : R^{2d+1} × R^{2d+1} → R^{2d+1} simply copies the second argument, since there is just one incoming message from the virtual node, i.e., τ_{j∈[n]} φ^(k)_{gn-vn}(x, {y}) = y. This function can be exactly implemented by an MLP.

Update function. γ^(k)_gn : R^{2d+1} (gn) × R^{2d+1} (vn) → R^{2d+1} has the following form:

γ^(k)_gn([x, tmp, partialsum], y) = [x, tmp, partialsum]                                                        for k = 1,
γ^(k)_gn([x, tmp, partialsum], y) = [x, tmp + e^{α'(x, y_{0:d})} W_V y_{0:d}, partialsum + e^{α'(x, y_{0:d})}]  for k = 2, ..., n+1,
γ^(k)_gn([x, tmp, partialsum], y) = [tmp / partialsum, 0_d, 0]                                                  for k = n+2,        (9)

where α'(x, y_{0:d}) is the usual unnormalized attention score. The update function γ^(k)_gn can be approximated arbitrarily well by an MLP, which is proved below.

Lemma B.4. The update function γ^(k)_gn can be approximated arbitrarily well by an MLP from R^{2d+1} × R^{2d+1} to R^{2d+1} for all k = 1, ..., n + 2.
+ k = 1, ..., n + 2.
1632
+ Proof. We will show that for any k = 1, ..., n + 2, the target function γ(k)
1633
+ gn : R2d+1 × R2d+1 → R2d+1 is continuous and
1634
+ the domain is compact. By the universality of MLP in approximating continuous function on the compact domain, we know
1635
+ γ(k)
1636
+ gn can be approximated to arbitrary precision by an MLP.
1637
+ 3tmp technicially denotes the dimension of projected feature by WV and does not has to be in Rd. We use Rd here to reduce the
1638
+ notation clutter.
1639
+
1640
+ On the Connection Between MPNN and Graph Transformer
1641
+ Recall that
1642
+ γ(k)
1643
+ gn ([x, tmp, partialsum], y) =
1644
+
1645
+
1646
+
1647
+
1648
+
1649
+
1650
+
1651
+
1652
+
1653
+ [x, tmp, partialsum]
1654
+ k = 1
1655
+ [x, tmp + eα′(x,y0:d)WV y0:d,
1656
+ partialsum + eα′(x,y0:d)]
1657
+ k = 2, ..., n + 1
1658
+ [
1659
+ tmp
1660
+ partialsum, 0d, 0]
1661
+ k = n + 2
1662
+ it is easy to see that k = 1, γ(1)
1663
+ gn is continuous. We next show for k = 2, ..., n + 2, γ(1)
1664
+ gn is also continuous and all arguments
1665
+ lie in a compact domain.
1666
+ γ(k)
1667
+ gn
1668
+ is continuous because to a) α′(x, y) is continuous b) scalar-vector multiplication, sum, and exponential are all
1669
+ continuous. Next, we show that four component x, tmp, partialsum, y0:d all lies in a compact domain.
1670
+ x is the initial node features, and by AS1 their norm is bounded so x is in a compact domain.
1671
+ tmp is an approximation of eα′
1672
+ i,1WV x1 + eα′
1673
+ i,2WV x2 + .... As α′(xi, xj) is both upper and lower bounded by AS2 for all
1674
+ i, j ∈ [n] and xi is bounded by AS1, eα′
1675
+ i,1WV x1 + eα′
1676
+ i,2WV x2 + ... is also bounded from below and above. tmp will also
1677
+ be bounded as we can control the error to any precision.
1678
+ partialsum is an approximation of eα′
1679
+ i,1 + eα′
1680
+ i,2 + .... For the same reason as the case above, partialsum is also bounded
1681
+ both below and above.
1682
+ y0:d will be ˜xi at i-th iteration so it will also be bounded by AS1.
1683
+ Therefore we conclude the proof.
1684
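Stripped of the MLP-approximation bookkeeping, the accumulation performed by Eq. (9) at each graph node is simply the following (our sketch; the bilinear score and shapes are assumptions matching the notation above):

```python
import numpy as np

rng = np.random.default_rng(0)
d = 4
WQ, WK, WV = (rng.normal(size=(d, d)) for _ in range(3))

def alpha_prime(u, v):
    # Unnormalized bilinear attention score u WQ WK^T v^T.
    return u @ WQ @ WK.T @ v

def gn_update(state, x_tilde):
    """Graph-node update of Eq. (9) for 2 <= k <= n+1: accumulate the unnormalized
    contribution of the node feature x_tilde broadcast by the virtual node."""
    x, tmp, psum = state
    a = np.exp(alpha_prime(x, x_tilde))
    return x, tmp + a * (WV @ x_tilde), psum + a

x = rng.normal(size=d)
state = (x, np.zeros(d), 0.0)
state = gn_update(state, rng.normal(size=d))   # one VN broadcast consumed; layer n+2 outputs tmp / psum
```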
B.3. A Running Example

We provide an example to illustrate how the node features are updated at each iteration. (To reduce notational clutter and convey the intuition of the proof, we omit here the approximation error introduced by using MLPs to approximate the aggregation/message/update functions and assume these functions are implemented exactly; the MLP approximation error is handled rigorously in the proofs.)

Time 0: All nodes are initialized as indicated in Appendix B.2. The virtual node feature is z^(0)_vn = [0_d, v_1, 0], and the graph node features are z^(0)_i = [x_i, 0_d, 0] for all i ∈ [n].

Time 1: For the virtual node, according to the definition of τ_{j∈[n]} φ^(1)_{vn-gn} in Equation (7), it picks an approximation of x_1, i.e., x̃_1; the approximation error can be made arbitrarily small. The VN feature becomes z^(1)_vn = [x̃_1, v_2, 0]. For the i-th graph node, according to γ^(1)_gn in Equation (9), the state is unchanged: z^(1)_i = [x_i, 0_d, 0].

Time 2: For the virtual node, similar to the analysis at time 1, the VN feature becomes z^(2)_vn = [x̃_2, v_3, 0]; note that the weights and biases in τ_{j∈[n]} φ^(2)_{vn-gn} differ from those in τ_{j∈[n]} φ^(1)_{vn-gn}. For the i-th graph node, since z^(1)_vn = [x̃_1, v_2, 0] and z^(1)_i = [x_i, 0_d, 0], according to γ^(2)_gn in Equation (9) we get z^(2)_i = [x_i, e^{α̃'_{i,1}} W_V x̃_1, e^{α̃'_{i,1}}], where α̃'_{i,1} := α'(x_i, x̃_1); we use similar notation in later iterations.

Time 3: Similarly, z^(3)_vn = [x̃_3, v_4, 0] and z^(3)_i = [x_i, e^{α̃'_{i,1}} W_V x̃_1 + e^{α̃'_{i,2}} W_V x̃_2, e^{α̃'_{i,1}} + e^{α̃'_{i,2}}].

Time n: z^(n)_vn = [x̃_n, 0_d, 0] and z^(n)_i = [x_i, e^{α̃'_{i,1}} W_V x̃_1 + ... + e^{α̃'_{i,n−1}} W_V x̃_{n−1}, e^{α̃'_{i,1}} + ... + e^{α̃'_{i,n−1}}], with n − 1 terms in each sum.

Time n + 1: According to Appendix B.2.1, at the (n + 1)-th iteration the virtual node's feature becomes 1_{2d+1}, and the graph node features become z^(n+1)_i = [x_i, Σ_{k∈[n]} e^{α̃'_{ik}} W_V x̃_k, Σ_{k∈[n]} e^{α̃'_{ik}}].

Time n + 2 (final layer): The virtual node's feature stays the same. For the graph nodes, the last layer serves as a normalization of the attention scores (using an MLP to approximate vector-scalar multiplication) and sets the last channel to 0 (a projection), resulting in an approximation of [x_i, (Σ_{k∈[n]} e^{α̃'_{ik}} W_V x̃_k) / (Σ_{k∈[n]} e^{α̃'_{ik}}), 0]. Finally, one more linear transformation turns the node feature into [(Σ_{k∈[n]} e^{α̃'_{ik}} W_V x̃_k) / (Σ_{k∈[n]} e^{α̃'_{ik}}), 0_d, 0]. The first d channels are an approximation of the output of the self-attention layer for node i, where the approximation error can be made as small as desired. This is proved in Appendix B, and we conclude that heterogeneous MPNN + VN can approximate the self-attention layer L to arbitrary precision with O(n) MPNN layers.
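The whole construction of Appendices B.2 and B.3 can be condensed into the following sketch (ours; hard selection replaces the sharpened attention of Lemma 6.2 and MLP approximation errors are ignored, so the match is exact rather than approximate):

```python
import numpy as np

rng = np.random.default_rng(0)
n, d = 5, 3
X = rng.normal(size=(n, d))
WQ, WK, WV = (rng.normal(size=(d, d)) for _ in range(3))

def softmax(z, axis=-1):
    z = z - z.max(axis=axis, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=axis, keepdims=True)

target = softmax(X @ WQ @ WK.T @ X.T) @ X @ WV   # exact self-attention layer L

tmp = np.zeros((n, d))    # per-node unnormalized attention contributions
psum = np.zeros(n)        # per-node partial sums of exp scores
for k in range(n):
    x_sel = X[k]                             # layer k: the VN "selects" node k and broadcasts it
    scores = np.exp(X @ WQ @ WK.T @ x_sel)   # every graph node scores the broadcast feature
    tmp += scores[:, None] * (x_sel @ WV)    # accumulate exp(alpha') * W_V x_k
    psum += scores
out = tmp / psum[:, None]                    # final layer: attention normalization

assert np.allclose(out, target)
```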
B.4. Controlling Error

At a high level, there are three major sources of approximation error: 1) approximating hard selection with self-attention, 2) approximating the update γ^(k)_gn with MLPs, and 3) the attention normalization in the last layer. In all cases, we aim to approximate the output of a continuous map L_c(x). However, our input is usually not exactly x but an approximation x̃, and we cannot access the original map L_c but only an MLP approximation of it, denoted L_MLP. The following lemma allows us to control the difference between L_c(x) and L_MLP(x̃).

Lemma B.5. Let L_c be a continuous map from a compact set to a compact set in Euclidean space, and let L_MLP be an MLP approximation of L_c. If we can make ∥x − x̃∥ arbitrarily small, then we can make the error ∥L_c(x) − L_MLP(x̃)∥ arbitrarily small.

Proof. By the triangle inequality, ∥L_c(x) − L_MLP(x̃)∥ ≤ ∥L_c(x) − L_MLP(x)∥ + ∥L_MLP(x) − L_MLP(x̃)∥. For the first term, by the universality of MLPs we can control ∥L_c(x) − L_MLP(x)∥ to an arbitrary degree. For the second term, as L_MLP is continuous on a compact domain, it is uniformly continuous by the Heine-Cantor theorem. This means we can control ∥L_MLP(x) − L_MLP(x̃)∥ as long as we can control ∥x − x̃∥, independently of x. By assumption this is indeed the case, which concludes the proof.

Remark B.6. The implication is that when we approximate the output of a continuous map L_c on a compact domain by an MLP L_MLP, it suffices to show that 1) ∥L_c − L_MLP∥_∞ and 2) ∥x̃ − x∥ can be made arbitrarily small. The first point usually follows from the universality of MLPs on compact domains (Cybenko, 1989); the second point needs to be shown case by case.

In Appendix B.3, to simplify notation we omitted the error introduced by using MLPs to approximate the aggregation/message/update functions (continuous functions on compact domains of R^d) in MPNN + VN. Lemma B.5 justifies such reasoning.

Lemma B.7 (x̃_i approximates x_i; α̃'_{i,j} approximates α'_{i,j}). For any ϵ > 0 and X ∈ X, there exists a set of weights for the message/aggregate functions of the virtual node such that ∥x_i − x̃_i∥ < ϵ and |α'_{i,j} − α̃'_{i,j}| < ϵ.

Proof. By Lemma 6.2 we know that α̃_{i,j} := α̃(x_i, x_j) → δ(i − j) as C_3(ϵ) goes to infinity. Therefore we have

∥x̃_i − x_i∥ = ∥Σ_j α̃_{i,j} x_j − x_i∥ = ∥Σ_j (α̃_{i,j} − δ(i − j)) x_j∥ < ϵ Σ_j ∥x_j∥ < nC_1 ϵ.        (10)

As n and C_1 are fixed, we can make this upper bound as small as we want by increasing C_3.

For the attention scores, with α̃'_{i,j} = α'_MLP(x̃_i, x_j),
|α'_{i,j} − α̃'_{i,j}| = |α'(x_i, x_j) − α'_MLP(x̃_i, x_j)| ≤ |α'(x_i, x_j) − α'(x̃_i, x_j)| + |α'(x̃_i, x_j) − α'_MLP(x̃_i, x_j)| ≤ |α'(x_i − x̃_i, x_j)| + ϵ ≤ ∥x_i − x̃_i∥ ∥x_j∥ C_2^2 + ϵ < (nC_1^2 C_2^2 + 1) ϵ.
As α'_{i,j} and α̃'_{i,j} are bounded from above and below, it is easy to see that |e^{α'_{i,j}} − e^{α̃'_{i,j}}| = e^{α'_{i,j}} |1 − e^{α̃'_{i,j} − α'_{i,j}}| ≤ C |1 − e^{α̃'_{i,j} − α'_{i,j}}| can be controlled to an arbitrary degree.

Theorem 6.3. Assume AS 1-3 hold for the compact set X and L. Given any graph G of size n with node features X ∈ X, and a self-attention layer L on G (fix W_K, W_Q, W_V in α), there exists an O(n)-layer heterogeneous MPNN + VN with specific aggregate/update/message functions that can approximate L on X arbitrarily well.

Proof. The i-th MPNN + VN layer selects x̃_i, an arbitrarily good approximation of the i-th node feature x_i, via the attention mechanism; this is detailed in the message/aggregation function of the virtual node in Appendix B.2.1. Assuming the regularity condition on the feature space X detailed in AS3, the approximation error can be made as small as needed, as shown in Lemmas 6.2 and B.7. The virtual node then passes x̃_i to all graph nodes, which compute an approximation of e^{α'(x̃_i, x_j)} for all j ∈ [n]. This step is detailed in the update function γ^(k)_gn of the graph nodes, which can also be approximated arbitrarily well by an MLP, as proved in Lemma B.4. By Lemma B.5, we obtain an arbitrarily good approximation of e^{α'(x̃_i, x_j)} for all j ∈ [n], which is itself an arbitrarily good approximation of e^{α'(x_i, x_j)} for all j ∈ [n].

Repeating this procedure n times for all graph nodes, we obtain arbitrarily good approximations of Σ_{k∈[n]} e^{α'_{ik}} W_V x_k ∈ R^d and Σ_{k∈[n]} e^{α'_{ik}} ∈ R. Finally, we use the last layer to approximate the attention normalization L_c(x, y) = x/y, where x ∈ R^d and y ∈ R. The inputs of the attention normalization are arbitrarily good approximations of Σ_{k∈[n]} e^{α'_{ik}} W_V x_k and Σ_{k∈[n]} e^{α'_{ik}}, both of which are lower/upper bounded according to AS1 and AS2. Since the denominator is bounded away from zero by a positive number, the target function L_c is continuous in both arguments. Invoking Lemma B.5 again, we conclude that we can approximate its output (Σ_{k∈[n]} e^{α'_{ik}} W_V x_k) / (Σ_{k∈[n]} e^{α'_{ik}}) arbitrarily well. This concludes the proof.
B.5. Relaxing Assumptions with More Powerful Attention

One limitation of Theorem 6.3 is the assumptions on the node feature space X: we need to 1) restrict the variability of the node features so that one node feature can be selected for processing at each iteration, and 2) require the node feature space to satisfy a certain geometric configuration so that the VN can select it. For 2), we now consider a different attention function for α_vn in MPNN + VN that relaxes assumption AS3 on X.

More powerful attention mechanism. From the proof of Theorem 6.3, we only need α(·, ·) to uniformly select every node in X ∈ X. The unnormalized bilinear attention α' is weak in the sense that f(·) = ⟨x_i W_Q W_K^T, ·⟩ has linear level sets. This constraint can be relaxed via an improved attention module, GATv2. Observing that the ranking of the attention scores given by GAT (Veličković et al., 2017) is unconditioned on the query node, Brody et al. (2021) proposed GATv2, a more expressive attention mechanism. In particular, the unnormalized attention score is α'_GATv2(u, v) := a^T LeakyReLU(W · [u∥v] + b), where [·∥·] denotes concatenation. We let α_vn = α_GATv2 to select features in τ_{j∈[n]} φ^(k)_{vn-gn}.

Figure 2: (Left) An example of X being (V, δ) separable, for which α can uniformly select any point x_i ∈ X_i (marked in red). (Right) Changing α_vn in MPNN + VN to α_GATv2 allows more diverse feature configurations to be selected: the cluster in the middle cannot be selected by any α ∈ A, but can be selected by α_GATv2 according to Proposition B.10.

Lemma B.8. α'_GATv2(·, ·) can approximate any continuous function from R^d × R^d → R. Moreover, for any v ∈ R^d, a restriction of α'_GATv2(·, v) can approximate any continuous function from R^d → R.

Proof. Any function continuous in both arguments of α'_GATv2 is also continuous in the concatenation of both arguments. As any continuous function on a compact domain of R^{2d} can be approximated by α'_GATv2 according to the universality of MLPs (Cybenko, 1989), the first statement follows.

For the second statement, write W as a 2 × 2 block matrix and restrict to the case where only W_11 is non-zero. Then

α'_GATv2(u, v) = a^T LeakyReLU([W_11, W_12; W_21, W_22] · [u; v] + b) = a^T LeakyReLU(W_11 u + b),        (11)

which is an MLP in the first argument u. By the universality of MLPs, the second statement follows.

Definition B.9. Given δ > 0, we call X δ nonlinearly separable if and only if min_{i≠j} d(X_i, X_j) > δ.

AS3'. X is δ nonlinearly separable for some δ > 0.

Proposition B.10. If X ⊂ R^{n×d} is such that X_i is δ-separated from X_j for any i, j ∈ [n], then the following holds: for any X ∈ X and i ∈ [n], there exists an α_GATv2 that selects any x_i ∈ X_i. This implies that we can arbitrarily approximate the self-attention layer L after relaxing AS3 to AS3'.

Proof. For any i ∈ [n], as X_i is δ-separated from every other X_j, j ≠ i, we can draw a region Ω_i ⊂ R^d that contains X_i and separates X_i from the other X_j (j ≠ i), where the distance from X_i to every other X_j is at least δ by Definition B.9. Next, we show how to construct a continuous function f whose value on X_i is at least 1 larger than its values on every other X_j, j ≠ i.

We set the values of f on X_i to 1.5 and the values of f on X_j, j ≠ i, to 0. We can then interpolate f in the areas outside of ∪_i X_i (one way is to set the value f(x) based on d(x, X_i)), which results in a continuous function that satisfies our requirement. By the universality of α_GATv2, we can approximate f to arbitrary precision, and this lets us select any X_i.
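For reference, the two unnormalized score functions contrasted in Appendix B.5 can be written side by side (our toy sketch with assumed dimensions); the point is that the GATv2 score is an MLP of the pair (u, v), so its level sets need not be linear:

```python
import numpy as np

rng = np.random.default_rng(0)
d = 4
W = rng.normal(size=(d, 2 * d))   # acts on the concatenation [u || v]
b = np.zeros(d)
a = rng.normal(size=d)
WQ, WK = rng.normal(size=(d, d)), rng.normal(size=(d, d))

def leaky_relu(z, slope=0.2):
    return np.where(z > 0, z, slope * z)

def gatv2_score(u, v):
    # alpha'_GATv2(u, v) = a^T LeakyReLU(W [u || v] + b)
    return a @ leaky_relu(W @ np.concatenate([u, v]) + b)

def bilinear_score(u, v):
    # standard unnormalized GT attention: u WQ WK^T v^T
    return u @ WQ @ WK.T @ v

u, v = rng.normal(size=d), rng.normal(size=d)
print(gatv2_score(u, v), bilinear_score(u, v))
```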
+ C. On the Limitation of MPNN + VN
1995
+ Although we showed that in the main paper, MPNN + VN of varying depth/width can approximate the self-attention of
1996
+ full/linear transformers, this does not imply that there is no difference in practice between MPNN + VN and GT. Our
1997
+ theoretical analysis mainly focuses on approximating self-attention without considering computational efficiency. In this
1998
+ section, we mention a few limitations of MPNN + VN compared to GT.
1999
+ C.1. Representation Gap
2000
+ The main limitation of deep MPNN + VN approximating full self-attention is that we require a quite strong assumption:
2001
+ we restrict the variability of node features in order to select one node feature to process each iteration. Such assumption is
2002
+ relaxed by employing stronger attention in MPNN + VN but is still quite strong.
2003
+ For the large-width case, the main limitation is the computational complexity: even though the self-attention layer requires
+ O(n^2) complexity, to approximate it in the wide MPNN + VN framework, the complexity becomes O(n^d), where d is the
+ dimension of node features.
2006
+ We think such a limitation shares a similarity with research on universal permutation-invariant functions. Both DeepSets
+ (Zaheer et al., 2017) and Relational Networks (Santoro et al., 2017) are universal permutation-invariant architectures, but
2010
+ there is still a representation gap between the two (Zweig & Bruna, 2022). Under the restriction to analytic activation
2011
+ functions, one can construct a symmetric function acting on sets of size n with elements in dimension d, which can be
2012
+ efficiently approximated by the Relational Network, but provably requires width exponential in n and d for the DeepSets.
2013
+ We believe a similar representation gap also exists between GT and MPNN + VN, and we leave the characterization of functions
+ lying in this gap as future work.
2015
+ C.2. On The Difficulty of Approximating Other Linear Transformers
2016
+ In Section 4, we showed MPNN + VN of O(1) width and depth can approximate the self-attention layer of one type of
2017
+ linear transformer, Performer. The literature on efficient transformers is vast (Tay et al., 2020) and we do not expect MPNN
2018
+ + VN can approximate many other efficient transformers. Here we sketch a few other linear transformers that are hard to
2019
+ approximate by MPNN + VN of constant depth and width.
2020
+ Linformer (Wang et al., 2020b) projects the n×d keys and values to k×d using additional projection layers, which
+ in the graph setting is equivalent to graph coarsening. As MPNN + VN still operates on the original graph, it fundamentally
2022
+ lacks the key component to approximate Linformer.
2023
+ We consider various types of efficient transformers as effectively generalizing the virtual node trick. By first switching to a more
2024
+ expansive model and reducing the computational complexity later on, efficient transformers effectively explore a larger
2025
+ model design space than MPNN + VN, which always sticks to the linear complexity.
2026
+ C.3. Difficulty of Representing SAN Type Attention
2027
+ In SAN (Kreuzer et al., 2021), different attentions are used conditional on whether an edge is presented in the graph or not,
2028
+ detailed below. One may wonder whether we can approximate such a framework in MPNN + VN.
2029
+ In our proof of using MPNN + VN to approximate regular GT, we mainly work with Definition 3.4, where we do not use any
+ gn-gn edges and therefore do not leverage the graph topology. It is straightforward to use gn-gn edges and obtain different
+ message/update/aggregate functions for gn-gn edges and non-gn-gn edges. Although we would still achieve a similar goal to SAN by
+ conditioning on the edge types, it turns out that we cannot arbitrarily approximate SAN.
2033
+ Without loss of generality, SAN uses two types of attention depending on whether two nodes are connected by the edge.
2034
+ Specifically,
2035
+ \hat{w}^{k,l}_{ij} =
+   \begin{cases}
+     \frac{Q^{1,k,l} h^l_i \circ K^{1,k,l} h^l_j \circ E^{1,k,l} e_{ij}}{\sqrt{d_k}} & \text{if } i \text{ and } j \text{ are connected in the sparse graph} \\
+     \frac{Q^{2,k,l} h^l_i \circ K^{2,k,l} h^l_j \circ E^{2,k,l} e_{ij}}{\sqrt{d_k}} & \text{otherwise}
+   \end{cases}
+ w^{k,l}_{ij} =
+   \begin{cases}
+     \frac{1}{1+\gamma} \cdot \mathrm{softmax}\big(\sum_{d_k} \hat{w}^{k,l}_{ij}\big) & \text{if } i \text{ and } j \text{ are connected in the sparse graph} \\
+     \frac{\gamma}{1+\gamma} \cdot \mathrm{softmax}\big(\sum_{d_k} \hat{w}^{k,l}_{ij}\big) & \text{otherwise}
+   \end{cases}
+ (12)
2076
+ where ◦ denotes element-wise multiplication and Q^{1,k,l}, Q^{2,k,l}, K^{1,k,l}, K^{2,k,l}, E^{1,k,l}, E^{2,k,l} ∈ R^{d_k×d}. γ ∈ R_+ is a
2077
+ hyperparameter that tunes the amount of bias towards full-graph attention, allowing flexibility of the model to different
2078
+ datasets and tasks where the necessity to capture long-range dependencies may vary.
2079
+ To reduce notation clutter, we remove the layer index l and edge features, and also consider only the one-attention-head
+ case (removing the attention index k). The equation is then simplified to
2081
+ \hat{w}_{ij} =
+   \begin{cases}
+     \frac{Q^1 h^l_i \circ K^1 h^l_j}{\sqrt{d_k}} & \text{if } i \text{ and } j \text{ are connected in the sparse graph} \\
+     \frac{Q^2 h^l_i \circ K^2 h^l_j}{\sqrt{d_k}} & \text{otherwise}
+   \end{cases}
+ w_{ij} =
+   \begin{cases}
+     \frac{1}{1+\gamma} \cdot \mathrm{softmax}\big(\sum_{d} \hat{w}_{ij}\big) & \text{if } i \text{ and } j \text{ are connected in the sparse graph} \\
+     \frac{\gamma}{1+\gamma} \cdot \mathrm{softmax}\big(\sum_{d} \hat{w}_{ij}\big) & \text{otherwise}
+   \end{cases}
+ (13)
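+ As a non-authoritative illustration, one plausible PyTorch reading of this simplified two-type attention is sketched below;
+ Q1, K1, Q2, K2 are assumed to be linear maps, adj is the boolean adjacency of the sparse graph, and the placement of the
+ softmax follows our reading of SAN rather than a specification from this paper.
+ import torch
+
+ def san_attention_weights(h, Q1, K1, Q2, K2, adj, gamma, d_k):
+     """Sketch of Eq. (13): one attention for real edges, another for non-edges,
+     re-weighted by 1/(1+gamma) and gamma/(1+gamma) respectively."""
+     w1 = (Q1(h).unsqueeze(1) * K1(h).unsqueeze(0)).sum(-1) / d_k ** 0.5   # (n, n) scores
+     w2 = (Q2(h).unsqueeze(1) * K2(h).unsqueeze(0)).sum(-1) / d_k ** 0.5
+     s1 = torch.softmax(w1.masked_fill(~adj, float('-inf')), dim=-1).nan_to_num(0.0)
+     s2 = torch.softmax(w2.masked_fill(adj, float('-inf')), dim=-1).nan_to_num(0.0)
+     return s1 / (1 + gamma) + gamma / (1 + gamma) * s2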
2110
+ We will then show that Equation (13) cannot be expressed (up to an arbitrary approximation error) in the MPNN + VN
+ framework. To simulate SAN-type attention, our MPNN + VN framework would have to first simulate one type of attention
+ for all edges, as we did in the main paper, and then simulate the second type of attention on gn-gn edges by properly
+ offsetting the contribution from the first attention. This turns out to be impossible, as we cannot express the difference between
+ the two attentions in the new attention mechanism.
2117
+ D. Experimental Details
2118
+ D.1. Dataset Description
2119
+ ogbg-molhiv and ogbg-molpcba (Hu et al., 2020) are molecular property prediction datasets adopted by OGB from
2120
+ MoleculeNet. These datasets use a common node (atom) and edge (bond) featurization that represent chemophysical
2121
+ properties. The prediction task of ogbg-molhiv is a binary classification of a molecule's fitness to inhibit HIV replication. The
2122
+ ogbg-molpcba, derived from PubChem BioAssay, targets to predict the results of 128 bioassays in the multi-task binary
2123
+ classification setting.
2124
+ ogbg-ppa (Wu et al., 2021) consists of protein-protein association (PPA) networks derived from 1581 species categorized
2125
+ into 37 taxonomic groups. Nodes represent proteins and edges encode the normalized level of 7 different associations
2126
+ between two proteins. The task is to classify which of the 37 taxonomic groups a PPA network originates from.
2127
+ ogbg-code2 (Wu et al., 2021) consists of abstract syntax trees (ASTs) derived from the source code of functions written in
2128
+ Python. The task is to predict the first 5 subtokens of the original function's name.
2129
+ OGB-LSC PCQM4Mv2 (Hu et al., 2021) is a large-scale molecular dataset that shares the same featurization as ogbg-mol*
2130
+ datasets. It consists of 529,434 molecule graphs. The task is to predict the HOMO-LUMO gap, a quantum physical property
2131
+ originally calculated using Density Functional Theory. True labels for the original 'test-dev' and 'test-challenge' dataset
2132
+ splits are kept private by the OGB-LSC challenge organizers. Therefore for the purpose of this paper, we used the original
2133
+ validation set as the test set, while we left out random 150K molecules for our validation set.
2134
+ D.2. Reproducibility
2135
+ For LRGB results in Section 7.1, we reproduce the original results up to very small differences.
2136
+ Table 7: Reproduce the original results up to small differences. No VN is used.
2137
+ Model           | # Params. | Peptides-func Test AP (reproduce) | Peptides-func Test AP ↑ | Peptides-struct Test MAE (reproduce) | Peptides-struct Test MAE ↓
+ GCN             | 508k      | 0.5918±0.0065 | 0.5930±0.0023 | 0.3468±0.0009 | 0.3496±0.0013
+ GINE            | 476k      | 0.5595±0.0126 | 0.5498±0.0079 | 0.3532±0.0024 | 0.3547±0.0045
+ GatedGCN        | 509k      | 0.5886±0.0027 | 0.5864±0.0077 | 0.3409±0.0011 | 0.3420±0.0013
+ GatedGCN+RWSE   | 506k      | 0.6083±0.0032 | 0.6069±0.0035 | 0.3377±0.0025 | 0.3357±0.0006
2169
+ D.3. Additional Experiments
2170
+ We tested MPNN + VN on the PascalVOC-SP dataset and also observe an improvement, shown in Table 8, although the
+ improvement is not as large as that on the Peptides-func and Peptides-struct datasets. The best MPNN + VN model
+ is GatedGCN + LapPE, where the performance gap to the best GT model is rather small.
2173
+ D.4. Predicting Sea Surface Temperature
2174
+ In this experiment, we consider a specific physical modeling problem: forecasting sea surface temperature (SST), that
+ is, the water temperature close to the ocean's surface. SST is an essential climate indicator and plays a significant role
2176
+ in analyzing and monitoring the dynamics of weather, climate, and other biological systems for several applications in
2177
+ environmental protection, agriculture, and industry. We use the NOAA/NESDIS/NCEI Daily Optimum Interpolation Sea
2178
+ Surface Temperature (DOISST) version 2.1 proposed by (Huang et al., 2021) as an improvement upon version 2.0 from
2179
+ (Reynolds et al., 2007). We consider the daily SST data of the Pacific Ocean from 1982 to 2021, in the region of longitudes
2180
+ from 180.125◦E to 269.875◦E and latitudes from −14.875◦N to 14.875◦N. We reduce the resolution of the original data
2181
+ from 0.25◦-degree to 0.5◦-degree. Following the procedure from (de Bezenac et al., 2018), (de Bézenac et al., 2019) and
2182
+ (Wang et al., 2022), we divide the region into 11 square batches of equal size (see Table 10), each contains exactly 30
2183
+ Table 8: Baseline experiments for PascalVOC-SP and COCO-SP with rag-boundary graph on SLIC compactness
2186
+ 30 for the node classification task. The performance metric is macro F1 on the respective splits (Higher is better). All
2187
+ experiments are run 4 times with 4 different seeds. The MP-GNN models are 8 layers deep, while the transformer-based
2188
+ models have 4 layers in order to maintain comparable hidden representation size at the fixed parameter budget of 500k.
2189
+ Bold: Best score.
2190
+ Model             | # Params | PascalVOC-SP Before VN Test F1 | PascalVOC-SP After VN Test F1 ↑
+ GCN               | 496k | 0.1268±0.0060 | 0.1901±0.0040
+ GINE              | 505k | 0.1265±0.0076 | 0.1198±0.0073
+ GatedGCN          | 502k | 0.2873±0.0219 | 0.2874±0.0178
+ GatedGCN+LapPE    | 502k | 0.2860±0.0085 | 0.3103±0.0068
+ Transformer+LapPE | 501k | 0.2694±0.0098 | -
+ SAN+LapPE         | 531k | 0.3230±0.0039 | -
+ SAN+RWSE          | 468k | 0.3216±0.0027 | -
2223
+ Table 9: Number of training, validation and testing examples for each setting in the task of SST prediction.
2224
+ History window | Prediction window | Train size | Validation size | Test size
+ 6 weeks        | 4 weeks | 147,884 | 3,245 | 7,271
+ 6 weeks        | 2 weeks | 148,038 | 3,399 | 7,425
+ 6 weeks        | 1 week  | 148,115 | 3,476 | 7,502
2242
+ longitudes and 30 latitudes that can be represented as a grid graph of 900 nodes in which we connect each node to its nearest
2243
+ 8 neighbors. We take time series from 1982 to 2018 as our training set, data in 2019 as our validation set, and data from 2020
2244
+ to 2021 as our testing set. In our experiments, we set the history window wh as 6 weeks (i.e. 42 days) and the prediction
2245
+ window wp as 4 weeks (i.e. 28 days), 2 weeks (i.e. 14 days) or 1 week (i.e. 7 days). For each example, each node of the
2246
+ graph is associated with an input time series capturing the temperatures at the corresponding (longitude, latitude) for the
2247
+ last wh days, and the task is to predict the output time series of temperatures for the next wp days. We represent each time
2248
+ series as a long vector and the learning task is fundamentally a node-level regression task. We make sure that there is no
2249
+ overlapping among training, validation and testing sets (e.g., the output of a training example will not appear in any input
2250
+ of another validation example). The number of training, validation, and testing examples are roughly 150K, 3K and 7K,
2251
+ respectively for each setting (see Table 9). We compare our MPNN + VN model with:
2252
+ • Multilayer Perceptron (MLP) which treats both the input and output as long vectors and has 512 hidden neurons.
2253
+ • TF-Net (Wang et al., 2020a) with the setting as in the original paper.
2254
+ • Linear Transformer (Katharopoulos et al., 2020a) (Wang et al., 2020b)5 with Laplacian positional encoding (LapPE).
2255
+ We compute the first 16 eigenvectors as positions for LapPE.
2256
+ Both MPNN and MPNN + VN have 3 layers of message passing with 256 hidden dimensions. We apply an MLP with one
2257
+ hidden layer of 512 neurons on top of the network to make the final prediction.
2258
+ We train all our models for 100 epochs with batch size 20, initial learning rate 10^{-3}, and the Adam optimizer (Kingma & Ba,
+ 2014).
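+ For concreteness, a minimal sketch of the 30 × 30 grid-graph construction with 8-neighbor connectivity and of the
+ sliding-window sample creation described above is given below; function names and array layouts are our own assumptions.
+ import numpy as np
+
+ def build_grid_graph(h=30, w=30):
+     """8-neighbour edges for an h x w grid, giving the 900-node SST graph."""
+     edges = []
+     for r in range(h):
+         for c in range(w):
+             for dr in (-1, 0, 1):
+                 for dc in (-1, 0, 1):
+                     rr, cc = r + dr, c + dc
+                     if (dr or dc) and 0 <= rr < h and 0 <= cc < w:
+                         edges.append((r * w + c, rr * w + cc))
+     return np.array(edges).T        # shape (2, num_edges)
+
+ def make_sample(sst, t, w_h=42, w_p=28):
+     """Node features are the past w_h days, targets the next w_p days."""
+     return sst[:, t - w_h:t], sst[:, t:t + w_p]   # (900, 42), (900, 28)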
2260
+ 5The Linear Transformer implementation is publicly available at https://github.com/lucidrains/linear-attention-transformer
2270
+ Table 10: These are 11 regions of the Pacific in our experiment.
2273
+ Index | Longitudes             | Latitudes
+ 1     | [180.125°E, 194.875°E] | [-14.875°N, -0.125°N]
+ 2     | [195.125°E, 209.875°E] | [-14.875°N, -0.125°N]
+ 3     | [210.125°E, 224.875°E] | [-14.875°N, -0.125°N]
+ 4     | [225.125°E, 239.875°E] | [-14.875°N, -0.125°N]
+ 5     | [240.125°E, 254.875°E] | [-14.875°N, -0.125°N]
+ 6     | [255.125°E, 269.875°E] | [-14.875°N, -0.125°N]
+ 7     | [180.125°E, 194.875°E] | [0.125°N, 14.875°N]
+ 8     | [195.125°E, 209.875°E] | [0.125°N, 14.875°N]
+ 9     | [210.125°E, 224.875°E] | [0.125°N, 14.875°N]
+ 10    | [225.125°E, 239.875°E] | [0.125°N, 14.875°N]
+ 11    | [240.125°E, 254.875°E] | [0.125°N, 14.875°N]
2309
+
5NFKT4oBgHgl3EQf-C45/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
5dE0T4oBgHgl3EQfegDz/content/tmp_files/2301.02393v1.pdf.txt ADDED
@@ -0,0 +1,2304 @@
1
+ IEEE TRANSACTIONS ON MEDICAL IMAGING, VOL. XX, NO. XX, XXXX
2
+ 1
3
+ Graph Convolution Based Cross-Network
4
+ Multi-Scale Feature Fusion for Deep Vessel
5
+ Segmentation
6
+ Gangming Zhao, Kongming Liang, Chengwei Pan, Fandong Zhang, Xianpeng Wu,
7
+ Xinyang Hu, and Yizhou Yu, Fellow, IEEE
8
+ Abstract— Vessel segmentation is widely used to help
9
+ with vascular disease diagnosis. Vessels reconstructed
10
+ using existing methods are often not sufficiently accurate
11
+ to meet clinical use standards. This is because 3D vessel
12
+ structures are highly complicated and exhibit unique char-
13
+ acteristics, including sparsity and anisotropy. In this paper,
14
+ we propose a novel hybrid deep neural network for ves-
15
+ sel segmentation. Our network consists of two cascaded
16
+ subnetworks performing initial and refined segmentation
17
+ respectively. The second subnetwork further has two tightly
18
+ coupled components, a traditional CNN-based U-Net and
19
+ a graph U-Net. Cross-network multi-scale feature fusion is
20
+ performed between these two U-shaped networks to effec-
21
+ tively support high-quality vessel segmentation. The entire
22
+ cascaded network can be trained from end to end. The
23
+ graph in the second subnetwork is constructed according
24
+ to a vessel probability map as well as appearance and
25
+ semantic similarities in the original CT volume. To tackle
26
+ the challenges caused by the sparsity and anisotropy of
27
+ vessels, a higher percentage of graph nodes are distributed
28
+ in areas that potentially contain vessels while a higher per-
29
+ centage of edges follow the orientation of potential nearby
30
+ vessels. Extensive experiments demonstrate our deep net-
31
+ work achieves state-of-the-art 3D vessel segmentation per-
32
+ formance on multiple public and in-house datasets.
33
+ Index Terms— Vessel Segmentation, Graph Convolu-
34
+ tional Networks, Deep Learning
35
+ This work was funded in part by National Key Research and Develop-
36
+ ment Program of China (No. 2019YFC0118101), National Natural Sci-
37
+ ence Foundation of China (Grant Nos. 62141605 and 82072005), Key
38
+ Program of Beijing Municipal Natural Science Foundation (No.7191003),
39
+ and Zhejiang Province Key Research & Development Program (No.
40
+ 2020C03073). (Corresponding authors: Yizhou Yu and Xinyang Hu.)
41
+ Gangming Zhao and Yizhou Yu are with the Department of Com-
42
+ puter Science, The University of Hong Kong, Hong Kong (e-mail:
43
+ gmzhao@connect.hku.hk, yizhouy@acm.org).
44
+ Kongming Liang is with Pattern Recognition and Intelligent Sys-
45
+ tem Laboratory, School of Artificial Intelligence, Beijing University
46
+ of Posts and Telecommunications, Beijing, China (e-mail: liangkong-
47
+ ming@bupt.edu.cn).
48
+ Chengwei Pan is with Institute of Artificial Intelligence, Beihang Uni-
49
+ versity, Beijing, China (e-mail: pancw@buaa.edu.cn).
50
+ Fandong Zhang is with the AI Lab, Deepwise Healthcare, Beijing,
51
+ China (e-mail: zhangfandong@deepwise.com).
52
+ Xinyang Hu and Xianpeng Wu are with Department of Cardiol-
53
+ ogy of the Second Affiliated Hospital, School of Medicine, Zhejiang
54
+ University, Hangzhou, China, and Key Laboratory of Cardiovascular
55
+ of Zhejiang Province, Hangzhou, China (e-mail: hxy0507@zju.edu.cn,
56
+ wxpzju123@163.com)
57
+ G. Zhao, K. Liang and C. Pan have equal contribution.
58
+ I. INTRODUCTION
59
+ V
60
+ ESSEL segmentation is widely used in daily practice
61
+ to characterize many vascular diseases [1], [2]. For
62
+ example, obstructed vessels may lead to coronary heart
+ disease, which is a leading cause of death worldwide [3],
64
+ [4]. Since clinicians mainly rely on interactive tracing and
65
+ segmentation, vessel reconstruction is traditionally a very
66
+ time-consuming process and affects the efficiency of diagnosis
67
+ and intervention. Thus, automatic vessel segmentation can
68
+ facilitate the reviewing process and plays an important role
69
+ in medical image analysis.
70
+ Over the years, numerous methods have been proposed for
71
+ automatic vessel segmentation. Due to the state-of-the-art per-
72
+ formance of convolutional neural networks (CNNs) on a wide
73
+ range of pixel-level labelling tasks [5]–[7], CNNs has also
74
+ been applied to vessel segmentation [8]–[10]. Nonetheless,
75
+ the reconstructed vessels are often not sufficiently accurate to
76
+ meet clinical use standards. This is because vessel structures
77
+ in 3D CT volumes are highly complicated and exhibit unique
78
+ characteristics. First, since vessels are thin structures, they
79
+ only occupy a sparse subset of pixels. Thus, there exists
80
+ a severe imbalance between vessel and non-vessel pixels.
81
+ Second, vessel segments are elongated tubular structures that
82
+ are highly directional and anisotropic. Conventional CNNs
83
+ adopt uniform spatial sampling, and therefore, are inept at
84
+ modeling such sparse and anisotropic structures, giving rise
85
+ to broken or incomplete results. Thus it becomes critical to
86
+ design deep neural networks that can effectively exploit the
87
+ aforementioned characteristics of vessels.
88
+ In this paper, we propose a novel hybrid deep neural
89
+ network for vessel segmentation. Our network consists of two
90
+ cascaded subnetworks performing initial and refined segmen-
91
+ tation respectively. The second subnetwork further consists of
92
+ two tightly coupled components, a traditional CNN-based U-
93
+ shaped network and a graph-based U-shaped network based on
94
+ graph convolutions. Cross-network multi-scale feature fusion
95
+ is performed between these two U-shaped networks to ef-
96
+ fectively support high-quality vessel segmentation. The entire
97
+ cascaded network can be trained from end to end.
98
+ As shown in previous work [11]–[13], graph convolutional
99
+ networks naturally possess a complex shape modeling ability
100
+ which is well suited for structured data. By setting local
101
+ arXiv:2301.02393v1 [eess.IV] 6 Jan 2023
102
+ regions (supervoxels) in a CT volume as nodes and con-
110
+ nections among nearby local regions as edges, the whole
111
+ CT volume can be regarded as a graph. Specifically, the
112
+ graph in the second subnetwork is constructed according to
113
+ a vessel probability map as well as appearance and semantic
114
+ similarities in the original CT volume. To tackle the challenges
115
+ brought up by the aforementioned characteristics of vessels,
116
+ a higher percentage of graph nodes are distributed in areas
117
+ that potentially contain vessels while a higher percentage of
118
+ edges follow the orientation of potential nearby vessels. In
119
+ addition, the CNN-based U-shaped network is first utilized to
120
+ extract multi-scale features from the original CT volume. Then
121
+ at every scale, the features from the CNN are mapped into
122
+ node features at the corresponding scale of the graph-based
123
+ U-shaped network and propagated by the GCN at that scale
124
+ to counteract sparsity and anisotropy. Finally, the enhanced
125
+ features are reversely mapped into the spatial domain and
126
+ fused with the original features extracted by the CNN-based
127
+ U-shaped network.
128
+ In summary, our contributions in this paper are three-fold:
129
+ • We propose a cascaded deep neural network for vessel
130
+ segmentation. The two subnetworks in the cascade are
131
+ respectively responsible for initial and refined segmen-
132
+ tation. There are a pair of tightly coupled U-shaped
133
+ networks in the second subnetwork of the cascade, one
134
+ based on CNN and the other based on GCN. Cross-
135
+ network multi-scale feature fusion is performed between
136
+ these two U-shaped networks to effectively support high-
137
+ quality vessel segmentation.
138
+ • We propose a novel way to transform a dense 3D CT
139
+ volume to a sparse graph format, which can efficiently
140
+ represent sparse and anisotropic vessel structures. More-
141
+ over, our method integrates both appearance and semantic
142
+ similarities for graph construction.
143
+ • Extensive
144
+ experiments
145
+ indicate
146
+ our
147
+ deep
148
+ network
149
+ achieves state-of-the-art 3D vessel segmentation perfor-
150
+ mance on multiple public and in-house datasets for coro-
151
+ nary vessels as well as head and neck vessels, including
152
+ the public ASOCA dataset.
153
+ II. RELATED WORK
154
+ A. Graph Convolutional Networks
155
+ Though CNNs achieve impressive performance in many
156
+ computer vision tasks, they cannot efficiently handle graph-
+ structured data. To operate directly on graphs, GCN [11] was
+ proposed, using a layer-wise propagation rule for neural net-
159
+ work models. Li et al. [13] further adapted the residual/dense
160
+ connections and dilated convolutions from CNNs into GCN
161
+ which can solve the vanishing gradient problem and increase the
162
+ depth of GCN. Gao et al. [14] proposed graph pooling and
163
+ unpooling operations to develop an encoder-decoder model
164
+ on graph for node classification. The above methods show
165
+ that GCNs can achieve promising results on modeling graph
166
+ structure. However, it is still challenging to integrate GCNs
167
+ into an existing image segmentation framework which is
168
+ dominated by CNNs.
169
+ B. Multi-scale feature modeling
170
+ Multi-scale feature modeling can efficiently capture the
171
+ global contextual dependencies, which play an important role
172
+ in image segmentation. Kamnitsas et al. [15] proposed a dual
173
+ pathway deep convolutional neural network. The proposed
174
+ dual pathway network incorporates both local and larger con-
175
+ textual information by processing the input images at multiple
176
+ scales simultaneously. Chen et al. [16] proposed to use several
177
+ parallel atrous convolution with different rates to model the
178
+ contextual dependencies at multiple scales. Zhao et al. [17]
179
+ proposed a pyramid pooling module to generate feature maps
180
+ in different levels for scene parsing. Recently, Tao et al. [18]
181
+ proposed to combine multi-scale predictions with attention
182
+ mechanism and achieved the state-of-the-art on Cityscapes
183
+ and Mapillary Vistas. However, all the above methods adopt
184
+ uniform spatial sampling for multi-scale feature learning and
185
+ fail to model the sparsity and anisotropy of vessels.
186
+ C. Medical Image Segmentation
187
+ Deep learning has become a methodology of choice for
188
+ medical image segmentation. Ronneberger et al. proposed
189
+ UNET [19], which has an encoder-decoder architecture. To
190
+ avoid missing spatial information, the decoder features from
191
+ the previous level are up-sampled and combined with the
192
+ encoder features at the corresponding level through skip con-
193
+ nections. The 3D version of UNET [20] was further proposed
194
+ by replacing all 2D operations with their 3D counterparts. In
195
+ addition, a hybrid densely connected UNET [21] was proposed
196
+ to extract intra-slice features with a 2D DenseUNET and
197
+ aggregate volumetric contexts with its 3D counterpart. Dou et
198
+ al. [22] presented a 3D fully convolutional network equipped
199
+ with a 3D deep supervision mechanism to combat potential
200
+ optimization difficulties. Likewise, Zhu et al. [23] proposed to
201
+ use eight additional deeply supervised layers in their architec-
202
+ ture. Jiang et al. [24] developed two multi-resolution residually
203
+ connected networks to simultaneously combine features across
204
+ multiple image resolutions and feature levels. ACSNet [25]
205
+ combines global contexts and local details to deal with the
206
+ shape and size variations of segmented regions. Similarly,
207
+ PraNet [26] aggregates multi-scale features and successively
208
+ refines the segmentation map through boundary extraction.
209
+ Recently, Isensee et al. proposed nnUNET [27], which auto-
210
+ matically adapts its architecture according to the geometry of
211
+ input images. Zhou et al. [28] introduced nnFormer, which is
212
+ an encoder-decoder architecture for volumetric medical image
213
+ segmentation through the combination of convolution layers
214
+ and Transformer blocks. In addition, the gated axial-attention
215
+ model in [29] extends the existing architectures and introduces
216
+ an additional control mechanism with a Local-Global training
217
+ strategy.
218
+ D. Vessel Segmentation
219
+ Vessel segmentation plays an important role in medical
220
+ image analysis. Kong et al. [9] proposed to use a tree-
221
+ structured convolutional gated recurrent unit (ConvGRU) layer
222
+ for modeling the anatomical structure of vessels. Since the
223
+ Fig. 1.
260
+ Our proposed pipeline for vessel segmentation consists of three stages, preliminary segmentation using a U-Net (UNET-0), graph
261
+ construction, and final segmentation with a cascaded network, which further consists of two subnetworks with the first subnetwork being a U-
262
+ Net (UNET-1) and the second subnetwork being a pair of tightly coupled U-shaped networks, a CNN-based U-Net (UNET-2) and a graph U-Net
263
+ (UNET-G). The preliminary segmentation in the first stage is used by the second stage to construct a graph, whose topology becomes the first level
264
+ graph in UNET-G.
265
+ input of the ConvGRU layer is a uniform local patch, their
266
+ method cannot well exploit the anisotropy of vessels. Wang
267
+ et al. [10] proposed a multi-task network to predict a vessel
268
+ segmentation mask and a distance map. Values in the map
269
+ represent distances from the center to the surface of every
270
+ vessel. However, the global structure of vessels is not consid-
271
+ ered, which limits contextual dependency modeling. There is
272
+ much work [8], [30]–[32] on the utilization of graph neural
273
+ networks for vessel segmentation. Shin et al. [8] incorporated
274
+ a GCN into a CNN architecture to exploit the global structure
275
+ of vessel shape. However, only the pixel with maximum
276
+ vessel probability within every rectangular patch is sampled
277
+ as a graph node, which limits the representation ability of
278
+ the graph. In addition, GCN features are only calculated at
279
+ a single scale and do not interact with CNN features. In
280
+ contrast, our framework exhibits a very different way to learn
281
+ the structural information of vessels. Specifically, we exploit
282
+ superpixel generation algorithms such as SLIC [33] to better
283
+ model the sparsity and anisotropy of vessels, and tightly couple
284
+ a graph U-Net and a traditional CNN-based U-Net through
285
+ multi-scale feature fusion across these two networks to better
286
+ support high-quality vessel segmentation.
287
+ III. OUR FRAMEWORK
288
+ A. Overview
289
+ Consider an input 3D image volume X ∈ RD×H×W ,
290
+ where D, H and W are the spatial depth, height and width
291
+ respectively. The pipeline of our proposed method for vessel
292
+ segmentation can be decomposed into three stages as shown
293
+ in Fig. 1.
294
+ Preliminary Segmentation. A U-shaped network, UNET-
295
+ 0, is first utilized to create a probability map of the input
296
+ image volume. This probability map is used for discovering
297
+ local image regions that have a relatively high probability
298
+ to contain vessels. Since the probability map may not be
299
+ very accurate, to reduce the chance of missing regions that
300
+ actually contain vessels, we apply the dilation operator, a
301
+ type of image morphological operators, to the probability
302
+ map to increase the size of image areas with relatively high
303
+ probability values. The result is a preliminary probability map
304
+ denoted as A0 ∈ (0, 1)D×H×W , which is further thresholded
305
+ to produce a preliminary segmentation mask denoted as Y 0 ∈
306
+ {0, 1}D×H×W . The preliminary segmentation mask is used
307
+ for indicating vessel orientations in regions where vessels are
308
+ likely to occur. In our experiments, we use a 7 × 7 square as
309
+ the kernel of the dilation operator.
310
+ Graph Construction. On the basis of the preliminary segmen-
311
+ tation mask Y 0 and probability map A0, a graph G = (V, E) is
312
+ constructed with a node set V, and an edge set E. To counteract
313
+ the characteristics of vessel structures including sparsity and
314
+ anisotropy, a higher percentage of graph nodes are distributed
315
+ in regions where the preliminary probability map has relatively
316
+ large values while a higher percentage of edges follow the
317
+ orientation of the preliminary vessel segmentation mask.
318
+ Final Segmentation with a Cascaded Network. Instead
319
+ of using a network to refine the preliminary segmentation
320
+ result obtained in the first stage, we start from scratch and
321
+ train a cascaded network that takes the original 3D image
322
+ volume as the input, and performs end-to-end segmentation
323
+ to produce the final segmentation result. This network con-
324
+ sists of two cascaded subnetworks performing initial and
325
+ refined segmentation respectively. The first subnetwork is a
+ U-shaped network, UNET-1, which shares the same network
327
+ architecture with UNET-0 in the first stage, but has different
331
+ network weights because it is trained together with the second
332
+ subnetwork. The second subnetwork further consists of two
333
+ tightly coupled components, a traditional CNN-based U-Net
334
+ (UNET-2) and a graph U-Net (UNET-G) [14]. The graph G
335
+ constructed in the second stage becomes the graph with the
336
+ highest spatial resolution in UNET-G. Cross-network multi-
337
+ scale feature fusion is performed between UNET-2 and UNET-
338
+ G to effectively support high-quality vessel segmentation.
339
+ UNET-1 and UNET-2 are cascaded. The output from UNET-1
340
+ includes a hard mask and a soft probability map P. Since the
341
+ input to UNET-2 is the product of P and the original input
342
+ image I, the cascaded network is differentiable. Note that
343
+ UNET-0 is used to construct graphs as a pre-process. Once
344
+ the graphs for all training samples have been precomputed,
345
+ the entire cascaded network can be trained from end to end
346
+ through gradient backpropagation.
347
+ Now let us focus on the second subnetwork. For UNET-2, we represent its convolutional encoder and decoder features
+ as E^c_{1:L} = {E^c_l} and D^c_{1:L} = {D^c_l} respectively, with L being the number of feature levels. The Lth decoder and
+ encoder stages have the lowest spatial resolution. UNET-G has the same number of feature levels as UNET-2. The
+ encoder and decoder stages in UNET-2 and UNET-G have one-to-one correspondence. For the lth encoder in UNET-G,
+ its initial graph feature is created as E^g_l = f(E^c_l, G) through a forward mapping function f(·) proposed in [34] that
+ transforms features between the spatial domain and the node domain. The forward mapping function f(·) is called KNN-map,
+ which utilizes the K nearest neighbors to create the corresponding node feature. Once graph convolutions have
+ been performed on E^g_l, the resulting graph convolutional feature is mapped back to the original convolutional feature
+ space of UNET-2 through a backward mapping function g(·), also proposed in [34], and fused with its initial encoder feature E^c_l.
375
+ B. Graph Construction
376
+ Graph Nodes. Since graph neural networks cannot process
377
+ dense 3D images directly due to high computational cost, we
378
+ first group all the pixels from a 3D image into super-pixels
379
+ and then represent each super-pixel as a graph node. Here
380
+ we use the SLIC algorithm [33] for super-pixel generation.
381
+ In order to capture the 3D structure of vessels, the local
382
+ region (super-pixel) represented by a graph node should satisfy
383
+ the following properties: 1) the summation of the vessel
384
+ probabilities in the region is high; 2) the pixels in the region
385
+ have similar appearance; 3) the shape of the region follows
386
+ the local shape of the vessels. The SLIC algorithm is based
387
+ on a distance measure, which originally consists of two terms,
388
+ grayscale difference and Euclidean distance. To satisfy the
389
+ aforementioned properties, we add a third term based on
390
+ geodesic distance. The updated distance measure for SLIC and
391
+ its three terms are formulated as follows.
392
+ d(i, j) = d_{gray}(i, j) + d_{dis}(i, j) + d_{geo}(i, j),   (1)
+ where
+ d_{gray}(i, j) = |X_i − X_j|,   (2)
397
+ Fig. 2. A simple example illustrating the graph construction process.
463
+ d_{dis}(i, j) = \sqrt{(x_i − x_j)^2 + (y_i − y_j)^2 + (z_i − z_j)^2},   (3)
+ d_{geo}(i, j) = \min_{Q \in P_{i,j}} \sum_{q \in Q} A^0_q \, \| \nabla(X_q + X^0_q) \cdot u_q \|,   (4)
475
+ where Xi denotes the gray scale of the ith pixel, [xi, yi, zi]T
476
+ denotes its spatial coordinates, Pi,j represents the complete
477
+ set of paths from pixel i to pixel j, Q denotes one path in
478
+ Pi,j, q denotes any pixel on Q, and uq represents the unit
479
+ tangent vector of path Q at q. The geodesic distance between
480
+ two points is defined as the minimum of the integration of
481
+ X, X0 and A0 as in (4) among all the paths in Pi,j, where
482
+ X0 = X ◦ Y 0 and ◦ stands for element-wise multiplication.
483
+ ∇(X_q + X^0_q) means the gradient of X_q + X^0_q. The term X_q + X^0_q
+ doubles the value in vessel areas; therefore, it will create more
+ graph nodes in vessels because of a larger distance between
+ different nodes in these areas. In practice, we use Dijkstra's
+ algorithm to calculate the geodesic distance. The definition
+ of geodesic distance in (4) ensures that regions potentially
+ containing vessels have a higher density of graph nodes. Note
+ that the three distance terms have been individually normalized
+ before being added together in the overall distance measure.
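+ As an illustrative sketch only, the combined distance of Eqs. (1)-(4) can be evaluated per pixel pair as follows, assuming each
+ term has been pre-normalized and the geodesic term has been precomputed with Dijkstra's algorithm (all names are ours):
+ import numpy as np
+
+ def slic_distance(i, j, gray, coords, geo_dist):
+     """d(i, j) = d_gray + d_dis + d_geo, Eqs. (1)-(4); terms assumed normalized."""
+     d_gray = abs(gray[i] - gray[j])                    # Eq. (2)
+     d_dis = np.linalg.norm(coords[i] - coords[j])      # Eq. (3)
+     d_geo = geo_dist[i, j]                             # Eq. (4): A0-weighted gradient path cost
+     return d_gray + d_dis + d_geo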
495
+ Graph Edges. As there typically exist a large number of
496
+ graph nodes in a 3D image volume, in this paper, we only
497
+ consider locally connected graphs to reduce computational
498
+ cost. Each node i is only connected to other nearby nodes
499
+ whose geodesic distance is below a predefined threshold tgeo.
500
+ That is, there exists an edge between nodes i and j if and only
501
+ if d′_{geo}(i, j) < t_{geo}, where d′_{geo}(i, j) is a modified version
504
+ of the geodesic distance in (4) where A0 is replaced with
505
+ (1−A0). Since our geodesic distance is affected by the vessel
506
+ mask and probability map, this connection rule implies that the
507
+ Euclidean distance between two connected nodes has a larger
508
+ threshold when the nodes are near potential vessels and the
509
+ Layers     | Output size     | UNET-0,1,2
+ en-conv0   | 256 × 256 × 256 | conv(3 × 3 × 3, 16)
+ en-conv1   | 128 × 128 × 128 | 2 × BuildBlock(3 × 3 × 3, 32)
+ en-conv2   | 64 × 64 × 64    | 2 × BuildBlock(3 × 3 × 3, 64)
+ en-conv3   | 32 × 32 × 32    | 2 × BuildBlock(3 × 3 × 3, 128)
+ en-conv4   | 16 × 16 × 16    | 2 × BuildBlock(3 × 3 × 3, 256)
+ de-conv4   | 16 × 16 × 16    | 2 × BuildBlock(3 × 3 × 3, 256)
+ de-conv3   | 32 × 32 × 32    | 2 × BuildBlock(3 × 3 × 3, 128)
+ de-conv2   | 64 × 64 × 64    | 2 × BuildBlock(3 × 3 × 3, 64)
+ de-conv1   | 128 × 128 × 128 | 2 × BuildBlock(3 × 3 × 3, 32)
+ de-conv0   | 256 × 256 × 256 | 2 × BuildBlock(3 × 3 × 3, 16)
+ classifier | 256 × 256 × 256 | conv(1 × 1 × 1, 2)
548
+ TABLE I
549
+ NETWORK ARCHITECTURE OF UNET-0,1,2 USED IN THE PROPOSED
550
+ PIPELINE. CONVOLUTION LAYERS IN THE ENCODER OF THE ORIGINAL
551
+ U-NET ARE REPLACED WITH RESIDUAL BLOCKS. INSIDE THE
552
+ BRACKETS ARE THE SHAPE OF THE RESIDUAL BLOCKS, AND OUTSIDE
553
+ THE BRACKETS IS THE NUMBER OF STACKED BLOCKS IN A STAGE.
554
+ DOWNSAMPLING (MAX POOLING) IS PERFORMED AFTER EN-CONV0,
555
+ EN-CONV1, EN-CONV2, EN-CONV3 WITH STRIDE 2, RESPECTIVELY.
556
+ UPSAMPLING IS PERFORMED AFTER EACH DE-CONV STAGE, AND THE
557
+ NUMBER OF INPUT CHANNELS OF EACH LAYER CAN BE FOUND FROM
558
+ THE PRECEDING LAYER.
559
+ orientation of the edge between the nodes roughly follows the
560
+ local orientation of the preliminary vessel mask. As a result,
561
+ the constructed graph has denser and longer connections in
562
+ regions potentially containing vessels.
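+ A minimal sketch of this connection rule, assuming the modified geodesic distances d′_geo (with A^0 replaced by 1 − A^0)
+ have already been computed, is:
+ def build_edges(num_nodes, geo_dist_mod, t_geo):
+     """Connect node pairs whose modified geodesic distance is below t_geo."""
+     return [(i, j) for i in range(num_nodes)
+             for j in range(i + 1, num_nodes)
+             if geo_dist_mod[i, j] < t_geo]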
563
+ In our constructed graph, every edge is associated with an
+ edge weight, which is a product of two components, e_w = e^s_w e^a_w, where e^s_w and e^a_w represent semantic consistency and
+ appearance similarity respectively. For a convolutional feature
+ map F in UNET-2, we first create its node representation F_V ∈ R^{|V|×C} through the forward mapping function f(·) in [34] on
+ the feature map F. Then we define the semantic consistency
+ of the edge between nodes i and j as
+ e^s_w(i, j) = σ([F^i_V, F^j_V] w_s),   (5)
+ where F^i_V and F^j_V represent the i-th and j-th node features,
+ w_s ∈ R^{2C} is a trainable weight vector fusing the two node
+ features, σ(·) is the sigmoid activation function, and [·, ·] denotes concatenation.
587
+ We use the gray-scale information associated with graph
+ nodes to define the appearance similarity of an edge as
+ e^a_w(i, j) = σ([F^i_X, F^j_X] w_g),   (6)
+ where F_X = f(X ◦ Y^0 ◦ A^0, G) and w_g ∈ R^{2C} is another trainable
+ weight vector fusing the mapped features at the two nodes.
+ Instead of using the gray-scale information X from the input
+ image volume only, we also include the semantic information
+ Y^0 and A^0 from the preliminary segmentation to focus on
+ potential vessel regions.
600
+ A simple example illustrating the above graph construction
601
+ process is given in Fig. 2.
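+ The edge weighting of Eqs. (5)-(6) can be sketched in PyTorch as follows; we assume both mapped feature sets have C
+ channels and that edge_index lists the endpoints of every edge (these names are ours, not the paper's):
+ import torch
+ import torch.nn as nn
+
+ class EdgeWeight(nn.Module):
+     """Sketch of e_w = e^s_w * e^a_w from Eqs. (5)-(6)."""
+     def __init__(self, c: int):
+         super().__init__()
+         self.ws = nn.Linear(2 * c, 1, bias=False)   # semantic consistency, Eq. (5)
+         self.wg = nn.Linear(2 * c, 1, bias=False)   # appearance similarity, Eq. (6)
+
+     def forward(self, F_V, F_X, edge_index):
+         i, j = edge_index                           # endpoint indices of each edge
+         es = torch.sigmoid(self.ws(torch.cat([F_V[i], F_V[j]], dim=-1)))
+         ea = torch.sigmoid(self.wg(torch.cat([F_X[i], F_X[j]], dim=-1)))
+         return (es * ea).squeeze(-1)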
602
+ C. Cross-Network Multi-Scale Feature Fusion
603
+ The features from UNET-2 need to be mapped into the
604
+ node domain of UNET-G and further enhanced through graph
605
+ convolutions over the constructed graph structure to better
606
+ Dataset | Avg #Nodes per Image | Avg #Edges per Node
+ ASOCA   | 12000 | 8.12
+ ACA     | 9600  | 7.11
+ HNA     | 13000 | 7.03
618
+ TABLE II
619
+ STATISTICS OF CONSTRUCTED GRAPHS. AVERAGE NUMBER OF NODES
620
+ PER IMAGE IS CALCULATED USING ALL IMAGES IN A DATASET. AND
621
+ AVERAGE NUMBER OF EDGES PER NODE IS CALCULATED USING ALL
622
+ NODES IN A DATASET.
623
+ Dataset | Avg #Nodes per Image | Avg #Edges per Node
+ Set1_1: n_segments is 28000
+ ASOCA   | 19010 | 8.67
+ ACA     | 14300 | 7.40
+ HNA     | 22420 | 8.10
+ Set1_2: n_segments is 14000
+ ASOCA   | 12000 | 8.12
+ ACA     | 9600  | 7.11
+ HNA     | 13000 | 7.03
+ Set1_3: n_segments is 7000
+ ASOCA   | 6020  | 6.12
+ ACA     | 3110  | 4.11
+ HNA     | 5200  | 4.03
+ Set1_4: n_segments is 3500
+ ASOCA   | 3210  | 4.12
+ ACA     | 2930  | 3.03
+ HNA     | 2122  | 2.14
666
+ TABLE III
667
+ AVERAGE NUMBER OF GRAPH NODES AND EDGES FOR DIFFERENT
668
+ VALUES OF N_SEGMENTS WHEN MIN_SIZE_FACTOR IS FIXED TO 0.5.
669
+ observe global priors of vessel connectivity. Afterwards we
670
+ reversely map the enhanced features to the spatial domain of
671
+ UNET-2 and fuse them with the original features there through
672
+ a residual connection.
673
+ Encoder Feature Fusion. The encoder feature map E^c_l from
+ the lth stage of UNET-2 is transformed into node features
+ at the corresponding level of UNET-G through the forward
+ mapping function f(·) defined in [34]. Then the mapped fea-
+ tures are fused with the down-sampled encoder features from
+ the previous stage in UNET-G. A residual graph convolution
+ module Ω(·) is utilized to enhance the fused features for
+ more accurately modeling complex vessel structures and better
+ observing global priors of vessel connectivity. Therefore, the
+ graph convolutional encoder features at the lth stage of UNET-G are created as
+ E^g_l = Ω(f(E^c_l, G) + down(E^g_{l−1})).   (7)
690
+ Then the graph convolutional features E^g_l are reversely
+ mapped to the original convolutional feature space of UNET-2
+ through the backward mapping function g(·) defined in
+ [34] and fused with its initial encoder feature to produce the
+ enhanced encoder feature at the lth level,
+ E_l = g(E^g_l) + E^c_l.   (8)
701
+ Decoder Feature Fusion. The decoder feature D^c_l from the
+ lth stage of UNET-2 is transformed into node features at the
+ corresponding level of UNET-G through the same forward
705
+ Fig. 3.
724
+ From left to right, it is the ground truth, the results of UNET, nnUNET, DVS, DDT and our model, respectively. The aorta and the coronary
725
+ vessels are marked with red and green. Although DDT achieves the best performance compared with other previous state-of-the-art methods, it
726
+ may generate incomplete vessel masks when the structure of vessels is complicated.
727
+ Dataset | Avg #Nodes per Image | Avg #Edges per Node
+ Set2_1: min_size_factor is 0.3
+ ASOCA   | 8900  | 8.01
+ ACA     | 7230  | 6.89
+ HNA     | 9600  | 7.01
+ Set2_2: min_size_factor is 0.4
+ ASOCA   | 10300 | 8.02
+ ACA     | 8410  | 7.12
+ HNA     | 11200 | 7.13
+ Set2_3: min_size_factor is 0.5
+ ASOCA   | 12000 | 8.12
+ ACA     | 9600  | 7.11
+ HNA     | 13000 | 7.03
+ Set2_4: min_size_factor is 0.6
+ ASOCA   | 12100 | 8.13
+ ACA     | 9870  | 7.21
+ HNA     | 13210 | 7.13
770
+ TABLE IV
771
+ AVERAGE NUMBER OF GRAPH NODES AND EDGES FOR DIFFERENT
772
+ VALUES OF MIN_SIZE_FACTOR. N_SEGMENTS IS FIXED TO 14000.
773
+ mapping function f(·). Then the mapped features are fused
+ with the up-sampled decoder features from the previous stage
+ in UNET-G, and the fused features are enhanced with the same
+ residual graph convolution module Ω(·) before being further fused
+ with the graph encoder feature E^g_l through the skip connection
+ at the lth stage of UNET-G. Thus the graph convolutional
+ decoder features at the lth stage of UNET-G are defined as
+ D^g_l = Ω(f(D^c_l, G) + up(D^g_{l+1})) + E^g_l.   (9)
787
+ Then the graph convolutional decoder features D^g_l are re-
+ versely mapped to the original feature space of UNET-2
+ through the same backward mapping function g(·). We further
+ fuse the reversely mapped features with both the initial decoder
+ feature of UNET-2 and the skip-connected enhanced encoder
+ feature E_l to produce the enhanced decoder feature at the lth
+ level,
+ D_l = g(D^g_l) + D^c_l + E_l.   (10)
799
+ The last enhanced decoder feature is used to produce the final
800
+ segmentation of vessels with a pixel-wise softmax classifier.
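+ The fusion rules of Eqs. (7)-(10) can be summarized by the following schematic Python functions, where f, g, omega, down
+ and up stand for the forward mapping, backward mapping, residual graph convolution module, and graph pooling/unpooling;
+ this is a sketch of the update equations rather than the full implementation.
+ def fuse_encoder(Ec_l, Eg_prev, graph, f, g, omega, down):
+     """Encoder fusion, Eqs. (7)-(8)."""
+     Eg_l = omega(f(Ec_l, graph) + down(Eg_prev))        # Eq. (7)
+     E_l = g(Eg_l) + Ec_l                                # Eq. (8)
+     return Eg_l, E_l
+
+ def fuse_decoder(Dc_l, Dg_next, Eg_l, E_l, graph, f, g, omega, up):
+     """Decoder fusion, Eqs. (9)-(10)."""
+     Dg_l = omega(f(Dc_l, graph) + up(Dg_next)) + Eg_l   # Eq. (9)
+     D_l = g(Dg_l) + Dc_l + E_l                          # Eq. (10)
+     return Dg_l, D_l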
801
+ Forward and Backward Mappings. We adopt the forward
802
+ and backward mapping functions defined in [34] to map pixel-
803
+ level features in a CNN-based U-Net to node features in a
804
+ graph U-Net and vice versa. The key consideration during
805
+ feature mapping design lies in revealing the relations between
806
+ node and pixel-level features. As illustrated in the following
807
+ equations, the kNN (k Nearest Neighbor) based forward map-
808
+ ping φk with its auxiliary matrix A aggregates pixel-level
809
+ features over irregular regions to obtain corresponding node
810
+ features adaptively according to their spatial relations.
811
+ φ_k(F, N) = (Q^f)^T F,   (11)
813
+ Fig. 4.
822
+ Sample visual results on the ACA dataset. From left to right, it is the ground truth, the results of UNET, nnUNET, DVS, DDT and our model,
823
+ respectively. The aorta and the coronary vessels are marked with red and green. Although DDT achieves the best performance compared with
824
+ other previous state-of-the-art methods, it may generate incomplete vessel masks when the structure of vessels is complicated.
825
+ Q^f = A (Λ^f)^{−1},   (12)
+ A_{ij} = \begin{cases} 1 & \text{if the } j\text{-th node is a kNN of the } i\text{-th pixel} \\ 0 & \text{otherwise} \end{cases},   (13)
835
+ where N ∈ {V, U} represents the node set corresponding
+ to pixel-level spatial visual features F ∈ R^{HW×C}, A ∈ R^{HW×|N|} is an auxiliary matrix that assigns spatial features
+ to the k nearest graph nodes, Λ^f ∈ R^{|N|×|N|} is a diagonal matrix with Λ^f_{jj} = \sum_{i=1}^{HW} A_{ij}, and Q^f ∈ R^{HW×|N|} is a normalized form of
+ A and serves as the forward mapping matrix.
847
+ The backward mapping function ψk projects each graph
848
+ node feature back to the spatial domain. The backward map-
849
+ ping follows similar design principles as the forward mapping
850
+ and makes use of the same number of nearest neighbors.
851
+ Formally, ψk is formulated as follows.
852
+ ψ_k(Z, N) = Q^r [Z]_e,   (14)
+ Q^r = (Λ^r)^{−1} A,   (15)
856
+ where N ∈ {V, U} represents the node set of the graph,
+ A ∈ R^{HW×|N|} is similar to the definition in Equation 13,
+ [·]_e indicates the indexing operator which selects nodes in the
+ graph, Λ^r ∈ R^{HW×HW} is a diagonal matrix with Λ^r_{ii} = \sum_{j=1}^{|N|} A_{ij},
+ and Q^r ∈ R^{HW×|N|} is the backward mapping matrix, which
866
+ is also a normalized form of A.
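+ A self-contained sketch of the mappings in Eqs. (11)-(15), with A built from k-nearest-node assignments and Q^f, Q^r obtained
+ by column- and row-normalizing A, is given below (the clamp is only a zero-division guard, and the node indexing [Z]_e is
+ assumed to have been applied already):
+ import torch
+
+ def knn_assignment(pixel_xyz, node_xyz, k):
+     """Auxiliary matrix A of Eq. (13): A[i, j] = 1 iff node j is a kNN of pixel i."""
+     d = torch.cdist(pixel_xyz, node_xyz)                  # (HW, |N|)
+     A = torch.zeros_like(d)
+     A.scatter_(1, d.topk(k, largest=False).indices, 1.0)
+     return A
+
+ def forward_map(F, A):
+     """phi_k(F) = (Q^f)^T F with Q^f = A (Lambda^f)^{-1}, Eqs. (11)-(12)."""
+     Qf = A / A.sum(dim=0, keepdim=True).clamp(min=1)      # column-normalize
+     return Qf.t() @ F                                     # (|N|, C) node features
+
+ def backward_map(Z, A):
+     """psi_k(Z) = Q^r Z with Q^r = (Lambda^r)^{-1} A, Eqs. (14)-(15)."""
+     Qr = A / A.sum(dim=1, keepdim=True).clamp(min=1)      # row-normalize
+     return Qr @ Z                                         # (HW, C) pixel features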
867
+ IV. EXPERIMENTS
868
+ A. Datasets
869
+ ASOCA Automated Segmentation of Coronary Arteries
870
+ Dataset (ASOCA) is a public dataset in MICCAI-2020 chal-
871
+ lenge 1 which aims to segment the coronary artery lumen.
872
+ The dataset consists of a training set of 40 Cardiac Computed
873
+ Tomography Angiography (CCTA) images and a test set of
874
+ 20 CCTA images. The images in the testing set were anno-
875
+ tated and verified by experts we invited. The original image
876
+ resolution of the ASOCA dataset is 512×512×N, where N
877
+ is between 168 and 334.
878
+ ACA Aorta and Coronary Artery Dataset (ACA) is an in-
879
+ house dataset which contains 1000 CCTA images. The dataset
880
+ is utilized to segment both aorta and coronary arteries. Each
881
+ image is annotated by one expert annotator and verified by a
882
+ second expert. We split the dataset into a training set of 800
883
+ images, a validation set of 100 images and a test set of 100
884
+ images. The original image resolution of the ACA dataset is
885
+ 512×512×N, where N is between 192 and 600.
886
+ HNA Head and Neck Artery Dataset (HNA) is an in-house
887
+ dataset which contains 800 CT angiography (CTA) images of
888
+ head and neck. The images are annotated in the same way as
889
+ ACA. Cerebral, vertebral and carotid arteries are annotated as
890
+ the target vessel mask. The dataset is split into a training set
891
+ of 640 images, a validation set of 80 images and a test set of
892
+ 1 https://asoca.grand-challenge.org
893
+ Fig. 5.
903
+ Sample visual results on the HNA dataset. From left to right, it is the ground truth, the results of UNET, nnUNET, DVS, DDT and our model,
904
+ respectively.
905
+ 80 images. The original image resolution of the HNA dataset
906
+ is also 512×512×N, where N is between 192 and 600.
907
+ B. Experimental Setup
908
+ Evaluation Metrics. Dice coefficient (DICE) and average
909
+ symmetric surface distance (ASSD) (ASSD is measured in
910
+ millimeters) are adopted as the evaluation metrics since they
911
+ are commonly used in medical image segmentation [35]. In
912
+ addition, to evaluate unique characteristics of tubular structure,
913
+ another two metrics called skeleton recall (SR) and skeleton
914
+ precision (SP) are defined as follows:
915
+ SR(S, G) = |S ∩ Q(G)| / |Q(G)|,                                            (16)
+ SP(S, G) = |Q(S) ∩ G| / |Q(S)|,                                            (17)
923
+ where S and G are the segmentation result and the ground
924
+ truth annotation respectively. The function Q(·) is used to
925
+ acquire the skeleton of a tubular mask, which can preserve
926
+ original vascular topology and connectivity. Here we use
927
+ skeletonization function [36] as the implementation of Q(·).
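A direct reading of Eqs. (16)–(17), using scikit-image's skeletonization as Q(·), is sketched below; treat it as an approximation of the authors' evaluation code rather than a reference implementation.

```python
import numpy as np
from skimage.morphology import skeletonize

def skeleton_recall_precision(pred, gt):
    """Skeleton recall (SR) and skeleton precision (SP), Eqs. (16)-(17).

    pred, gt: binary segmentation and ground-truth masks (2D or 3D arrays).
    """
    pred, gt = np.asarray(pred, bool), np.asarray(gt, bool)
    # Q(.): skeletonization; method='lee' handles both 2D and 3D masks.
    skel_gt = skeletonize(gt, method='lee').astype(bool)      # Q(G)
    skel_pred = skeletonize(pred, method='lee').astype(bool)  # Q(S)
    sr = (pred & skel_gt).sum() / max(skel_gt.sum(), 1)
    sp = (skel_pred & gt).sum() / max(skel_pred.sum(), 1)
    return sr, sp
```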
928
+ Network Structure and Training. Each sub-network of the
929
+ proposed method is a U-shaped network. All CNN-based U-
930
+ shaped networks, including UNET-0, UNET-1 and UNET-2
931
+ are based on the original U-Net [19] except that the original
932
+ convolution layers in its encoder are replaced with residual
933
+ blocks [37]. The network architecture of UNET-0,1,2 used in
934
+ the proposed pipeline is given in Table I. UNET-G is a graph
935
+ U-Net [14]. Each downsampling operation in UNET-G halves
936
+ the number of graph nodes, and each upsampling operation
937
+ doubles the number of nodes. The feature dimension of every
938
+ graph node is always set to 256 in all the experiments reported
939
+ in this paper. The input image is always resized to 256×256×
940
+ 256, and the batch size on a single GPU is 2. The proposed
941
+ cascaded network is trained by jointly optimizing the weighted
942
+ cross-entropy loss, Lwbce = −βy·log(p)−(1−y)·log(1−p),
943
+ and the Dice loss, L_Dice = 1 − 2 y·p / (∥y∥_1 + ∥p∥_1), where y and p
+ are the ground-truth and predicted masks, respectively. We set
947
+ β = 5 to increase the vessel recall. All models are trained
948
+ for 100 epochs from scratch using PyTorch [38] on NVIDIA
949
+ Titan Xp pascal GPUs. We set the weight decay to 1e-4 and
950
+ use Adam [39] as the optimizer with the initial learning rate
951
+ set to 1e-4. The learning rate is reduced by a factor of 10 after
952
+ every 40 epochs.
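The training objective can be written compactly as the sum of the two losses above. The PyTorch sketch below assumes the two terms are combined with equal weight, which the paper does not state explicitly.

```python
import torch

def weighted_bce_dice_loss(p, y, beta=5.0, eps=1e-6):
    """L_wbce + L_Dice for vessel probabilities p and binary targets y
    (same shape, e.g. (B, 1, D, H, W)); beta=5 up-weights vessel voxels."""
    p = p.clamp(eps, 1.0 - eps)
    l_wbce = -(beta * y * torch.log(p) + (1.0 - y) * torch.log(1.0 - p)).mean()
    l_dice = 1.0 - 2.0 * (y * p).sum() / (y.sum() + p.sum() + eps)
    return l_wbce + l_dice
```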
953
+ Graph Hyperparameter Setting. We use a 3D version of the
954
+ SLIC algorithm [33] to generate superpixels. Two parameters
955
+ of the algorithm control the total number of superpixels in an
956
+ image. One of them is ‘n segments’, which is the maximum
957
960
+ Fig. 6.
961
+ Sample visual results on the HNA dataset. From left to right are the ground truth, the results of UNET, nnUNET, DVS, DDT and our model,
962
+ respectively.
963
+ number of superpixels, and the other is ‘min size factor’,
964
+ which defines the ratio between the minimum size of a super-
965
+ pixel and the average size of a superpixel. In the experiments
966
+ reported in this paper, ‘n segments’ is always set to 14000,
967
+ and ‘min size factor’ is set between 0.3 and 0.65.
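For reference, these two hyperparameters map directly onto scikit-image's CPU SLIC implementation, as in the illustrative sketch below; the paper itself uses a GPU SLIC implementation [41], and the compactness value here is an assumed placeholder.

```python
import numpy as np
from skimage.segmentation import slic

def supervoxel_nodes(volume, n_segments=14000, min_size_factor=0.5):
    """Generate 3D superpixels (supervoxels) on a normalized CT volume and
    return one node per superpixel (its centroid and mean intensity)."""
    labels = slic(volume, n_segments=n_segments, min_size_factor=min_size_factor,
                  compactness=0.1, channel_axis=None, start_label=0)
    nodes = []
    for lab in np.unique(labels):
        mask = labels == lab
        zyx = np.argwhere(mask).mean(axis=0)          # centroid (z, y, x)
        nodes.append(np.append(zyx, volume[mask].mean()))
    return labels, np.stack(nodes)
```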
968
+ In a graph, each node i is only connected to other nearby
969
+ nodes whose geodesic distance is below a predefined threshold
970
+ tgeo. That is, there exists an edge between nodes i and j if
971
+ and only if d′_geo(i, j) < t_geo, where d′_geo(i, j) is the geodesic
974
+ distance between nodes i and j. tgeo is a hyperparameter that
975
+ needs to be empirically set only once for each dataset. For the
976
+ ASOCA dataset, tgeo is set to 0.30. For the ACA dataset, tgeo
977
+ is set to 0.35. For the HNA dataset, tgeo is set to 0.40.
978
+ Table II shows the statistics of graph nodes and edges.
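Given a precomputed, normalized geodesic distance matrix between nodes, the edge set reduces to a simple threshold test, as in the hypothetical helper below; the weighted edge terms e^s_w and e^a_w used elsewhere in the paper are not reproduced here.

```python
import numpy as np

def build_edge_index(d_geo, t_geo=0.35):
    """Edge (i, j) exists iff d'_geo(i, j) < t_geo (e.g. 0.30 for ASOCA,
    0.35 for ACA, 0.40 for HNA). d_geo: (|N|, |N|) normalized geodesic distances."""
    adj = (d_geo < t_geo) & ~np.eye(d_geo.shape[0], dtype=bool)
    src, dst = np.nonzero(adj)
    return np.stack([src, dst])  # (2, num_edges) edge index
```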
979
+ C. Comparison with the State of the Art
980
+ We compared our proposed model with existing state-
981
+ of-the-art algorithms for vessel segmentation on the three
982
+ datasets. The methods in these comparisons include DDT [10],
983
+ DVS [8], UNET3d [20], nnUNET [27], ResUNET [40],
984
+ DenseUNET [21], PSP-Net [17] and HMSA [18]. DDT
985
+ performs tubular structure modeling and is specifically de-
986
+ signed for vessel segmentation. For medical image analysis,
987
+ nnUNET is considered a strong baseline as it achieves state-
988
+ of-the-art performance on many well-established segmentation
989
+ challenges. PSP-Net [17] and HMSA [18] are included for
990
+ comparison since they are state-of-the-art methods for generic
991
+ semantic segmentation. In addition, we include DVS for com-
992
+ parison since it also uses a GCN for structure modeling. Since
993
+ the proposed framework is not limited to a specific backbone
994
+ network, we integrate it with more powerful backbones, e.g.
995
+ ResUNET, DenseUNET and H-DenseUNET. As shown in
996
+ Table V, VI and VII, the performance can be improved.
997
+ As shown in Table V, the proposed method achieves the
998
+ state-of-the-art performance in terms of four evaluation metrics
999
+ on the ASOCA dataset, and outperforms the top-6 methods in
1000
+ the challenge leaderboard. Specifically, our method achieves
1001
+ 89.91% DICE, 0.530 ASSD, 95.8% SP and 96.0% SR. The
1002
+ DICE of our method is higher than that of DDT and the top-1
1003
+ method in the leaderboard by around 1.5%.
1004
+ On the ACA and HNA datasets, the proposed method
1005
+ also achieves the best performance among all the methods
1006
+ considered in the comparisons. Specifically, the proposed
1007
+ method outperforms DVS by 4.7% and 3.2% on ACA and
1008
+ HNA respectively in terms of DICE. This demonstrates that
1009
+ multi-scale feature interaction between CNNs and GCNs is
1010
+ important for vessel structure modeling.
1011
+ The above experiments demonstrate the superiority of the
1012
+ proposed method on three vessel segmentation tasks. Com-
1013
+ pared to other methods [17], [18], [20], [27], [40], the main
1014
+ advantage of our approach is that it constructs a vessel graph
1015
+ to capture the 3D structure of vessels. On the basis of the
1016
+ constructed vessel graph, our proposed method uses GCNs
1017
+ to enhance feature propagation along vessel structures, and
1018
+ improve the interconnection between isolated vessel predic-
1019
+ [Figure panels, left to right: GT, UNet, nnUNet, DVS, DDT, Ours]
1027
+ Method            | DICE (%) | ASSD  | SP (%) | SR (%)
+ LB-1st            | 88.56    | -     | -      | -
+ LB-2nd            | 88.00    | -     | -      | -
+ LB-3rd            | 87.94    | -     | -      | -
+ LB-4th            | 87.36    | -     | -      | -
+ LB-5th            | 87.17    | -     | -      | -
+ LB-6th            | 87.11    | -     | -      | -
+ DDT [10]          | 88.21    | 0.571 | 95.0   | 94.5
+ DVS [8]           | 87.32    | 0.582 | 94.1   | 93.2
+ UNET3d [20]       | 83.20    | 0.644 | 93.0   | 91.0
+ ResUNET [40]      | 83.20    | 0.644 | 93.0   | 91.0
+ DenseUNET [21]    | 83.20    | 0.644 | 93.0   | 91.0
+ H-DenseUNET [21]  | 83.20    | 0.644 | 93.0   | 91.0
+ nnUNET [27]       | 85.11    | 0.572 | 94.3   | 92.4
+ PSP-Net [17]      | 84.12    | 0.593 | 94.2   | 92.1
+ HMSA [18]         | 86.23    | 0.561 | 95.2   | 93.3
+ Ours              | 89.89    | 0.544 | 95.6   | 95.9
+ Ours+ResUNET      | 89.90    | 0.541 | 95.7   | 95.9
+ Ours+DenseUNET    | 89.89    | 0.540 | 95.7   | 95.9
+ Ours+H-DenseUNET  | 89.91    | 0.530 | 95.8   | 96.0
+ TABLE V. Performance comparison on the ASOCA dataset among state-of-the-art segmentation algorithms. The MICCAI
+ leaderboard results (https://asoca.grand-challenge.org/evaluation/challenge/leaderboard/) report DICE only; the other
+ methods are evaluated in terms of four metrics: DICE, ASSD, SP and SR.
1135
+ tions. Although DVS [8] and our method both exploit GCNs,
1136
+ the major distinction is that we use a super-pixel algorithm to
1137
+ generate graph nodes from a preliminary segmentation and the
1138
+ pixel values of the input image. Leveraging super-pixels makes
1139
+ our constructed graph more completely cover potential vessel
1140
+ regions, and therefore, improve the skeleton recall. In addition,
1141
+ we make use of forward and backward feature mappings to
1142
+ perform more thorough feature fusion between the CNN-based
1143
+ UNET and the graph UNET.
1144
+ To further validate the robustness of the proposed method,
1145
+ we collect two subsets of 35 hard samples from the test
1146
+ sets of ACA and HNA, respectively. Arteries in the chosen
1147
+ samples have calcifications, stents or tortuous segments, which
1148
+ significantly increase the difficulty of vessel segmentation
1149
+ in clinical practice. Experimental results in Table VIII and
1150
+ Table IX show that the proposed method performs the best on
1151
+ these two subsets, which demonstrates the robustness of the
1152
+ proposed method on hard samples.
1153
+ Furthermore, we compare the inference time complexity of
1154
+ state-of-the-art networks in Table X. As shown in the table, the
1155
+ inference time of our method for a computed tomography an-
1156
+ giography image is 0.190/0.193/0.198 second on the ASOCA,
1157
+ ACA and HNA datasets, respectively. Since we use a GPU-
1158
+ based implementation [41] of the SLIC algorithm to generate
1159
+ super-pixels, the graph construction step of our method is
1160
+ very efficient, and the overall inference time of our method
1161
+ is comparable to that of other methods.
1162
+ D. Ablation Study
1163
+ Ablation of graph node construction. We investigate the ef-
1164
+ fectiveness of the three components of Eqn. (1) for graph node
1165
+ construction on the ACA dataset. As shown in Table XIV, all
1166
+ Method            | DICE (%) | ASSD  | SP (%) | SR (%)
+ DDT [10]          | 91.2     | 0.497 | 96.0   | 89.2
+ DVS [8]           | 90.1     | 0.503 | 95.1   | 88.3
+ UNET3d [20]       | 87.3     | 0.711 | 94.0   | 89.4
+ ResUNET [40]      | 88.4     | 0.612 | 95.1   | 89.6
+ DenseUNET [21]    | 88.9     | 0.568 | 95.2   | 89.4
+ H-DenseUNET [21]  | 89.9     | 0.528 | 95.3   | 90.1
+ nnUNET [27]       | 88.3     | 0.630 | 95.5   | 90.6
+ PSP-Net [17]      | 89.0     | 0.642 | 95.0   | 89.6
+ HMSA [18]         | 90.2     | 0.592 | 96.7   | 90.1
+ Ours              | 94.2     | 0.448 | 97.1   | 95.1
+ Ours+ResUNET      | 94.6     | 0.445 | 97.3   | 95.2
+ Ours+DenseUNET    | 94.8     | 0.444 | 97.3   | 95.3
+ Ours+H-DenseUNET  | 94.8     | 0.443 | 97.3   | 95.2
+ TABLE VI. Performance comparison on the ACA dataset among state-of-the-art segmentation algorithms.
1239
+ Method            | DICE (%) | ASSD  | SP (%) | SR (%)
+ DDT [10]          | 92.4     | 0.401 | 96.1   | 93.3
+ DVS [8]           | 91.3     | 0.472 | 97.2   | 92.4
+ UNET3d [20]       | 87.3     | 0.664 | 95.1   | 90.5
+ ResUNET [40]      | 87.7     | 0.661 | 95.2   | 90.7
+ DenseUNET [21]    | 88.1     | 0.618 | 95.3   | 90.8
+ H-DenseUNET [21]  | 89.1     | 0.588 | 95.7   | 92.0
+ nnUNET [27]       | 89.9     | 0.600 | 94.3   | 91.2
+ PSP-Net [17]      | 90.1     | 0.593 | 94.7   | 90.0
+ HMSA [18]         | 91.4     | 0.543 | 95.6   | 91.1
+ Ours              | 94.3     | 0.379 | 97.1   | 96.3
+ Ours+ResUNET      | 94.4     | 0.376 | 97.1   | 96.6
+ Ours+DenseUNET    | 94.4     | 0.375 | 97.2   | 96.6
+ Ours+H-DenseUNET  | 94.5     | 0.374 | 97.2   | 96.5
+ TABLE VII. Performance comparison on the HNA dataset among state-of-the-art segmentation algorithms.
1312
+ the components play important roles in the node construction
1313
+ process, and dgeo is the most important for the segmentation
1314
+ performance. Removing A0 in dgeo leads to 0.6% performance
1315
+ drop and removing X0 leads to about 1.2% performance
1316
+ drop in terms of DICE. We further investigate how the hy-
1317
+ perparameter ‘n segments’ and ‘min size factor’ of the SLIC
1318
+ algorithm affect the performance of our method. For the
1319
+ ablation study on ‘n segments’, we first fix ‘min size factor’
1320
+ to 0.5 and change the value of ‘n segments’ to 28000
1321
+ (Set1 1), 14000 (Set1 2), 7000 (Set1 3), and 3500 (Set1 4).
1322
+ Then we fix ‘n segments’ to 14000 and change the value of
1323
+ ‘min size factor’ to 0.3 (Set2 1), 0.4 (Set2 2), 0.5 (Set2 3),
1324
+ and 0.6 (Set2 4). For the above eight settings, we demonstrate
1325
+ how the number of nodes and edges changes in Table III and
1326
+ Table IV. Then we conduct an ablation study on all three
1327
+ datasets and report the results in Table XV. From the exper-
1328
+ imental results, we can see that our model achieves the best
1329
+ performance by setting ‘n segments’ and ‘min size factor’ to
1330
+ 14000 and 0.5, respectively. The corresponding number of
1331
+ nodes per image is 12000, 9600, and 13000 on the ASOCA,
1332
+ ACA and HNA datasets, respectively.
1333
+ Ablation of graph edge construction. Next, we investi-
1334
+ gate the effectiveness of the two components of graph edge
1335
+ construction. As shown in Table XV, both e^s_w and e^a_w are
1338
+ important for vessel segmentation. Furthermore, if we discard
1339
1342
+ Method            | DICE (%) | ASSD  | SP (%) | SR (%)
+ DDT [10]          | 87.1     | 0.511 | 92.0   | 88.2
+ DVS [8]           | 86.2     | 0.544 | 91.1   | 87.3
+ UNET3d [20]       | 86.0     | 0.722 | 91.0   | 87.4
+ ResUNET [40]      | 86.4     | 0.712 | 91.2   | 87.6
+ DenseUNET [21]    | 86.5     | 0.701 | 91.4   | 87.6
+ H-DenseUNET [21]  | 87.1     | 0.690 | 91.6   | 87.9
+ nnUNET [27]       | 87.2     | 0.631 | 92.5   | 86.6
+ PSP-Net [17]      | 84.0     | 0.742 | 91.0   | 84.6
+ HMSA [18]         | 85.2     | 0.762 | 89.7   | 85.2
+ Ours              | 92.1     | 0.453 | 96.4   | 93.3
+ Ours+ResUNET      | 92.2     | 0.451 | 96.5   | 93.5
+ Ours+DenseUNET    | 92.4     | 0.450 | 96.5   | 93.6
+ Ours+H-DenseUNET  | 92.5     | 0.448 | 96.6   | 93.6
+ TABLE VIII. Performance comparison on a subset of hard samples from the ACA dataset among state-of-the-art
+ segmentation algorithms. Arteries in these hard samples have calcifications, stents or tortuous segments.
1417
+ Method            | DICE (%) | ASSD  | SP (%) | SR (%)
+ DDT [10]          | 88.4     | 0.504 | 94.1   | 91.3
+ DVS [8]           | 87.3     | 0.584 | 93.2   | 90.4
+ UNET3d [20]       | 86.4     | 0.654 | 92.1   | 89.5
+ ResUNET [40]      | 86.6     | 0.631 | 92.6   | 89.9
+ DenseUNET [21]    | 86.7     | 0.598 | 92.7   | 89.9
+ H-DenseUNET [21]  | 86.9     | 0.552 | 92.9   | 89.9
+ nnUNET [27]       | 87.9     | 0.554 | 93.3   | 90.5
+ PSP-Net [17]      | 87.1     | 0.567 | 92.7   | 89.0
+ HMSA [18]         | 88.4     | 0.542 | 92.1   | 91.3
+ Ours              | 90.1     | 0.449 | 96.0   | 94.2
+ Ours+ResUNet      | 90.6     | 0.441 | 96.2   | 94.3
+ Ours+DenseUNet    | 90.6     | 0.440 | 96.1   | 94.5
+ Ours+H-DenseUNet  | 90.8     | 0.432 | 96.3   | 94.6
+ TABLE IX. Performance comparison on a subset of hard samples from the HNA dataset among state-of-the-art
+ segmentation algorithms. Arteries in these hard samples have calcifications or tortuous segments.
1492
+ the two edge terms and use the traditional binary edge, the
1493
+ performance drops by 1.7% in terms of DICE.
1494
+ Ablation of cross-network feature fusion. To show the
1495
+ effectiveness of cross-network feature fusion, we first discard
1496
+ UNET-G of our proposed framework and only keep the
1497
+ cascaded UNET. As shown in Tables XI, XII and XIII, the
1498
+ performance drops by 2.0%, 1.9% and 2.77% on ACA, HNA
1499
+ and ASOCA respectively, which further validates the impor-
1500
+ tance of vessel structure modeling. In addition, we find that the
1501
+ improvement of UNET-G on ASOCA dataset is much more
1502
+ significant than ACA and HNA. As the training set of ASOCA
1503
+ only contains 40 CT images, this demonstrates that CNNs
1504
+ cannot well exploit the characteristics of vessels when the size
1505
+ of training data is small. Then we evaluate the effectiveness of
1506
+ using multi-scale fusion and graph convolution. Experimental
1507
+ results show that both components are important for vessel
1508
+ segmentation.
1509
+ E. Visualization
1510
+ As shown in Fig. 3, the proposed method generates higher
1511
+ quality vessel masks than other state-of-the-art algorithms,
1512
+ including DDT, in most of the cases. Specifically, the proposed
1513
+ Method            | ASOCA (s) | ACA (s) | HNA (s)
+ DDT [10]          | 0.182     | 0.184   | 0.187
+ DVS [8]           | 0.186     | 0.187   | 0.190
+ UNET3d [20]       | 0.132     | 0.134   | 0.136
+ nnUNET [27]       | 0.201     | 0.204   | 0.206
+ ResUNET [40]      | 0.136     | 0.137   | 0.139
+ DenseUNET [21]    | 0.141     | 0.144   | 0.147
+ H-DenseUNET [21]  | 0.139     | 0.142   | 0.146
+ PSP-Net [17]      | 0.142     | 0.144   | 0.145
+ HMSA [18]         | 0.341     | 0.344   | 0.347
+ Ours              | 0.190     | 0.193   | 0.198
+ Ours+ResUNET      | 0.192     | 0.195   | 0.201
+ Ours+DenseUNET    | 0.196     | 0.198   | 0.204
+ Ours+H-DenseUNET  | 0.195     | 0.197   | 0.202
+ TABLE X. Comparison of inference time among state-of-the-art segmentation algorithms on the ASOCA, ACA and HNA
+ datasets. The average inference time of each algorithm on each dataset is shown, in seconds per sample.
1574
+ method can well exploit vessel structures and generate more
1575
+ complete vessel masks. In comparison to the proposed method,
1576
+ DDT may generate isolated segmentation masks since it is
1577
+ incapable of modeling the global structure of vessels. Fig. 4
1578
+ and Fig. 6 further visualize vessel segmentation results from
1579
+ different methods on the ACA and HNA datasets respectively.
1580
+ We add more examples for qualitative comparison in Fig. 7.
1581
+ The good cases show that our GCN-based cascaded network
1582
+ can improve vessel connectivity among individual vessel pre-
1583
+ dictions and achieve a higher skeleton recall. In the meantime,
1584
+ most of the false positive predictions can be removed. From
1585
+ the bad case, we find that the proposed method is limited when
1586
+ the initial segmentation is far from the ground truth. In such
1587
+ cases, vessel segmentation errors in the initial segmentation
1588
+ Fig. 7.
1589
+ Sample visual results with and without the graph module on the
1590
+ ACA dataset. From left to right: the ground truth, the result without
+ the graph module, and the result with the graph module.
1592
1595
+ UNET-0
1596
+ UNET-G
1597
+ Cfeature in UNET-G
1598
+ Graph Convolution Ω
1599
+ DICE (%)
1600
+ ASSD
1601
+ SP (%)
1602
+ SR (%)
1603
+
1604
+
1605
+
1606
+
1607
+ 94.2
1608
+ 0.448
1609
+ 97.1
1610
+ 95.1
1611
+
1612
+
1613
+
1614
+
1615
+ 93.1
1616
+ 0.469
1617
+ 95.3
1618
+ 94.2
1619
+
1620
+
1621
+
1622
+
1623
+ 92.8
1624
+ 0.487
1625
+ 95.1
1626
+ 94.0
1627
+ -
1628
+
1629
+ -
1630
+ -
1631
+ 92.2
1632
+ 0.469
1633
+ 95.3
1634
+ 94.2
1635
+
1636
+
1637
+
1638
+
1639
+ 92.4
1640
+ 0.470
1641
+ 95.9
1642
+ 92.7
1643
+ TABLE XI
1644
+ EFFECTIVENESS OF DIFFERENT COMPONENTS ON THE ACA DATASET. ‘UNET-G’ MEANS THE GRAPH UNET STRUCTURE ON OUR MODEL. IF IT IS
1645
+ REMOVED, OUR FRAMEWORK IS DEGENERATED INTO A CASCADED MODEL WITH TWO CNN-UNET STRUCTURES. ‘CFEATURE IN UNET-G’ MEANS
1646
+ WE FUSE CNN FEATURES OF DIFFERENT STAGES INTO UNET-G . IF IT IS DISCARDED, THE FEATURES OF UNET-G ARE ONLY ACQUIRED FROM ITS
1647
+ FIRST GRAPH FEATURES E^g_1, WHICH ARE ACQUIRED BY CONDUCTING FORWARD MAPPING f ON E^c_1. ‘GRAPH CONVOLUTION Ω’ AIMS TO PROPAGATE
1650
+ MESSAGE AND FUSE THE CNN FEATURES INTO UNET-G. WE UTILIZE IT TO COMPARE THE IMPORTANCE OF THE VESSEL GRAPH MODELLING
1651
+ ABILITY.
1652
+ UNET-0
1653
+ UNET-G
1654
+ Cfeature in UNET-G
1655
+ Graph Convolution Ω
1656
+ DICE (%)
1657
+ ASSD
1658
+ SP (%)
1659
+ SR (%)
1660
+
1661
+
1662
+
1663
+
1664
+ 94.3
1665
+ 0.379
1666
+ 97.1
1667
+ 96.3
1668
+
1669
+
1670
+
1671
+
1672
+ 93.1
1673
+ 0.412
1674
+ 96.3
1675
+ 95.4
1676
+
1677
+
1678
+
1679
+
1680
+ 93.0
1681
+ 0.434
1682
+ 96.1
1683
+ 95.2
1684
+ -
1685
+
1686
+ -
1687
+ -
1688
+ 92.4
1689
+ 0.471
1690
+ 95.9
1691
+ 94.8
1692
+
1693
+
1694
+
1695
+
1696
+ 92.6
1697
+ 0.462
1698
+ 96.1
1699
+ 94.9
1700
+ TABLE XII
1701
+ EFFECTIVENESS OF DIFFERENT COMPONENTS ON THE HNA DATASET.
1702
+ UNET-0
1703
+ UNET-G
1704
+ Cfeature in UNET-G
1705
+ Graph Convolution Ω
1706
+ DICE (%)
1707
+ ASSD
1708
+ SP (%)
1709
+ SR (%)
1710
+
1711
+
1712
+
1713
+
1714
+ 89.89
1715
+ 0.544
1716
+ 95.6
1717
+ 95.9
1718
+
1719
+
1720
+
1721
+
1722
+ 88.03
1723
+ 0.567
1724
+ 95.0
1725
+ 94.7
1726
+
1727
+
1728
+
1729
+
1730
+ 88.01
1731
+ 0.568
1732
+ 94.8
1733
+ 94.4
1734
+ -
1735
+
1736
+ -
1737
+ -
1738
+ 87.12
1739
+ 0.579
1740
+ 94.8
1741
+ 93.4
1742
+
1743
+
1744
+
1745
+
1746
+ 87.91
1747
+ 0.573
1748
+ 94.9
1749
+ 93.8
1750
+ TABLE XIII
1751
+ EFFECTIVENESS OF DIFFERENT COMPONENTS ON THE ASOCA DATASET.
1752
+ cannot be completely corrected by our cascaded network.
1753
+ V. CONCLUSIONS AND FUTURE WORK
1754
+ In this paper, we have presented a cascaded deep neural
1755
+ network for vessel segmentation on CTA images. Our ap-
1756
+ proach represents a new paradigm for modeling the structural
1757
+ information of 3D vessels using deep neural networks through
1758
+ the interaction between a pair of CNN-based U-Net and
1759
+ graph U-Net. By fusing the features across these two types
1760
+ of networks, our method successfully tackles the challenges
1761
+ brought up by the sparsity and anisotropy of vessel structures.
1762
+ Extensive experiments on both public and in-house datasets
1763
+ verify the superiority and effectiveness of our method. By
1764
+ constructing a vessel graph to complement CNNs, our method
1765
+ not only outperforms baseline methods but also achieves the
1766
+ state-of-the-art performance with DICE 89.91/94.8/94.5 on the
1767
+ ASOCA/ACA/HNA datasets, respectively.
1768
+ Our proposed framework provides a stronger spatial struc-
1769
+ ture representation by learning 3D vessel connectivity priors.
1770
+ Our future work includes 1) building a more powerful graph
1771
+ neural network to enhance message passing in our cross-
1772
+ network feature fusion module, 2) investigating better graph
1773
+ construction methods by exploiting more domain knowledge
1774
+ from medical experts, and 3) building a high-quality annotated
1775
+ dataset and a friendly open-source code base for 3D vessel
1776
+ segmentation tasks.
1777
+ Acknowledgment
1778
+ The retrospective study on our in-house datasets has been
1779
+ approved by the institutional review board of the Second
1780
+ Affiliated Hospital of Zhejiang University School of Medicine,
1781
+ and was carried out following the principles of the Declaration
1782
+ of Helsinki.
1783
+ REFERENCES
1784
+ [1] F. Grélard, F. Baldacci, A. Vialard, and J.-P. Domenger, “New methods
1785
+ for the geometrical analysis of tubular organs,” Medical image analysis,
1786
+ vol. 42, pp. 89–101, 2017.
1787
+ [2] J. Leipsic, S. Abbara, S. Achenbach, R. Cury, J. P. Earls, G. J. Mancini,
1788
+ K. Nieman, G. Pontone, and G. L. Raff, “Scct guidelines for the
1789
+ interpretation and reporting of coronary ct angiography: a report of the
1790
+ society of cardiovascular computed tomography guidelines committee,”
1791
+ Journal of cardiovascular computed tomography, vol. 8, no. 5, pp. 342–
1792
+ 358, 2014.
1793
+ [3] W. H. Organization et al., “Fact sheet: the top ten causes of death,” Fact
1794
+ sheet, no. 310, 2008.
1795
+ [4] V. L. Roger, A. S. Go, D. M. Lloyd-Jones, R. J. Adams, J. D. Berry,
1796
+ T. M. Brown, M. R. Carnethon, S. Dai, G. De Simone, E. S. Ford et al.,
1797
+ “Heart disease and stroke statistics—2011 update: a report from the
1798
+ american heart association,” Circulation, vol. 123, no. 4, pp. e18–e209,
1799
+ 2011.
1800
+ [5] J. Long, E. Shelhamer, and T. Darrell, “Fully convolutional networks
1801
+ for semantic segmentation,” in Proceedings of the IEEE conference on
1802
+ computer vision and pattern recognition, 2015, pp. 3431–3440.
1803
+ [6] Z. Huang, X. Wang, L. Huang, C. Huang, Y. Wei, and W. Liu, “Ccnet:
1804
+ Criss-cross attention for semantic segmentation,” in Proceedings of the
1805
+ IEEE International Conference on Computer Vision, 2019, pp. 603–612.
1806
1809
+ dgray
1810
+ ddis
1811
+ dgeo
1812
+ DICE
1813
+ ASSD
1814
+ SP
1815
+ SR
1816
+ -
1817
+ -
1818
+ X0
1819
+ A0
1820
+ -
1821
+ -
1822
+ -
1823
+ -
1824
+
1825
+
1826
+
1827
+
1828
+ 94.2
1829
+ 0.448
1830
+ 97.1
1831
+ 95.1
1832
+
1833
+
1834
+
1835
+
1836
+ 93.2
1837
+ 0.462
1838
+ 95.3
1839
+ 94.2
1840
+
1841
+
1842
+
1843
+
1844
+ 93.3
1845
+ 0.465
1846
+ 95.2
1847
+ 93.1
1848
+
1849
+
1850
+
1851
+
1852
+ 93.0
1853
+ 0.478
1854
+ 94.1
1855
+ 92.2
1856
+
1857
+
1858
+
1859
+
1860
+ 93.6
1861
+ 0.487
1862
+ 95.1
1863
+ 94.2
1864
+
1865
+
1866
+
1867
+
1868
+ 93.2
1869
+ 0.488
1870
+ 94.2
1871
+ 93.3
1872
+
1873
+
1874
+
1875
+
1876
+ 93.7
1877
+ 0.481
1878
+ 95.0
1879
+ 94.3
1880
+
1881
+
1882
+
1883
+
1884
+ 93.8
1885
+ 0.472
1886
+ 95.3
1887
+ 94.0
1888
+
1889
+
1890
+
1891
+
1892
+ 92.9
1893
+ 0.512
1894
+ 94.0
1895
+ 94.7
1896
+
1897
+
1898
+
1899
+
1900
+ 92.8
1901
+ 0.522
1902
+ 93.7
1903
+ 94.2
1904
+
1905
+
1906
+ ���
1907
+
1908
+ 92.8
1909
+ 0.513
1910
+ 94.1
1911
+ 94.0
1912
+
1913
+
1914
+
1915
+
1916
+ 92.1
1917
+ 0.552
1918
+ 93.2
1919
+ 93.5
1920
+
1921
+
1922
+
1923
+
1924
+ 92.0
1925
+ 0.561
1926
+ 93.1
1927
+ 93.2
1928
+
1929
+
1930
+
1931
+
1932
+ 92.3
1933
+ 0.557
1934
+ 93.0
1935
+ 93.1
1936
+
1937
+
1938
+
1939
+
1940
+ 92.1
1941
+ 0.562
1942
+ 93.4
1943
+ 93.3
1944
+
1945
+
1946
+
1947
+
1948
+ 92.1
1949
+ 0.541
1950
+ 95.1
1951
+ 92.1
1952
+ TABLE XIV
1953
+ EFFECTIVENESS OF GRAPH NODE SET CONSTRUCTION ON THE ACA
1954
+ DATASET. WE REMOVE DIFFERENT COMPONENTS OF GRAPH NODES TO
1955
+ EXPLORE THEIR INFLUENCE ON OUR FRAMEWORK. NOTE THAT DICE,
1956
+ SP AND SR ARE PRESENTED AS PERCENTAGE.
1957
+ es
1958
+ w
1959
+ ea
1960
+ w
1961
+ DICE (%)
1962
+ ASSD
1963
+ SP (%)
1964
+ SR (%)
1965
+ -
1966
+ Y 0
1967
+ A0
1968
+ -
1969
+ -
1970
+ -
1971
+ -
1972
+
1973
+
1974
+
1975
+ 94.2
1976
+ 0.448
1977
+ 97.1
1978
+ 95.1
1979
+
1980
+
1981
+
1982
+ 93.6
1983
+ 0.465
1984
+ 91.3
1985
+ 90.7
1986
+
1987
+
1988
+
1989
+ 93.9
1990
+ 0.462
1991
+ 91.6
1992
+ 90.1
1993
+
1994
+
1995
+
1996
+ 93.7
1997
+ 0.461
1998
+ 91.0
1999
+ 89.8
2000
+
2001
+
2002
+
2003
+ 92.4
2004
+ 0.466
2005
+ 90.2
2006
+ 88.8
2007
+
2008
+
2009
+
2010
+ 92.3
2011
+ 0.472
2012
+ 90.1
2013
+ 87.4
2014
+
2015
+
2016
+
2017
+ 92.2
2018
+ 0.486
2019
+ 90.3
2020
+ 88.1
2021
+
2022
+
2023
+
2024
+ 92.5
2025
+ 0.484
2026
+ 86.1
2027
+ 85.2
2028
+ TABLE XV
2029
+ EFFECTIVENESS OF GRAPH EDGE SET CONSTRUCTION ON THE ACA
2030
+ DATASET. WE REMOVE DIFFERENT COMPONENTS OF GRAPH EDGES TO
2031
+ EXPLORE THEIR INFLUENCE ON OUR FRAMEWORK. IF ALL
2032
+ COMPONENTS ARE REMOVED, GRAPH EDGES BECOME THE
2033
+ TRADITIONAL BINARY EDGES.
2034
+ [7] J. Fu, J. Liu, H. Tian, Y. Li, Y. Bao, Z. Fang, and H. Lu, “Dual attention
2035
+ network for scene segmentation,” in Proceedings of the IEEE Conference
2036
+ on Computer Vision and Pattern Recognition, 2019, pp. 3146–3154.
2037
+ [8] S. Y. Shin, S. Lee, I. D. Yun, and K. M. Lee, “Deep vessel segmentation
2038
+ by learning graphical connectivity,” Medical image analysis, vol. 58, p.
2039
+ 101556, 2019.
2040
+ [9] B. Kong, X. Wang, J. Bai, Y. Lu, F. Gao, K. Cao, J. Xia, Q. Song, and
2041
+ Y. Yin, “Learning tree-structured representation for 3d coronary artery
2042
+ segmentation,” Computerized Medical Imaging and Graphics, vol. 80,
2043
+ p. 101688, 2020.
2044
+ [10] Y. Wang, X. Wei, F. Liu, J. Chen, Y. Zhou, W. Shen, E. K. Fishman, and
2045
+ A. L. Yuille, “Deep distance transform for tubular structure segmentation
2046
+ in ct scans,” in Proceedings of the IEEE/CVF Conference on Computer
2047
+ Vision and Pattern Recognition, 2020, pp. 3833–3842.
2048
+ [11] T. N. Kipf and M. Welling, “Semi-supervised classification with graph
2049
+ convolutional networks,” arXiv preprint arXiv:1609.02907, 2016.
2050
+ [12] P. Veličković, G. Cucurull, A. Casanova, A. Romero, P. Lio, and Y. Ben-
2051
+ gio, “Graph attention networks,” arXiv preprint arXiv:1710.10903, 2017.
2052
+ [13] G. Li, M. Muller, A. Thabet, and B. Ghanem, “Deepgcns: Can gcns go
2053
+ as deep as cnns?” in Proceedings of the IEEE International Conference
2054
+ on Computer Vision, 2019, pp. 9267–9276.
2055
+ [14] H. Gao and S. Ji, “Graph u-nets,” in Proceedings of the 36th Interna-
2056
+ tional Conference on Machine Learning, 2019.
2057
+ [15] K. Kamnitsas, C. Ledig, V. F. Newcombe, J. P. Simpson, A. D. Kane,
2058
+ D. K. Menon, D. Rueckert, and B. Glocker, “Efficient multi-scale 3d
2059
+ cnn with fully connected crf for accurate brain lesion segmentation,”
2060
+ Medical Image Analysis, vol. 36, pp. 61–78, 2017.
2061
+ [16] L.-C. Chen, G. Papandreou, F. Schroff, and H. Adam, “Rethinking
2062
+ Method  | DICE (%) | ASSD  | SP (%) | SR (%)
+ ASOCA Dataset
+ Set1 1  | 88.12    | 0.593 | 94.1   | 95.1
+ Set1 2  | 89.89    | 0.544 | 95.6   | 95.9
+ Set1 3  | 89.01    | 0.566 | 93.1   | 94.2
+ Set1 4  | 88.76    | 0.612 | 93.0   | 93.8
+ Set2 1  | 88.78    | 0.641 | 93.3   | 94.0
+ Set2 2  | 88.64    | 0.633 | 93.1   | 93.9
+ Set2 3  | 89.89    | 0.544 | 95.6   | 95.9
+ Set2 4  | 87.19    | 0.646 | 92.9   | 94.1
+ ACA Dataset
+ Set1 1  | 93.46    | 0.510 | 95.4   | 95.2
+ Set1 2  | 94.20    | 0.448 | 97.1   | 95.1
+ Set1 3  | 93.12    | 0.534 | 95.1   | 93.2
+ Set1 4  | 93.22    | 0.512 | 96.1   | 94.3
+ Set2 1  | 93.80    | 0.476 | 96.2   | 94.3
+ Set2 2  | 92.91    | 0.487 | 94.4   | 93.2
+ Set2 3  | 94.20    | 0.448 | 97.1   | 95.1
+ Set2 4  | 93.12    | 0.493 | 96.2   | 93.1
+ HNA Dataset
+ Set1 1  | 89.12    | 0.498 | 95.1   | 94.0
+ Set1 2  | 90.10    | 0.449 | 96.0   | 94.2
+ Set1 3  | 88.81    | 0.564 | 93.2   | 92.9
+ Set1 4  | 89.11    | 0.571 | 92.6   | 91.7
+ Set2 1  | 88.24    | 0.464 | 95.0   | 94.0
+ Set2 2  | 89.98    | 0.541 | 95.1   | 93.9
+ Set2 3  | 90.10    | 0.449 | 96.0   | 94.2
+ Set2 4  | 90.01    | 0.448 | 95.7   | 93.7
+ TABLE XVI. Performance comparison on the ASOCA, ACA, and HNA datasets among different settings of n_segments and
+ min_size_factor. Performance is measured in terms of four metrics: DICE, ASSD, SP and SR.
2195
+ atrous convolution for semantic image segmentation,” arXiv preprint
2196
+ arXiv:1706.05587, 2017.
2197
+ [17] H. Zhao, J. Shi, X. Qi, X. Wang, and J. Jia, “Pyramid scene parsing
2198
+ network,” in Proceedings of the IEEE conference on computer vision
2199
+ and pattern recognition, 2017, pp. 2881–2890.
2200
+ [18] A. Tao, K. Sapra, and B. Catanzaro, “Hierarchical multi-scale attention
2201
+ for semantic segmentation,” arXiv preprint arXiv:2005.10821, 2020.
2202
+ [19] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks
2203
+ for biomedical image segmentation,” in International Conference on
2204
+ Medical image computing and computer-assisted intervention. Springer,
2205
+ 2015, pp. 234–241.
2206
+ [20]
2207
+ Özgün Çiçek, A. Abdulkadir, S. S. Lienkamp, T. Brox, and O. Ron-
2208
+ neberger, “3d u-net: Learning dense volumetric segmentation from
2209
+ sparse annotation,” in International Conference on Medical Image
2210
+ Computing and Computer-Assisted Intervention, 2016, pp. 424–432.
2211
+ [21] X. Li, H. Chen, X. Qi, Q. Dou, C.-W. Fu, and P.-A. Heng, “H-denseunet:
2212
+ hybrid densely connected unet for liver and tumor segmentation from ct
2213
+ volumes,” IEEE transactions on medical imaging, vol. 37, no. 12, pp.
2214
+ 2663–2674, 2018.
2215
+ [22] Q. Dou, L. Yu, H. Chen, Y. Jin, X. Yang, J. Qin, and P.-A.
2216
+ Heng, “3d deeply supervised network for automated segmentation
2217
+ of volumetric medical images,” Medical Image Analysis, vol. 41,
2218
+ pp. 40–54, 2017, special Issue on the 2016 Conference on Medical
2219
+ Image Computing and Computer Assisted Intervention (Analog to
2220
+ MICCAI 2015). [Online]. Available: https://www.sciencedirect.com/
2221
+ science/article/pii/S1361841517300725
2222
+ [23] Q. Zhu, B. Du, B. Turkbey, P. L. Choyke, and P. Yan, “Deeply-supervised
2223
+ cnn for prostate segmentation,” in 2017 International Joint Conference
2224
+ on Neural Networks (IJCNN), 2017, pp. 178–184.
2225
+ [24] J. Jiang, Y.-C. Hu, C.-J. Liu, D. Halpenny, M. D. Hellmann, J. O. Deasy,
2226
+ G. Mageras, and H. Veeraraghavan, “Multiple resolution residually
2227
+ connected feature streams for automatic lung tumor segmentation from
2228
+ ct images,” IEEE Transactions on Medical Imaging, vol. 38, no. 1, pp.
2229
+ 134–144, 2019.
2230
+ [25] R. Zhang, G. Li, Z. Li, S. Cui, D. Qian, and Y. Yu, “Adaptive context
2231
+ selection for polyp segmentation,” in International Conference on Med-
2232
+ ical Image Computing and Computer-Assisted Intervention.
2233
+ Springer,
2234
+ 2020, pp. 253–262.
2235
2238
+ [26] D.-P. Fan, G.-P. Ji, T. Zhou, G. Chen, H. Fu, J. Shen, and L. Shao,
2239
+ “Pranet: Parallel reverse attention network for polyp segmentation,” in
2240
+ International Conference on Medical Image Computing and Computer-
2241
+ Assisted Intervention.
2242
+ Springer, 2020, pp. 263–273.
2243
+ [27] F. Isensee, J. Petersen, A. Klein, D. Zimmerer, P. F. Jaeger, S. Kohl,
2244
+ J. Wasserthal, G. Koehler, T. Norajitra, S. Wirkert et al., “nnu-net: Self-
2245
+ adapting framework for u-net-based medical image segmentation,” arXiv
2246
+ preprint arXiv:1809.10486, 2018.
2247
+ [28] H. Zhou, J. Guo, Y. Zhang, L. Yu, L. Wang, and Y. Yu, “nnformer:
2248
+ Interleaved transformer for volumetric segmentation,” CoRR, vol.
2249
+ abs/2109.03201, 2021. [Online]. Available: https://arxiv.org/abs/2109.
2250
+ 03201
2251
+ [29] J. M. J. Valanarasu, P. Oza, I. Hacihaliloglu, and V. M. Patel, “Medical
2252
+ transformer: Gated axial-attention for medical image segmentation,” in
2253
+ Medical Image Computing and Computer Assisted Intervention – MIC-
2254
+ CAI 2021, M. de Bruijne, P. C. Cattin, S. Cotin, N. Padoy, S. Speidel,
2255
+ Y. Zheng, and C. Essert, Eds. Cham: Springer International Publishing,
2256
+ 2021, pp. 36–46.
2257
+ [30] D. Zhang, S. Liu, S. Chaganti, E. Gibson, Z. Xu, S. Grbic, W. Cai,
2258
+ and D. Comaniciu, “Graph attention network based pruning for recon-
2259
+ structing 3d liver vessel morphology from contrasted ct images,” arXiv
2260
+ preprint arXiv:2003.07999, 2020.
2261
+ [31] L. Yao, P. Jiang, Z. Xue, Y. Zhan, D. Wu, L. Zhang, Q. Wang, F. Shi, and
2262
+ D. Shen, “Graph convolutional network based point cloud for head and
2263
+ neck vessel labeling,” in International Workshop on Machine Learning
2264
+ in Medical Imaging.
2265
+ Springer, 2020, pp. 474–483.
2266
+ [32] X. Xu, T. Wang, Y. Shi, H. Yuan, Q. Jia, M. Huang, and J. Zhuang,
2267
+ “Whole heart and great vessel segmentation in congenital heart disease
2268
+ using deep neural networks and graph matching,” in International
2269
+ Conference on Medical Image Computing and Computer-Assisted In-
2270
+ tervention.
2271
+ Springer, 2019, pp. 477–485.
2272
+ [33] R. Achanta, A. Shaji, K. Smith, A. Lucchi, P. Fua, and S. Süsstrunk,
2273
+ “Slic superpixels compared to state-of-the-art superpixel methods,” IEEE
2274
+ transactions on pattern analysis and machine intelligence, vol. 34,
2275
+ no. 11, pp. 2274–2282, 2012.
2276
+ [34] Y. Liu, F. Zhang, Q. Zhang, S. Wang, Y. Wang, and Y. Yu, “Cross-view
2277
+ correspondence reasoning based on bipartite graph convolutional net-
2278
+ work for mammogram mass detection,” in Proceedings of the IEEE/CVF
2279
+ Conference on Computer Vision and Pattern Recognition, 2020, pp.
2280
+ 3812–3822.
2281
+ [35] Q. Yue, X. Luo, Q. Ye, L. Xu, and X. Zhuang, “Cardiac segmentation
2282
+ from lge mri using deep neural network incorporating shape and spatial
2283
+ priors,” in International Conference on Medical Image Computing and
2284
+ Computer-Assisted Intervention.
2285
+ Springer, 2019, pp. 559–567.
2286
+ [36] S. van der Walt, J. L. Schönberger, J. Nunez-Iglesias, F. Boulogne,
2287
+ J. D. Warner, N. Yager, E. Gouillart, T. Yu, and the scikit-image
2288
+ contributors, “scikit-image: image processing in Python,” PeerJ, vol. 2,
2289
+ p. e453, 6 2014. [Online]. Available: https://doi.org/10.7717/peerj.453
2290
+ [37] K. He, X. Zhang, S. Ren, and J. Sun, “Deep residual learning for image
2291
+ recognition,” in Proceedings of the IEEE conference on computer vision
2292
+ and pattern recognition, 2016, pp. 770–778.
2293
+ [38] A. Paszke, S. Gross, F. Massa, A. Lerer, J. Bradbury, G. Chanan,
2294
+ T. Killeen, Z. Lin, N. Gimelshein, L. Antiga et al., “Pytorch: An
2295
+ imperative style, high-performance deep learning library,” in Advances
2296
+ in neural information processing systems, 2019, pp. 8026–8037.
2297
+ [39] D. P. Kingma and J. Ba, “Adam: A method for stochastic optimization,”
2298
+ arXiv preprint arXiv:1412.6980, 2014.
2299
+ [40] Z. Zhang, Q. Liu, and Y. Wang, “Road extraction by deep residual u-
2300
+ net,” IEEE Geoscience and Remote Sensing Letters, vol. 15, no. 5, pp.
2301
+ 749–753, 2018.
2302
+ [41] SLIC CUDA, https://github.com/fderue/SLIC CUDA. [Online]. Avail-
2303
+ able: https://github.com/fderue/SLIC CUDA
2304
+
5dE0T4oBgHgl3EQfegDz/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
69AzT4oBgHgl3EQfEvpR/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7962f394cfc25c59442e20668c3410a72f925f539d3346875b4d618ab639b934
3
+ size 8388653
6NE0T4oBgHgl3EQfewDQ/content/tmp_files/2301.02396v1.pdf.txt ADDED
@@ -0,0 +1,1674 @@
1
+ Don’t follow the leader: Independent thinkers create scientific
2
+ innovation
3
+ Sean Kelty,1 Raiyan Abdul Baten,2 Adiba Mahbub Proma,2
4
+ Ehsan Hoque,2 Johan Bollen,3 and Gourab Ghoshal1, 2, ∗
5
+ 1Department of Physics & Astronomy,
6
+ University of Rochester, Rochester, NY 14607, USA
7
+ 2Department of Computer Science, University of Rochester, Rochester, NY 14607, USA
8
+ 3Luddy School of Informatics, Computing and Engineering,
9
+ 919 E. 10th St., Bloomington, IN 47408, USA
10
+ Abstract
11
+ Academic success is distributed unequally; a few top scientists receive the bulk of attention,
12
+ citations, and resources. However, do these “superstars” foster leadership in scientific innovation?
13
+ We introduce three information-theoretic measures that quantify novelty, innovation, and impact
14
+ from scholarly citation networks, and compare the scholarly output of scientists who are either not
15
+ connected or strongly connected to superstar scientists. We find that while connected scientists do
16
+ indeed publish more, garner more citations, and produce more diverse content, this comes at a cost
17
+ of lower innovation and higher redundancy of ideas. Further, once one removes papers co-authored
18
+ with superstars, the academic output of these connected scientists diminishes. In contrast, authors
19
+ that produce innovative content without the benefit of collaborations with scientific superstars
20
+ produce papers that connect a greater diversity of concepts, publish more, and have comparable
21
+ citation rates, once one controls for transferred prestige of superstars. On balance, our results
22
+ indicate that academia pays a price by focusing attention and resources on superstars.
23
+ ∗ Correspondence email address: gghoshal@pas.rochester.edu
24
+ 1
25
+ arXiv:2301.02396v1 [cs.DL] 6 Jan 2023
26
+
27
+ I.
28
+ INTRODUCTION
29
+ “To truly make an apple pie from scratch you must first invent the universe”—a quote
30
+ attributed to Carl Sagan [1]—illustrates the idea that the process by which individuals cre-
31
+ ate is contingent upon the elements on which that creation is based. Whether creating a
32
+ new piece of music, going about daily routines, or engaging in scientific research, people’s
33
+ actions are founded in the information, experiences, and relationships that they have estab-
34
+ lished by themselves and through others [2–5]. Each person has their own basis of knowledge
35
+ that stems from their own lived experiences while also existing in a network of relationships
36
+ through which they share experiences and knowledge with each other, thereby informing a
37
+ collective understanding among a network of connected individuals [6]. Within such net-
38
+ works, hierarchies can emerge in which some actors exert greater social influence over the
39
+ network and thus the creative process that it supports, while others may influence only those
40
+ closest to them or no one at all [7]. This social hierarchy is common in the societal dynamics
41
+ of government and politics, where some individuals and institutions exert a great degree of
42
+ influence over the flow of information in the system and opinion formation [8–10].
43
+ Academia is not immune from the emergence of social hierarchies; some academics can
44
+ function as figures of authority due to the merit and influence of their work and their promi-
45
+ nent position in a network of academic collaborations. Citations as an indicator of academic
46
+ influence [11] have long been known to be distributed very unequally[12], with a minority
47
+ of a few scientists receiving most citations. Such inequality may be increasing at a global
48
+ level[13], at least with respect to citation numbers. In academic publishing, biasing effects
49
+ like this have been studied under the lens of the Matthew Effect, where success begets more
50
+ success and early success compounds into a cumulative advantage as the “rich get richer”
51
+ [14]. There are arguments that this effect is beneficial for academia; the rewards of top
52
+ researchers are proportional to their contributions, which ensures the “epistemic security”
53
+ of the field [15]. This thinking is aligned with the notion that science should operate as a
54
+ meritocracy; those who contribute the most are also valued the most, and will therefore be
55
+ most influential. Indeed, there is a high degree of trust in our most successful academics and
56
+ the value of their mentorship. For instance, junior researchers collaborating with top scien-
57
+ tists at the early stages of their career are likely to become top-cited scientists themselves,
58
60
+ especially those at less prestigious universities [16]. Inexperienced academics can benefit
61
+ from apprenticeships with top scientists; the “chaperoning” of early-career scientists leads to
62
+ higher rates of publication in high-impact journals [17]. These relationships are frequently
63
+ mutually beneficial. Less visible authors benefit from more opportunities to publish papers
64
+ in high quality journals that attract larger audiences, whereas top scientists gain collabo-
65
+ rators with unique skills to produce more high quality work [18]. Close collaboration of
66
+ less visible academics with those in the upper echelons can furthermore create opportunities
67
+ for a first-mover advantage, inducing a positive feedback loop and early bandwagoning of
68
+ innovative ideas [19].
69
+ While top academics (sometimes referred to as “superstars”) may make consistent and
70
+ high impact contributions that benefit their field and collaborators, their status as super-
71
+ stars may also have deleterious effects due to the subsequent concentration of resources and
72
+ attention. For instance, it has been shown that the collaborators of academic superstars
73
+ experience a 5 to 9% drop in publication rates after the sudden death of that superstar [20],
74
+ highlighting their dependence on the superstar’s collaboration. In fact, it is unclear whether
75
+ collaborating with superstars truly fosters independent career development [21, 22]. Further-
76
+ more, superstars can induce a high degree of inequality in the distribution of research funding
77
+ due to a funding Matthew-effect. Those who receive funding accumulate twice as much re-
78
+ search funding afterwards compared to those who submitted similarly valued proposals but
79
+ found themselves, by chance, just below the funding threshold. There is no evidence that
80
+ this accumulation of research funding is due to actual achievements enabled by previous
81
+ funding [23, 24]. If successful collaborations with superstars lead to early funding success,
82
+ this can induce a superstar-fueled funding cycle that increasingly widens the gap between
83
+ scientific haves and have-nots.
84
+ The topology, structure, and characteristics of scientific collaboration networks may play
85
+ an important role in these effects since they shape both the production and dissemination
86
+ of ideas, potentially with conflicting outcomes. Tightly connected networks could be more
87
+ efficient in distributing and leveraging knowledge thereby yielding higher productivity, but
88
+ may at the same time lead to a decline of diversity, reducing exploration and discovery [25–
89
+ 27]. Although some spillover effects may occur, i.e. collaborators of highly-acclaimed authors
90
+ benefit by proxy [28], it is not clear whether the concentration of attention of resources
91
93
+ towards superstars yields more novel and innovative research. This is a particularly relevant
94
+ issue with the rise of interdisciplinary research which relies on the ability of scientists to
95
+ collaborate in equitable teams that foster creativity and innovation across various research
96
+ fields [29].
97
+ To investigate the effects of superstar influence on academic productivity, impact, and
98
+ innovation, we perform a comprehensive analysis of the American Physical Society corpus.
99
+ Following [20], we define superstars as academics who are among the top .1% in terms of
100
+ their h-index [30, 31]. We extract the semantic content of over 250,000 abstracts, defining
101
+ a number of information-theoretic measures to quantify the novelty and innovation of each
102
+ paper. We augment this with analysis of publication and citation rates, and examine the
103
+ difference in academic output between researchers who collaborate with or cite frequently
104
+ papers by superstars against those with little-to-no connection to such superstars. We find
105
+ that at the individual level, collaborators and frequent citers of superstars, publish more,
106
+ garner higher citations and produce papers with more diverse content compared to other
107
+ academics. However, their work is no more innovative than the rest of the corpus and its
108
+ content is more redundant. Further, once one excludes papers co-authored with superstars,
109
+ their publication and citation output are no different from the rest of the corpus and in some
110
+ cases output is lower.
111
+ Focusing on early career researchers, we find that those who frequently collaborate with
112
+ superstars in the beginning of their careers, do eventually go on to produce impressive
113
+ academic output, although once the collaboration is removed, their output in terms of publi-
114
+ cation rates, citation impact, and innovation is significantly diminished. On the other hand,
115
+ early career researchers that produce innovative content without the benefit of early super-
116
+ star collaboration, continue to produce such content over the rest of their careers. They
117
+ publish more than early collaborators of superstars and accrue similar citation numbers,
118
+ once one controls for the collaboration itself.
119
121
+ II.
122
+ RESULTS
123
+ A.
124
+ Data
125
+ We use the American Physical Society (APS) corpus [32] that contains articles published
126
+ in APS journals since 1893.
127
+ The data set contains full citation data, i.e. the citations
128
+ pointing from the references of one article to another, allowing a reconstruction of the full
129
+ citation network among all articles, including article-specific fields such as DOI, journal,
130
+ volume, issue, first page and last page OR article id and number of pages, title, authors,
131
+ affiliations, publication history, PACS codes, table of contents heading, article type, and
132
+ copyright information. Given that the data does not include article abstracts, we used a
133
+ web-scraping algorithm [33] to collect abstracts for 250,628 articles corresponding to between
134
+ 35-40% of all published papers across the different APS journals (Fig. S1). We note that
135
+ around 1% of these articles have references not contained in the APS citation network, and
136
+ on average we scraped abstracts for 38% of paper references. The distribution of citations
137
+ and h-index are both heavy-tailed (Fig. S2), with the average number of citations being 14.4
138
+ and the average h-index 1.74. Author disambiguation was done using a rule-based scoring
139
+ method [34] (Cf. Sec.S1.2) We consider authors who first publish on or after 1970, and define
140
+ superstars as those with the top .1% of h-index in the corpus, corresponding to an h-index
141
+ threshold of 21. This yields 303 superstars among 292,394 authors. The summary statistics
142
+ can be found in Tab. S1.
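As a concrete illustration of this selection rule (not the authors' code), the h-index computation and the top-0.1% cut could look like the following sketch; the function names and the dictionary input format are assumptions.

```python
import numpy as np

def h_index(citations):
    """Largest h such that the author has h papers with at least h citations each."""
    c = np.sort(np.asarray(citations))[::-1]
    return int((c >= np.arange(1, len(c) + 1)).sum())

def superstars(author_citations, top_frac=0.001):
    """author_citations: {author_id: [citations per paper]}.
    Returns the set of authors in the top `top_frac` by h-index."""
    h = {a: h_index(c) for a, c in author_citations.items()}
    cutoff = np.quantile(list(h.values()), 1.0 - top_frac)
    return {a for a, v in h.items() if v >= cutoff}
```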
143
+ In order to extract topics from the collected abstracts, we use an unsupervised Latent
144
+ Dirichlet Allocation (LDA) algorithm on phrases (P-LDA) [35] to establish vector embed-
145
+ dings for phrases and documents within our corpus. Stop words in the corpus were removed,
146
+ all words were lemmatized, and phrases were determined based on a significance score that
147
+ determined whether or not phrases occurred due to random chance. These vector embed-
148
+ dings have dimensionality k corresponding to the number of topics defined for our corpus.
149
+ P-LDA utilizes Gibbs Sampling to generate distributions of topics over phrases as well as
150
+ documents [36], from which novelty scores can be extracted based on topic-spread.
151
+ We
152
+ choose a number of topics k based on the UMass coherence measure ([37]), the value of
153
+ which first stabilizes at k = 25 topics (Fig. S3). Tab. S2 shows the top 10 terms per topic.
154
+ The resulting output for each document u is a k-dimensional vector v^u whose elements v^u_i
+ correspond to the frequency of topic i extracted from its abstract (example in Tab. S3).
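A rough stand-in for this model-selection step, using gensim's standard LDA and UMass coherence rather than the phrase-level P-LDA of [35], is sketched below for illustration.

```python
from gensim.corpora import Dictionary
from gensim.models import CoherenceModel, LdaModel

def umass_coherence(tokenized_abstracts, k):
    """Fit a k-topic LDA on tokenized abstracts and return its UMass coherence;
    k is chosen where the score first stabilizes (k = 25 in the paper)."""
    dictionary = Dictionary(tokenized_abstracts)
    corpus = [dictionary.doc2bow(doc) for doc in tokenized_abstracts]
    lda = LdaModel(corpus=corpus, id2word=dictionary, num_topics=k, random_state=0)
    return CoherenceModel(model=lda, corpus=corpus, dictionary=dictionary,
                          coherence='u_mass').get_coherence()
```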
159
+ B.
160
+ Novelty, innovation and redundancy
161
+ Novelty detection in the literature has been implemented in a variety of ways [38], such
162
+ as contextualizing novelty in machine learning as information retrieval [39, 40], distant com-
163
+ binations of ideas via citation relations [41], first-pass combinations of concepts never before
164
+ connected [42], knowledge-graphs of concepts within social networks [26], and agent-based
165
+ simulations of social and individual learning [27].
166
+ Here we rely on document-level embeddings that represent a distribution of all topics
167
+ contained within the abstract of a given paper, from which one can define the topic diversity
+ of a paper, its references, and the articles that cite the paper. Using this, we define a
169
+ variety of metrics capturing different aspects of novelty and innovation.
170
+ Coupling connections between authors and the content of their works can then elucidate
171
+ the influence that superstars have on the success of and novelty produced by other academics.
172
+ Entropy: For a given document u, we define the Shannon entropy as
173
+ I_u^{(S)} = -\sum_{i=1}^{k} v_i^u \ln v_i^u , \qquad (1)
183
+ The expression quantifies the average level of “surprise” or uncertainty over the outcomes
184
+ of a random variable [43]. In this context, papers whose abstracts focus on a limited number
+ of topics will yield low values of I_u^{(S)}, whereas those with a wide diversity of topics will
+ yield a larger value of the entropy.
188
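+ A minimal numerical sketch of Eq. (1) (toy vectors; the function name is ours):
+ import numpy as np
+ 
+ def shannon_entropy(v_u, eps=1e-12):
+     # Eq. (1): I_u^(S) = -sum_i v_i ln v_i over the document's topic-frequency vector.
+     v = np.asarray(v_u, dtype=float)
+     v = v / v.sum()                               # ensure a proper distribution
+     return float(-np.sum(v * np.log(v + eps)))    # eps guards against log(0)
+ 
+ # A narrowly focused abstract vs. a topically broad one (toy vectors with k = 4).
+ print(shannon_entropy([0.97, 0.01, 0.01, 0.01]))  # low entropy
+ print(shannon_entropy([0.25, 0.25, 0.25, 0.25]))  # high entropy (= ln 4)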
+ Reference and Citation Diversity: While I_u^{(S)} measures the “surprise” with respect to a
+ paper’s content, in this case its abstract, references and citations capture the degree to which
+ the ideas in a given paper were inspired by other papers (references) or served as inspiration
+ to other papers (citations). We can thus measure the novelty of a paper, or its Information
+ Diversity [44], by evaluating the dispersion of the topics of its references or of the citations it
+ receives. The greater the variance of the topic distribution, the higher the information diversity.
+ For a set X_u, which can represent either the references in paper u or the citations to paper u,
+ we define the quantity,
200
+ I_u^{(X)} = \frac{1}{|X_u|} \sum_{l \in X_u} \left[ 1 - \cos\left( v^l, \bar{X}_u \right) \right] \qquad (2)
212
+ where \cos(v^l, \bar{X}_u) is the cosine similarity of the vector embedding of a particular
+ reference/citation v^l with the average over the vector embeddings of all references/citations
+ in the set X_u. We can as such define reference diversity and citation diversity as the
+ information diversity over the references from a paper and the citations to the paper, respectively.
219
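+ A minimal sketch of Eq. (2) for either the reference set or the citation set of a paper (toy
+ vectors; function and variable names are ours):
+ import numpy as np
+ 
+ def information_diversity(vectors):
+     # Eq. (2): mean of (1 - cosine similarity) between each reference/citation vector
+     # and the average vector of the set X_u.
+     X = np.asarray(vectors, dtype=float)
+     mean_vec = X.mean(axis=0)
+     cos = X @ mean_vec / (np.linalg.norm(X, axis=1) * np.linalg.norm(mean_vec))
+     return float(np.mean(1.0 - cos))
+ 
+ # Toy topic vectors for the references of one paper (one row per reference).
+ refs = [[0.8, 0.1, 0.1],
+         [0.7, 0.2, 0.1],
+         [0.1, 0.1, 0.8]]
+ print(information_diversity(refs))  # larger when the references are topically spread out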
+ Innovation: The metrics defined thus far are based on topic models expressed as topic dis-
220
+ tributions per document derived from the words in their content (abstracts). These metrics
221
+ capture the topic diversity of the paper itself, or of its influences, but do not express the degree
+ to which the paper expanded the literature through innovation. In other words, they express
+ what the documents themselves are about, but not whether they add to the diversity of the
+ literature. We therefore define Innovation as the degree to which a document adds topics
+ in new combinations to the literature [45, 46]. Specifically, innovation in this context is a
+ measurement of when terms were first introduced or combined in the corpus (cf. Sec. S1.4
227
+ and Fig. S4). Coupled with the novelty measures, this allows us to track how the diversity of
228
+ ideas correlates with new conceptual recombinations and co-occurrences of terms. Following
229
+ this logic, we define the Innovativeness of paper u as
230
+ I_u^{(I)} = \frac{1}{2} \sum_{w_1 \neq w_2 \in u} I(w_1, w_2; u) \qquad (3)
238
+ where w_1 and w_2 are distinct terms in paper u, I(w_1, w_2; u) is an indicator function that is
+ 1 if terms w_1 and w_2 are first seen within the corpus in paper u and 0 otherwise, and the
+ prefactor 1/2 accounts for double counting. To remove spurious conceptual links due to chance or
242
+ extreme rarity, we calculate a point-wise mutual information for all links as the log ratio of
243
+ co-occurrence probability over the individual probabilities of each concept [46]. In Fig. S5 we
244
+ determine the Pearson’s r correlation coefficients between each measure and find only weak
245
+ correlations, indicating that each measure captures a different aspect of academic output.
246
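+ A minimal sketch of the counting behind Eq. (3) (toy corpus; in the paper, spurious pairs are
+ additionally filtered with the pointwise-mutual-information criterion described above):
+ from itertools import combinations
+ 
+ def innovation_scores(papers):
+     # For papers ordered by publication date, count the term pairs that co-occur for the
+     # first time anywhere in the corpus in that paper.
+     # `papers` maps paper id -> set of (pre-processed) terms in its abstract.
+     seen_pairs = set()
+     scores = {}
+     for pid, terms in papers.items():          # assumed chronological order
+         pairs = {frozenset(p) for p in combinations(sorted(terms), 2)}
+         new_pairs = pairs - seen_pairs
+         scores[pid] = len(new_pairs)
+         seen_pairs |= new_pairs
+     return scores
+ 
+ papers = {
+     "p1": {"quantum", "entanglement"},
+     "p2": {"quantum", "cosmolog"},      # first "quantum"-"cosmolog" pairing in the corpus
+     "p3": {"quantum", "entanglement"},  # nothing new
+ }
+ print(innovation_scores(papers))  # {'p1': 1, 'p2': 1, 'p3': 0}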
+ Redundancy: Finally, in a related context, in the field of creative ideation, it has been
247
+ reported that inspirees stimulated by highly creative alters tend to generate more creative
+ ideas [47–49]. However, as a group, the inspirees’ ideas were found to be similar to each other,
249
+ 7
250
+
251
+ leading to redundancy in generated ideas over time at the group level. To check whether
252
+ a similar effect manifests in academic publishing, we compute the cosine similarity score
253
+ between papers u, u′ in the set P(G, s, t) thus
254
+ \mathrm{Sim}(G, s, t) = \frac{2}{|P(G,s,t)|\,(|P(G,s,t)| - 1)} \sum_{u, u' \in P(G,s,t)} \cos(v^u, v^{u'}). \qquad (4)
261
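+ A minimal sketch of the redundancy measure in Eq. (4) (toy vectors; names are ours):
+ import numpy as np
+ from itertools import combinations
+ 
+ def group_similarity(vectors):
+     # Eq. (4): average pairwise cosine similarity among the papers P(G, s, t)
+     # published by one inspiration group in a given year.
+     V = np.asarray(vectors, dtype=float)
+     V = V / np.linalg.norm(V, axis=1, keepdims=True)
+     sims = [float(V[i] @ V[j]) for i, j in combinations(range(len(V)), 2)]
+     return float(np.mean(sims))
+ 
+ # Toy topic vectors of three inspired papers.
+ print(group_similarity([[0.9, 0.1, 0.0],
+                         [0.8, 0.2, 0.0],
+                         [0.1, 0.1, 0.8]]))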
+ C.
262
+ Superstar statistics
263
+ We next examine whether the novelty and innovation produced by superstars are sig-
264
+ nificantly different from the rest of the academic corpus. In Fig. 1 we plot the Reference
265
+ and Citation diversity (Eq. (2)), the Shannon entropy (Eq. (1)) and Innovation (Eq. (3))
266
+ comparing the set of superstar academics against the rest of the authors in the corpus. In
267
+ terms of reference diversity, citation diversity and Shannon entropy, superstars outperform
268
+ the remaining academics by 20%, 15%, and 2% respectively. That is, superstars are inspired
269
+ by a higher diversity of content, publish works that are more conceptually diverse, and in-
270
+ spire a wider array of publications than non-superstars. The starkest contrast can be seen in
271
+ terms of Innovation, where there is a factor of ten difference between superstars and other
272
+ academics indicating that the former are more prolific in introducing new combinations of
273
+ terms. We note that there is a monotonic dependence of the metrics with number of pub-
274
+ lications for all academics, although the effect is more pronounced for superstars (Fig. S6).
275
+ Furthermore, there is also a monotonic dependence of citations received by a paper u and
276
+ the novelty/innovation metrics (once again more pronounced for superstars) indicating that
277
+ an increase in conceptual diversity and the ability to connect concepts for the first time is
278
+ rewarded in terms of more attention paid to that paper (Fig. S7).
279
+ D.
280
+ Superstar influence
281
+ Having established that superstars outperform other academics in terms of our metrics,
282
+ we next determine to what degree superstars affect the academic output of their collabo-
283
+ rators and their “inspirees” (those inspired by their work). Inspirees are authors that cite
284
+ a superstar’s papers, for whom we determine the degree of inspiration by the frequency of
285
+ 8
286
+
287
+ [Figure 1: bar charts comparing non-superstars and superstars; see caption below.]
+ FIG. 1. Average author-level statistics of novelty and innovation. A Reference Diversity, B
321
+ Citation Diversity, C Shannon Entropy, D Innovation. The orange bar is for superstars (h-index
322
+ ≥ 21) and the blue bars correspond to all other authors in the corpus.
323
+ citations. We examine inspirees both at the group- and individual-levels. At the group-
324
+ level, we center the superstar in a network of inspirees where the degree of inspiration is
325
+ the number of times a researcher cites the superstar. We then partition the inspirees into
326
+ groups based on their degree of inspiration, where the upper bounds for each bin are the top
327
+ 10% of inspirees, 20%, 30%, 50%, and 100%. These groups represent increasingly weakening
328
+ ties to a given superstar; those in the top 10 percent are the most actively inspired, while
329
+ the bottom 50 percent typically cite the superstar only once. Note that some inspirees in
330
+ the bottom 50 group of one superstar may be in the top group of another superstar. The
331
+ increasing bin sizes are chosen to account for the decreasing frequency of inspired citations
332
+ among the least-inspired inspirees, such that there is a sufficient number of papers compared
333
+ between groups.
334
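+ A minimal sketch of this group construction (hypothetical citation counts; the bin edges match
+ the percentages quoted above):
+ def partition_inspirees(cites_to_superstar, edges=(0.10, 0.20, 0.30, 0.50, 1.00)):
+     # Rank inspirees by how often they cite a given superstar and split them at the
+     # top 10%, 20%, 30%, 50% and 100% marks.
+     # `cites_to_superstar` maps author id -> number of citations to that superstar.
+     authors = sorted(cites_to_superstar, key=cites_to_superstar.get, reverse=True)
+     n = len(authors)
+     groups, start = {}, 0
+     for lo, hi in zip((0.0,) + edges[:-1], edges):
+         stop = int(round(hi * n))
+         groups[f"{lo:.2f}-{hi:.2f}"] = authors[start:stop]
+         start = stop
+     return groups
+ 
+ # Hypothetical citation counts from ten inspirees to one superstar.
+ counts = {f"a{i}": c for i, c in enumerate([40, 22, 15, 9, 7, 4, 3, 2, 1, 1])}
+ print(partition_inspirees(counts))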
+ Given that we are interested in the temporal evolution of superstar influence on the novelty
335
+ and innovation of the inspirees, we denote the year of the first superstar publication as t0 = 0
336
+ and for every subsequent year t > t0, we consider the set of publications by the inspirees
337
+ who cite the superstar. For each partitioned group, we calculate the average novelty of all
338
+ of the publications in year t per partition. Denoting the set of papers inspired by superstar
339
+ 9
340
+
341
+ [Figure 2: temporal trajectories (vs. years after first superstar publication) and aggregated values per inspiration group; see caption below.]
+ FIG. 2. Novelty and Innovation statistics at the group level. Temporal trajectory of average
421
+ paper-level statistics. A: Shannon Entropy, B: Innovation, C: Citations per-paper. Aggregated
422
+ group-level statistics D: Shannon Entropy, E: Innovation, F: Citations per-paper. Curves indicate
423
+ averages, shaded area 95% confidence interval.
424
+ s for partition G at year t as P(G, s, t), the average novelty scores are computed as
425
+ \langle I_u^{(l)} \rangle_{G,s,t} = \frac{1}{|P(G, s, t)|} \sum_{u \in P(G,s,t)} I_u^{(l)} \qquad (5)
434
+ where l = S, X, I is the novelty or innovation score of paper u.
435
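+ A minimal sketch of the averaging in Eq. (5) (hypothetical inputs and names):
+ from collections import defaultdict
+ 
+ def yearly_group_average(papers, score):
+     # Average a per-paper score I_u^(l) over the papers P(G, s, t) of each inspiration
+     # group G in each year t after the superstar's first publication.
+     # `papers` is an iterable of (group, year, paper_id); `score` maps paper_id -> I_u.
+     sums, counts = defaultdict(float), defaultdict(int)
+     for group, year, pid in papers:
+         sums[(group, year)] += score[pid]
+         counts[(group, year)] += 1
+     return {key: sums[key] / counts[key] for key in sums}
+ 
+ papers = [("0.00-0.10", 5, "p1"), ("0.00-0.10", 5, "p2"), ("0.50-1.00", 5, "p3")]
+ score = {"p1": 3.2, "p2": 3.4, "p3": 3.1}
+ print(yearly_group_average(papers, score))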
+ We plot the results of our analysis in Fig. 2. In terms of the temporal evolution of the
436
+ Shannon entropy, while there is a monotonic increase—reflecting an increase in the body
437
+ of knowledge with time (Fig. S8)—we find little-to-no differences across the groups as seen
438
+ in Fig. 2A. Averaging over the entire temporal range also indicates a flat trend (Fig. 2D).
439
+ Similar trends are seen for the reference diversity both in terms of its temporal evolution
440
+ (upper panel of Fig. S9A,B) as well as their temporally averaged values (lower panel). Unlike
441
+ the entropy or reference diversity, there is a decreasing trend in time for the citation diversity.
442
+ We observe a 5% decrease in the measure between those in the top 10% as compared to the
443
+ bottom 50%. Figure 2B,E indicates the same trend for Innovation which also decreases in
444
+ time across all groups, reflecting a saturation in the number of combinations of new terms
445
+ 10
446
+
447
+ [Figure 3: author-level metrics plotted against the percent of an author's papers that cite a superstar; see caption below.]
+ FIG. 3. Novelty and Innovation statistics at the individual author-level. A Reference
512
+ Diversity, B Citation Diversity, C Shannon Entropy, D Innovation, E Average citation count, F
513
+ Average publication count.
514
+ that are combined by authors as their career progresses. The difference between the top and
515
+ bottom groups is now around 15%. Finally, citations to papers experience an initial boost
516
+ and then decrease in time as seen in Fig. 2C, with now much clearer differences between
517
+ the groups. Indeed, there is a 40% difference in citations per-paper between the most and
518
+ least inspired groups as seen in Fig. 2F.
519
+ In terms of redundancy, in Fig. S9C we plot the cosine similarity (Eq. (4)). As the figure
520
+ indicates, across all groups there is a decreasing trend in the temporal evolution of the
521
+ similarity, yet a clear difference exists, whereby papers published by the top 10% are on
522
+ average 8% more similar to each other in terms of content when compared to the bottom
523
+ 50%. Taken together, the results indicate that groups of authors who cite superstar papers
524
+ often do get a citation boost as compared to other sets of authors. However, their output is
525
+ modestly more innovative and equally novel as compared to the rest of the corpus. Rather,
+ their content is more redundant than that of the remaining sets of authors.
527
+ Next, we dis-aggregate the group-level results and examine the degree of superstar in-
528
+ fluence at the individual author level. In Fig. 3 we plot the averages of the novelty and
529
+ innovation metrics as well as citations and publication counts across authors as a function
530
+ of the fraction of their papers that cite superstars. Given that many authors co-publish
531
+ 11
532
+
533
+ with superstars, the blue curve indicates the results when including such papers, while the
534
+ orange curve shows the results excluding these papers. Figure 3A-C indicate that as au-
535
+ thors cite more superstars they experience an increase in reference and citation diversity as
536
+ well as the Shannon entropy irrespective of whether one includes their collaboration with
537
+ superstars. While we see no indications of novelty of content being driven by superstar-
538
+ influence at the group-level, at the individual level the benefits are clear. On the other hand,
539
+ when looking at Innovation (Fig. 3D), the trend is either flat when including all papers,
540
+ and decreasing when co-authored publications are excluded. Indeed, it appears that the
541
+ more authors cite superstars, the less innovative their own publications become (i.e., those
542
+ not co-authored with a superstar). The benefit of collaborating with a superstar becomes
543
+ even more apparent when looking at citations (Fig. 3E) and number of publications (Fig. 3
544
+ F). For the former when including collaborations there is a dramatic benefit in terms of
545
+ garnered citations (approximately 67% more citations on average) that drops considerably
546
+ when excluding collaborations. Indeed, the citation-benefit appears to be driven primarily
547
+ by being collaborators of superstars who by definition have the largest number of citations to
548
+ their papers. The same appears to be the case for the latter, with the number of publications
549
+ increasing when including collaborations, and decreasing when excluded.
550
+ E.
551
+ Early Collaborators and Early Innovators
552
+ The results thus far provide evidence for academics inspired by superstars producing out-
553
+ put with diverse content that receives visibility via citations, while not necessarily being
554
+ innovative in the sense of tying together new concepts. On the other hand, there is also ev-
555
+ idence that these features are significantly boosted by direct collaboration with superstars,
556
+ and when left to their own devices their publication output, novelty and innovation is lower
557
+ than the rest of the corpus. Indeed, this raises the question of whether superstars foster indepen-
+ dent individual success, or rather inhibit it. For instance, as shown, at the aggregate level,
559
+ the group of authors that cite superstars the most often tend to publish on mostly the same
560
+ topics.
561
+ To further probe this we restrict our analysis to early-career scientists. Given that findings
562
+ from prior studies have shown that collaboration with successful scientists provides a boost
563
+ 12
564
+
565
+ [Figure 4: Citation and Novelty Statistics per Academic Group; see caption below.]
+ FIG. 4. Citations and Innovation for frequent collaborators and early innovators. A
629
+ Citations per paper when including superstar papers, B The same when excluding superstar papers.
630
+ C Temporal evolution of Innovation. D The same when excluding superstar papers. The horizontal
631
+ axis t − t0 indicates the time elapsed from t0, the time of first publication, for authors in either
632
+ group.
633
+ for early career researchers [16], and that early success generates a cumulative advantage of
634
+ long-term career success [14], we define early collaborators as those authors who collaborate
635
+ with superstars in at least half of their papers in the first five years of their career. As a
636
+ point of comparison, we define another set of authors who do not collaborate with, or cite
637
+ superstar papers, but are in the top 10% of the corpus in terms of Innovation as measured
638
+ by their first five years of publications. We term these authors early innovators. We use
639
+ innovation as a metric, given that this is the measure by which superstars outperform other
640
+ academics the most (Fig. 1D) and therefore might serve as a robust indicator of academic
641
+ potential.
642
+ 13
643
+
644
+ For academics in each group we track the temporal evolution of the citations per-paper,
645
+ the number of publications, as well as the Innovation, measured from the date of first pub-
646
+ lication t0 for authors in either group. Early collaborators get more citations per paper
647
+ (Fig. 4A) and publish more than early innovators (Fig. S10A) particularly within the first
648
+ ten years of their career. However, when one removes superstar publications, the trend re-
649
+ verses where now early innovators publish more (Fig. S10B) and garner a comparable rate of
650
+ citations as the other group (Fig. 4B). Additionally, the early innovators maintain a higher
651
+ degree of Innovation throughout their careers as compared to early collaborators (Fig. 4C,
652
+ D) with or without including collaborations to superstars. Thus the evidence suggests that
653
+ while early career scientists indeed get a boost from collaborating with superstars, their own
654
+ academic output is less innovative and equally visible in terms of citations, as compared
655
+ to other early career scientists who produce innovative output without the benefit of such
656
+ collaborations.
657
+ III.
658
+ CONCLUSION AND DISCUSSION
659
+ In the exponentially growing knowledge-base of academia in which visibility and funding
660
+ are increasingly being biased towards top academics and institutions, we examine the influ-
661
+ ence that superstar academics have on the community as a whole and in terms of novelty
662
+ and career success. Superstars provide an irreplaceable source of novel ideas and contribu-
663
+ tions at rates that exceed those of other academics in the corpus; our metrics support that
664
+ their accolades are well deserved and should be rewarded as such. We find superstars are
665
+ highly novel and inspire a higher diversity of concepts among their followers and collabo-
666
+ rators. However, they do inhibit innovation potential. Those academics most inspired by a
+ superstar are individually more diverse in their papers, but at the group level they
+ add less intrinsic novelty than groups more weakly inspired by the superstar, even though
669
+ they achieve higher citations.
670
+ Additionally, we find indications of a strong Matthew Effect whereby academics who cite a
671
+ superstar highly receive higher citations when collaborating with the superstar than without,
672
+ despite higher gains in concept diversity than academic counterparts. Though collaboration
673
+ with successful academics can stimulate a successful career path, we find these collaborations
674
+ 14
675
+
676
+ can stifle innovation and may not provide the best indicator of long-term independent career
677
+ success.
678
+ Collaboration is a requirement to tackle increasingly difficult interdisciplinary problems.
679
+ Superstars are well-positioned to foster interdisciplinary research efforts by supporting early-
680
+ career researchers. Although the latter receive a citation boost when collaborating with a
681
+ superstar, this does not imply that they are developing more novel work than their colleagues
682
+ who are less connected to top academics. In fact, our results indicate that those closest to
683
+ a superstar show the lowest innovation potential. This is slightly surprising given that the
684
+ literature has shown that junior researchers who collaborate with superstars are more likely
685
+ to publish in high quality journals and have increased chances of engaging in high quality
686
+ research with other top scientists. On balance, however, we find that this does not stimulate
687
+ long term independent career success. This could be an indication of individuals getting
688
+ lost in the wake of a superstar, meaning these researchers “bandwagon” off the ideas and
689
+ visibility of their respective superstars and iterate on the superstar’s work. Although there is
690
+ value in iterating upon already developed research questions, this may not foster innovative
691
+ work and stimulate individual careers. Indeed, very recently it has been shown that there
692
+ is a decline in disruptive ideas in both scientific publications and patents [50]. The authors
693
+ attribute this to an ever increasing reliance on a narrower set of extant scientific knowledge
694
+ on which to build ideas, a finding very much in line with our observation that followers of
695
+ superstars produce redundant and less innovative content as a group.
696
+ The observed effects could be a consequence of superstars’ strong hold over their respec-
697
+ tive fields. It’s been shown that paradigm shifts in thinking occur after the sudden deaths of
698
+ superstars. Collaborators of superstars suffer a drop in publication rate after their superstar
699
+ death, and the field may experience a surge of contributions by outsiders who are dispro-
700
+ portionately likely to be highly-cited [51]. One can infer that collaborators of superstars
701
+ are successful because they are collaborating with superstars. Care should be taken when
702
+ considering these proteges themselves for matters of funding and academic hiring. If the
703
+ goal is to foster highly novel work, elements outside of prestige and social connection, such
704
+ as efficacy, equity, and innovation, should be considered.
705
+ Our findings are not limited solely to early innovators, collaborators, and inspirees.
706
+ Though we provide early innovators as an example, many other groups [52] can be iso-
707
+ 15
708
+
709
+ lated and studied in the way we have done here to identify promising academics based on
710
+ early signatures of novelty or a range of social parameters. We outlined multiple differ-
711
+ ent definitions of novelty in the introduction which we have not further developed in this
712
+ study. Implementing the different definitions and distinguishing different types of novelty
713
+ can elucidate what types of novelty are stifled or enhanced by different social configurations.
714
+ A subject that we have not probed but is directly relevant to our discussion is the matter
715
+ of funding. In recent times, funding has increasingly become more biased towards top insti-
716
+ tutions [53], with 90% of NSF funding in 2018 going to 22% of funded institutions, serving
717
+ 43% of all institutions and 34% of underrepresented minorities [54]. This is coupled with a
718
+ history of funding disparities with respect to race and underrepresented communities [55–57].
719
+ Additionally, underrepresented groups produce novel works at higher rates yet are taken up
720
+ by other scholars at lower rates than novel contributions by gender and racial majorities [46].
721
+ Equitable funding programs have been shown to enhance research infrastructure, investiga-
722
+ tor capabilities, and intra- and inter-university collaborations at less prominent institutions
723
+ [58]. As we have shown, those that are least influenced by superstars innovate the most
724
+ and consequently have higher citation rates. Coupling these results with added attention to
725
+ equitable funding practices [59] we believe will reduce the growing inequality in academia
726
+ and stimulate novel and innovative research.
727
+ Finally, we note that our investigation necessarily comes with limitations.
728
+ Given our
729
+ sole focus on the APS body of literature, one should be careful when extrapolating these findings
+ to other academic disciplines. Moreover, our abstract data cover only a subset of the APS corpus,
+ so a complete corpus with the full citation network would give a more accurate picture.
732
+ [1] Cliff, H. How to make an Apple Pie From Scratch In Search of the Recipe for our Universe
733
+ (Picador, London, 2021).
734
+ [2] McAndrew, S. & Everett, M.
735
+ Music as collective invention:
736
+ A social network analy-
737
+ sis of composers.
738
+ Cultural Sociology 9, 56–80 (2014).
739
+ URL https://doi.org/10.1177/
740
+ 1749975514542486.
741
+ [3] Muller, E. & Peres, R.
742
+ The effect of social networks structure on innovation perfor-
743
+ mance: A review and directions for research.
744
+ International Journal of Research in Mar-
745
+ 16
746
+
747
+ keting 36, 3–19 (2019).
748
+ URL https://www.sciencedirect.com/science/article/pii/
749
+ S0167811618300284.
750
+ [4] Hazarie, S., Barbosa, H., Frank, A., Menezes, R. & Ghoshal, G. Uncovering the differences
751
+ and similarities between physical and virtual mobility. Journal of The Royal Society Interface
752
+ 17, 20200250 (2020). URL https://doi.org/10.1098/rsif.2020.0250.
753
+ [5] Chen, Z. et al.
754
+ Contrasting social and non-social sources of predictability in human
755
+ mobility.
756
+ Nature Communications 13, 1922 (2022).
757
+ URL https://doi.org/10.1038/
758
+ s41467-022-29592-y.
759
+ [6] Nathaniel Rodriguez, Y.-Y. A., Johan Bollen. Collective dynamics of belief evolution under
760
+ cognitive coherence and social conformity. PLoS ONE 11, e0165910 (2016).
761
+ [7] Holme, P. & Ghoshal, G. Dynamics of networking agents competing for high centrality and
762
+ low degree. Physical Review Letters 96, 098701– (2006). URL https://link.aps.org/doi/
763
+ 10.1103/PhysRevLett.96.098701.
764
+ [8] Ghoshal, G. & Newman, M. E. J. Growing distributed networks with arbitrary degree distri-
765
+ butions. The European Physical Journal B 58, 175–184 (2007). URL https://doi.org/10.
766
+ 1140/epjb/e2007-00208-2.
767
+ [9] Recuero, R., Zago, G. & Soares, F. Using social network analysis and social capital to iden-
768
+ tify user roles on polarized political conversations on twitter.
769
+ Social Media + Society 5,
770
+ 2056305119848745 (2019). URL https://doi.org/10.1177/2056305119848745.
771
+ [10] Dubois, E. & Gaffney, D. The multiple facets of influence: Identifying political influentials
772
+ and opinion leaders on twitter. American Behavioral Scientist 58, 1260–1277 (2014). URL
773
+ https://doi.org/10.1177/0002764214527088.
774
+ [11] Radicchi, F., Weissman, A. & Bollen, J. Quantifying perceived impact of scientific publica-
775
+ tions. Journal of Informetrics 11, 704–712 (2017). URL https://www.sciencedirect.com/
776
+ science/article/pii/S1751157717300846.
777
+ [12] Hirsch, J. E. An index to quantify an individual’s scientific research output. Proceedings of
778
+ the National Academy of Sciences 102, 16569–16572 (2005).
779
+ [13] Nielsen, M. W. & Andersen, J. P. Global citation inequality is on the rise. Proceedings of the
780
+ National Academy of Sciences 118, e2012208118 (2021).
781
+ [14] Merton, R. K. The matthew effect in science. Science 159, 56–63 (1968).
782
+ 17
783
+
784
+ [15] Runco, M. & Pritzker, S. Encyclopedia of Creativity. Encyclopedia of Creativity (Elsevier
785
+ Science, 2011).
786
+ [16] Li, W., Aste, T., Caccioli, F. & Livan, G. Early coauthorship with top scientists predicts
787
+ success in academic careers. Nature Communications 10, 5170 (2019).
788
+ [17] Sekara, V. et al. The chaperone effect in scientific publishing. Proceedings of the National
789
+ Academy of Sciences 115, 12603–12607 (2018).
790
+ [18] Xie, Q., Zhang, X., Kim, G. & Song, M. Exploring the influence of coauthorship with top
791
+ scientists on researchers’ affiliation, research topic, productivity, and impact. Journal of Infor-
792
+ metrics 16, 101314 (2022). URL https://www.sciencedirect.com/science/article/pii/
793
+ S1751157722000669.
794
+ [19] Abrahamson, E. & Rosenkopf, L. Social network effects on the extent of innovation diffusion:
795
+ A computer simulation. Organization Science 8, 289–309 (1997). URL http://www.jstor.
796
+ org/stable/2635149.
797
+ [20] Azoulay, P., Graff Zivin, J. S. & Wang, J. Superstar Extinction. The Quarterly Journal of
798
+ Economics 125, 549–589 (2010). URL https://doi.org/10.1162/qjec.2010.125.2.549.
799
+ https://academic.oup.com/qje/article-pdf/125/2/549/5319678/125-2-549.pdf.
800
+ [21] Clauset, A., Arbesman, S. & Larremore, D. B. Systematic inequality and hierarchy in faculty
801
+ hiring networks. Science Advances 1, e1400005 (2015). URL https://doi.org/10.1126/
802
+ sciadv.1400005.
803
+ [22] Janosov, M., Battiston, F. & Sinatra, R. Success and luck in creative careers. EPJ Data
804
+ Science 9, 9 (2020). URL https://doi.org/10.1140/epjds/s13688-020-00227-w.
805
+ [23] Bol, T., de Vaan, M. & van de Rijt, A. The matthew effect in science funding. Proceedings of
806
+ the National Academy of Sciences 115, 4887–4890 (2018). URL https://www.pnas.org/doi/
807
+ abs/10.1073/pnas.1719557115. https://www.pnas.org/doi/pdf/10.1073/pnas.1719557115.
808
+ [24] Petersen, A. M., Jung, W.-S., Yang, J.-S. & Stanley, H. E. Quantitative and empirical demon-
809
+ stration of the matthew effect in a study of career longevity.
810
+ Proceedings of the National
811
+ Academy of Sciences 108, 18–23 (2011). URL https://www.pnas.org/doi/abs/10.1073/
812
+ pnas.1016733108. https://www.pnas.org/doi/pdf/10.1073/pnas.1016733108.
813
+ [25] Lazer, D. & Friedman, A. The network structure of exploration and exploitation. Adminis-
814
+ trative Science Quarterly 52, 667 – 694 (2007).
815
+ 18
816
+
817
+ [26] Rodan, S. & Galunic, C. More than network structure: How knowledge heterogeneity influ-
818
+ ences managerial performance and innovativeness. Strategic Management Journal 25, 541–562
819
+ (2004). URL http://www.jstor.org/stable/20142143.
820
+ [27] Chang, M. & Joseph E. Harrington, J. Discovery and diffusion of knowledge in an endogenous
821
+ social network. American Journal of Sociology 110, 937–976 (2005). URL http://www.jstor.
822
+ org/stable/10.1086/426555.
823
+ [28] Trapido, D. How novelty in knowledge earns recognition: The role of consistent identities.
824
+ Research Policy 44, 1488–1500 (2015).
825
+ URL https://www.sciencedirect.com/science/
826
+ article/pii/S0048733315000839.
827
+ [29] Xu, F. & Evans, J. Flat teams drive scientific innovation. Proceedings of the National Academy
828
+ of Sciences 119 (2022).
829
+ [30] Hirsch, J. E. Does the h-index have predictive power? Proceedings of the National Academy
830
+ of Sciences 104, 19193–19198 (2007).
831
+ [31] Hirsch, J. E. An index to quantify an individual’s scientific research output. Proceedings of the
832
+ National Academy of Sciences 102, 16569–16572 (2005). URL https://www.pnas.org/doi/
833
+ abs/10.1073/pnas.0507655102. https://www.pnas.org/doi/pdf/10.1073/pnas.0507655102.
834
+ [32] American Physical Society. https://journals.aps.org/datasets.
835
+ [33] Richardson,
836
+ L.
837
+ https://sethc23.github.io/wiki/Python/Beautiful_Soup_
838
+ Documentation.pdf.
839
+ [34] Caron, E. & van Eck, N.-J. Large scale author name disambiguation using rule-based scor-
840
+ ing and clustering. In Noyons, E. (ed.) Proceedings of the Science and Technology Indicators
841
+ Conference 2014, 79–86 (Universiteit Leiden, 2014). URL http://sti2014.cwts.nl. Interna-
842
+ tional conference on science and technology indicators, STI 2014 ; Conference date: 03-09-2014
843
+ Through 05-09-2014.
844
+ [35] El-Kishky, A., Song, Y., Wang, C., Voss, C. R. & Han, J. Scalable topical phrase mining from
845
+ text corpora. Proc. VLDB Endow. 8, 305–316 (2014). URL https://doi.org/10.14778/
846
+ 2735508.2735519.
847
+ [36] Lee, S. Y. Gibbs sampler and coordinate ascent variational inference: A set-theoretical review.
848
+ Communications in Statistics - Theory and Methods 51, 1549–1568 (2022).
849
+ URL https:
850
+ //doi.org/10.1080/03610926.2021.1921214.
851
+ 19
852
+
853
+ [37] Mimno, D., Wallach, H., Talley, E., Leenders, M. & McCallum, A.
854
+ Optimizing semantic
855
+ coherence in topic models. In Proceedings of the 2011 Conference on Empirical Methods in
856
+ Natural Language Processing, 262–272 (Association for Computational Linguistics, Edinburgh,
857
+ Scotland, UK., 2011). URL https://aclanthology.org/D11-1024.
858
+ [38] Ouafae, B., Oumaima, L., Mariam, R. & Abdelouahid, L. Novelty detection review state of
859
+ art and discussion of new innovations in the main application domains. In 2020 1st Inter-
860
+ national Conference on Innovative Research in Applied Science, Engineering and Technology
861
+ (IRASET), 1–7 (2020).
862
+ [39] Soboroff, I. & Harman, D. Overview of the TREC 2003 novelty track. In Voorhees, E. M.
863
+ & Buckland, L. P. (eds.) Proceedings of The Twelfth Text REtrieval Conference, TREC
864
+ 2003, Gaithersburg, Maryland, USA, November 18-21, 2003, vol. 500-255 of NIST Special
865
+ Publication, 38–53 (National Institute of Standards and Technology (NIST), 2003).
866
+ URL
867
+ http://trec.nist.gov/pubs/trec12/papers/NOVELTY.OVERVIEW.pdf.
868
+ [40] Ghosal, T., Saikh, T., Biswas, T., Ekbal, A. & Bhattacharyya, P.
869
+ Novelty Detection:
870
+ A Perspective from Natural Language Processing.
871
+ Computational Linguistics 48, 77–117
872
+ (2022). URL https://doi.org/10.1162/coli_a_00429. https://direct.mit.edu/coli/article-
873
+ pdf/48/1/77/2006641/coli a 00429.pdf.
874
+ [41] Uzzi, B., Mukherjee, S., Stringer, M. & Jones, B. Atypical combinations and scientific impact.
875
+ Science 342, 468–472 (2013). URL https://www.science.org/doi/abs/10.1126/science.
876
+ 1240474. https://www.science.org/doi/pdf/10.1126/science.1240474.
877
+ [42] Schumpeter, J. A. The theory of economic development: An inquiry into profits, capital, credit,
878
+ interest, and the business cycle (Theorie der wirtschaftlichen Entwicklung) (Transaction, Edi-
879
+ son, NJ, 1934). Translated by Redvers Opie.
880
+ [43] Cover, T. & Thomas, J. A. Elements of Information Theory. Wiley Series in Telecommunica-
881
+ tions and Signal Processing (Wiley-Interscience, New York, New York, USA, 2006).
882
+ [44] Aral, S. & Dhillon, P.
883
+ What (exactly) is novelty in networks?
884
+ unpacking the vision ad-
885
+ vantages of brokers, bridges, and weak ties. Institute for Operations Research and the Man-
886
+ agement Sciences (INFORMS) (2021). URL http://dx.doi.org/10.2139/ssrn.2388254.
887
+ https://ssrn.com/abstract=2388254.
888
+ [45] Kuhn, T. S. The Structure of Scientific Revolutions (University of Chicago Press, Chicago,
889
+ 20
890
+
891
+ 1962).
892
+ [46] Hofstra, B. et al. The diversity–innovation paradox in science. Proceedings of the National
893
+ Academy of Sciences 117, 9284–9291 (2020).
894
+ URL https://www.pnas.org/doi/abs/10.
895
+ 1073/pnas.1915378117. https://www.pnas.org/doi/pdf/10.1073/pnas.1915378117.
896
+ [47] Baten, R. A. et al. Creativity in temporal social networks: how divergent thinking is impacted
897
+ by one’s choice of peers. Journal of The Royal Society Interface 17, 20200667 (2020).
898
+ [48] Baten, R. A., Aslin, R. N., Ghoshal, G. & Hoque, E. Cues to gender and racial identity
899
+ reduce creativity in diverse social networks. Scientific Reports 11, 10261 (2021). URL https:
900
+ //doi.org/10.1038/s41598-021-89498-5.
901
+ [49] Baten, R. A., Aslin, R. N., Ghoshal, G. & Hoque, M. E.
902
+ Novel idea generation in social
903
+ networks is optimized by exposure to a “goldilocks” level of idea-variability. PNAS Nexus 1,
904
+ pgac255 (2022).
905
+ [50] Park, M., Leahey, E. & Funk, R. J. Papers and patents are becoming less disruptive over time.
906
+ Nature 613, 138–144 (2023). URL https://doi.org/10.1038/s41586-022-05543-x.
907
+ [51] Azoulay, P., Fons-Rosen, C. & Graff Zivin, J. S.
908
+ Does science advance one funeral at a
909
+ time? American Economic Review 109, 2889–2920 (2019). URL https://www.aeaweb.org/
910
+ articles?id=10.1257/aer.20161574.
911
+ [52] He, B., Ding, Y., Tang, J., Reguramalingam, V. & Bollen, J. Mining diversity subgraph in mul-
912
+ tidisciplinary scientific collaboration networks: A meso perspective. Journal of Informetrics
913
+ 7, 117–128 (2013).
914
+ [53] Murray, D. L. et al. Bias in research grant evaluation has dire consequences for small universi-
915
+ ties. PLOS ONE 11, 1–19 (2016). URL https://doi.org/10.1371/journal.pone.0155876.
916
+ [54] Office of Government Affairs. Building America’s STEM workforce: Eliminating barriers and un-
917
+ locking advantages. Tech. Rep., American Physical Society, 1 Physics Ellipse, College Park,
918
+ MD 20740-3844 (2021).
919
+ [55] Woodson,
920
+ T. & Boutilier,
921
+ S.
922
+ Impacts for whom?
923
+ Assessing inequalities in NSF-
924
+ funded broader impacts using the Inclusion-Immediacy Criterion.
925
+ Science and Pub-
926
+ lic
927
+ Policy
928
+ 49,
929
+ 168–178
930
+ (2021).
931
+ URL
932
+ https://doi.org/10.1093/scipol/scab072.
933
+ https://academic.oup.com/spp/article-pdf/49/2/168/43395599/scab072.pdf.
934
+ [56] Chen, C. Y. et al. Decades of systemic racial disparities in funding rates at the national science
935
+ 21
936
+
937
+ foundation (2022). URL osf.io/xb57u.
938
+ [57] Ginther, D. et al. Race, ethnicity, and nih research awards. Science (New York, N.Y.) 333,
939
+ 1015–9 (2011).
940
+ [58] Harris, L. A. Established program to stimulate competitive research (epscor): Background
941
+ and selected issues. Tech. Rep. R44689, Congressional Research Service, 1 Physics Ellipse,
942
+ College Park, MD 20740-3844 (2017).
943
+ [59] Bollen, J., Crandall, D., Junk, D., Ding, Y. & Börner, K.
944
+ From funding agencies to sci-
945
+ entific agency. EMBO reports 15, 131–133 (2014). URL https://doi.org/10.1002/embr.
946
+ 201338068.
947
+ 22
948
+
949
+ Supplementary Information
950
+ Creativity and Production in Academic Social Networks
951
+ Sean Kelty, Raiyan Abdul Baten, Adiba Proma, Ehsan Hoque, Johann Bollen, Gourab
952
+ Ghoshal
953
+ CONTENTS
954
+ S1. Data
955
+ S-2
956
+ S1.1. Summary statistics
957
+ S-2
958
+ S1.2. Author Disambiguation
959
+ S-4
960
+ S1.3. Coherence of Topic Model
961
+ S-5
962
+ S1.4. Example of Topic Representation
963
+ S-6
964
+ S1.5. Innovation Example
965
+ S-8
966
+ S2. Trends of Novelty and Innovation
967
+ S-10
968
+ S3. Author and Paper-Level Novelty and Output
969
+ S-11
970
+ S-1
971
+ arXiv:2301.02396v1 [cs.DL] 6 Jan 2023
972
+
973
+ S1.
974
+ DATA
975
+ S1.1.
976
+ Summary statistics
977
+ TABLE S1: Summary of citation statistics of corpus with
978
+ and without abstracts.
979
+ No. DOIs: 678,916
+ No. DOIs (w/ abstracts): 250,628
+ No. Authors (after disambiguation): 307,894
+ No. Superstars: 303
+ h-index cutoff for superstars: 21
+ Avg. h-index: 1.74
+ Avg. No. References per paper: 13.5
+ Avg. No. References per paper (w/ abstracts): 5.57
+ Avg. No. Citations per paper: 14.4
+ Avg. No. Citations per paper (w/ abstracts): 6.88
999
+ S-2
1000
+
1001
+ [Figure S1: Paper Statistics per Journal in APS; see caption below.]
+ FIG. S1. Upper panel: Proportion of analyzed papers in across APS journals. Lower panel: Count
1039
+ of all papers in each journal.
1040
+ [Figure S2: Citation and h-index Distributions; see caption below.]
+ FIG. S2. Left: Distribution of citations for all papers (orange curve), for papers with extracted
1066
+ abstracts (blue curve). Right: Distribution of the h-index across all authors in the corpus.
1067
+ S-3
1068
+
1069
+ S1.2.
1070
+ Author Disambiguation
1071
+ We use a rule-based scoring method to disambiguate authors in our dataset, with the criteria and scores listed below (an illustrative sketch in code follows the list).
1072
+ 1. Initials
1073
+ • Two Initials : 5
1074
+ • More than 2 initials : 10
1075
+ • Conflicting Initials : -10
1076
+ 2. First Name
1077
+ • General Name : 3
1078
+ • Non-General Name : 6
1079
+ (A name is considered general if it has been seen more than 1000 times)
1080
+ 3. Address/Affiliation
1081
+ • Country,City : 4
1082
+ • Country,City,Organization : 7
1083
+ • Country,City,Organization,Department : 10
1084
+ 4. Shared Co-Authors
1085
+ • one : 4
1086
+ • two : 7
1087
+ • more than 2 : 10
1088
+ 5. Source
1089
+ • Journal : 6
1090
+ 6. Self-Citation : 10
1091
+ 7. Bibliographic Coupling (two works referencing a common third work)
1092
+ • one : 2
1093
+ • two : 4
1094
+ • three : 6
1095
+ • four : 8
1096
+ • More than four : 10
1097
+ 8. Co-citation (Number of times a third work has cited two works)
1098
+ S-4
1099
+
1100
+ • one : 2
1101
+ • two : 3
1102
+ • three : 4
1103
+ • four : 5
1104
+ • More than 4 : 6
1105
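+ An illustrative sketch of the scoring (only a subset of the rules above is shown, the feature
+ extraction itself is omitted, and the feature names are ours):
+ def disambiguation_score(pair):
+     # `pair` is a dict of pre-computed features comparing two author mentions.
+     score = 0
+     # Initials
+     if pair.get("conflicting_initials"):
+         score -= 10
+     elif pair.get("shared_initials", 0) > 2:
+         score += 10
+     elif pair.get("shared_initials", 0) == 2:
+         score += 5
+     # First name
+     if pair.get("same_first_name"):
+         score += 3 if pair.get("name_is_general") else 6
+     # Shared co-authors
+     shared = pair.get("shared_coauthors", 0)
+     score += {0: 0, 1: 4, 2: 7}.get(shared, 10)
+     # Other signals
+     if pair.get("same_journal"):
+         score += 6
+     if pair.get("self_citation"):
+         score += 10
+     return score
+ 
+ # Two mentions are merged into one author if the total score clears a chosen threshold.
+ example = {"shared_initials": 2, "same_first_name": True, "name_is_general": False,
+            "shared_coauthors": 3, "same_journal": True}
+ print(disambiguation_score(example))  # 5 + 6 + 10 + 6 = 27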
+ S1.3.
1106
+ Coherence of Topic Model
1107
+ We apply the UMass coherence measure to determine a stable number of topics for
1108
+ our topic model. This coherence score measures how similar the top words in a topic
1109
+ are to each other. We aim for the highest possible coherence value that stabilizes in a
1110
+ neighborhood of the number of topics k. Fig. S3 shows the coherence stabilizing at roughly
1111
+ k = 25 topics.
1112
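+ A minimal sketch of this selection rule (the tolerance and the coherence values are hypothetical;
+ the paper settles on k = 25):
+ def pick_num_topics(coherence_by_k, tol=0.05):
+     # Choose the smallest k after which the UMass coherence stops changing appreciably.
+     ks = sorted(coherence_by_k)
+     for k_prev, k_next in zip(ks, ks[1:]):
+         if abs(coherence_by_k[k_next] - coherence_by_k[k_prev]) < tol:
+             return k_prev
+     return ks[-1]
+ 
+ print(pick_num_topics({10: -3.9, 15: -3.4, 20: -3.1, 25: -3.0, 30: -2.98}))  # -> 25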
+ [Figure S3: UMass coherence vs. number of topics k; see caption below.]
+ Coherence of Topic Model
1129
+ FIG. S3. Coherence Scores of P-LDA Topic Model
1130
+ S-5
1131
+
1132
+ S1.4.
1133
+ Example of Topic Representation
1134
+ Words and phrases in the corpus, which will generally be referred to as "terms", are
1135
+ represented by a distribution over latent topics that is the frequency of topic assignments
1136
+ of the term over the entire corpus. Topics are characterized by the frequency of terms
1137
+ associated with the topic. For each topic, all terms are ranked based on the relative weight
+ that the given topic carries in each term’s own distribution. For example, if a phrase had a
1139
+ topic distribution for k = 3 topics of [.1,.2,.7], the phrase is representative of topic 3. Terms
1140
+ are pre-processed by removing stop words and stemming words such that conjugated
1141
+ versions of the same word can be represented as the same word.
1142
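+ A minimal sketch of how a term's representative topic and the per-topic term ranking can be
+ read off its distribution (toy distributions with k = 3):
+ import numpy as np
+ 
+ # Toy term-topic distributions (one row per term, k = 3 topics).
+ term_topic = {"quantum": np.array([0.1, 0.2, 0.7]),
+               "quark":   np.array([0.0, 0.1, 0.9]),
+               "stm tip": np.array([0.8, 0.1, 0.1])}
+ 
+ # A term is representative of the topic that dominates its own distribution,
+ # e.g. [0.1, 0.2, 0.7] -> topic 3.
+ representative = {t: int(np.argmax(d)) + 1 for t, d in term_topic.items()}
+ 
+ # Ranking terms within a given topic by the weight that topic carries for each term.
+ topic = 3
+ ranked = sorted(term_topic, key=lambda t: term_topic[t][topic - 1], reverse=True)
+ print(representative, ranked)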
+ TABLE S2: Topic Model Summary of most representative
1143
+ terms per topic.
1144
+ Topic
1145
+ Number
1146
+ Representative Terms
1147
+ Topic 1
1148
+ crystal film, ultrathin, mtj, stm tip, stack, freestand, high resolution angle, franz, stm, force
1149
+ micrscop
1150
+ Topic 2
1151
+ center cubic fcc, temperature addit, measur x, tc cuprat, temperature down k, temperature k k, tc
1152
+ k, tc superconduct, tc superconductor, temperature tc k
1153
+ Topic 3
1154
+ spectral line, ωp, raman line, absorpt part, absorpt line, nd3, electroreflect. eliashberg, b1g, endor
1155
+ Topic 4
1156
+ axial magnet, spin angular, moment inertia, moment magnet, parallel magnet field, magnet
1157
+ revers, torqu, interlay exchange, spin texture, moriya
1158
+ Topic 5
1159
+ collim, electron eject, ion yield, ion trap, n4, ion produc, ion plasma, damag, wall carbon, electron
1160
+ drift
1161
+ Topic 6
1162
+ cauchi, broken time, takahashi, hamilton jacobi, symmetri spontan, tachyon, ward ident, polyakov,
1163
+ loop quantum cosmolog, coulomb guage
1164
+ Topic 7
1165
+ excitatori, hub, infect, epidem, volatil, exactli solvabl model, network model, synaps, synapt,
1166
+ integr fire
1167
+ Topic 8
1168
+ nonequilibrium phase transit, first order phase transit, j’, glass order, thouless transit, glass like,
1169
+ glass former, triangluar lattic, nearest neighbor coupl, nearest neighbor distanc
1170
+ S-6
1171
+
1172
+ Topic 9
1173
+ magnitude higher, larg part, fourth gener, even though, order qcd, select rule, third, mach zehnder
1174
+ interferomet, even larger, order raman
1175
+ Topic 10
1176
+ quasilinear, langevin equat, gilbert equat, equate state eo, sand, attractor, classic chaotic, eulerian,
1177
+ chimera state, euler equat
1178
+ Topic 11
1179
+ advanc ligo, mit bag, catalog, model background, dark sector, dark matter, sight, model dark, sky,
1180
+ sno
1181
+ Topic 12
1182
+ nest, der waal force, nodal line, helic edg, non fermi, state degeneraci, hove, majorana zero,
1183
+ majorana bound, sdh
1184
+ Topic 13
1185
+ three dimension 3d, basin attract, fuld ferrel, dimension squar, lz, trap bose, bodi effect, bodi forc,
1186
+ hard core boson, fermion atom
1187
+ Topic 14
1188
+ highest occupi molecular, muffin tin orbit, gaas1, clathrat, cl2, cl, hexagon boron, interstiti, gell, ci
1189
+ Topic 15
1190
+ puls width, optic parametr, sapphir laser, exciton biexciton, optic pump, harmon gener shg, optic
1191
+ puls, inxga1 xa, optic nonlinear, ultrastrong
1192
+ Topic 16
1193
+ clauser, horn shimoni holt, simpl analyt express, us deriv, part paper, analyt formula, cb, exact
1194
+ forumla, exact expression, pauli exclus
1195
+ Topic 17
1196
+ agre reason, foudn good agreement, recent experiment data, find excel agreement, find good
1197
+ agreement, theoret data, theoret cross, reason agreement experiment, found excel agreement,
1198
+ good agreement experimental result
1199
+ Topic 18
1200
+ qutrit, regist, processor, studi entagle, protocol, markovian dynam, purif, decoy state, qkd, error
1201
+ correct
1202
+ Topic 19
1203
+ nucleon nucleon scatter, deep inelast scatter, total angular momentum, inclus cross, transfer cross
1204
+ section, multifragment, multiperipher, depend cross section, forward angle, πn
1205
+ Topic 20
1206
+ full potenti linear augment plane wave, wave born, wannier function, impuls, infield, use path,
1207
+ use mont, within densiti function, jastrow, use harte
1208
+ Topic 21
1209
+ avoid walk, nonergod, time τ, time tail, time t2, time t2, dimension diffus, time random, nonex-
1210
+ ponenti, msd
1211
+ Topic 22
1212
+ even even nuclei, xe136, rich nuclei, gt, v’, p0, cf252, α p, α reaction, p1
1213
+ Topic 23
1214
+ director field, shear modulu, homeotrop, tλ, antivortex, humid, u0, hydrophil, shear band, shear
1215
+ strain
1216
+ S-7
1217
+
1218
+ Topic 24
1219
+ signific role, key role, kibbl zurek, amino acid, play essenti, play domin, play crucial, play critical,
1220
+ play central, remain elus
1221
+ Topic 25
1222
+ paramet η, ev2, rev c, rev lett, eq, right hand, right left, e e collide, e e annhil, f0
1223
+ TABLE S3: Example of Phrase-Topic Distributions.
1224
+ Term
1225
+ Topic-Embedding
1226
+ ...
1227
+ Quantiz
1228
+ [1, 0, 0, 0, 0 2259, 0, 0, 560, 0, 0, 882, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 677, 0, 0]
1229
+ Quantum
1230
+ [29, 0, 0, 21, 0, 4304, 1069, 4276, 0, 308, 0, 6008, 454, 46, 14920, 0, 0, 35931, 0, 1828, 0, 0,
1231
+ 1384, 7, 1]
1232
+ Quark
1233
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 239, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14542]
1234
+ Quarkonia
1235
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 125]
1236
+ Quarkonium
1237
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 299]
1238
+ Quarter
1239
+ [0, 0, 0, 0, 30, 0, 0, 0, 0, 0, 0, 321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1240
+ ...
1241
+ Quantum Wire
1242
+ [1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 342, 0, 0, 292, 0, 0, 23, 0, 0, 0, 0, 91, 0, 0]
1243
+ Quantum Zeno
1244
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 31, 0, 0, 0, 0, 0, 0, 0]
1245
+ Quark Antiquark
1246
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 433]
1247
+ Quark Condens
1248
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 107]
1249
+ Quark Decay
1250
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25]
1251
+ ...
1252
+ S1.5.
1253
+ Innovation Example
1254
+ The innovation metric counts the first time a term or a new combination of terms has
+ been seen in an article over the entire corpus.
1256
+ "quantum" and "cosmolog" in the corpus. Note that "cosmolog" is the root of words such
1257
+ as "cosmology" and "cosmological" that were lemmatized in pre-processing. We plot the
1258
+ frequency of the terms in time as well as vertical lines representing the first year the term
1259
+ S-8
1260
+
1261
+ has been seen. We also plot the counts of the phrase "quantum cosmolog" which is an
1262
+ additionally considered term in our topic model.
1263
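+ A minimal sketch of how the first-introduction year of a term (the vertical lines in Fig. S4)
+ can be located (hypothetical corpus structure and years):
+ def first_seen_year(term, papers_by_year):
+     # `papers_by_year` maps year -> list of term sets, one per abstract.
+     for year in sorted(papers_by_year):
+         if any(term in terms for terms in papers_by_year[year]):
+             return year
+     return None
+ 
+ corpus = {1913: [{"quantum"}], 1968: [{"cosmolog"}], 1986: [{"quantum", "cosmolog"}]}
+ print(first_seen_year("cosmolog", corpus))  # 1968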
+ [Figure S4: yearly counts and densities of the terms "quantum", "cosmolog", and "quantum cosmolog"; see caption below.]
+ FIG. S4. Example of innovation measure with terms "quantum" and "cosmolog"
1290
+ S-9
1291
+
1292
+ S2.
1293
+ TRENDS OF NOVELTY AND INNOVATION
1294
+ FIG. S5. Correlations between all novelty and innovation measures based on Pearson’s r.
1295
+ S-10
1296
+
1297
+ [Figure S5: Correlation of Novelty Measures (pairwise scatter plots of Innovativeness, Reference Diversity, Citation Diversity, and Shannon Entropy with R^2 annotations); see caption above.]
+ S3.
1392
+ AUTHOR AND PAPER-LEVEL NOVELTY AND OUTPUT
1393
+ [Figure S6: Author-Level Novelty Scores vs Author Success; see caption below.]
+ FIG. S6. Novelty and innovation metrics of an author’s publication record as a function of
1432
+ their number of publications. Authors with between 1-50 publications in the corpus have been
1433
+ considered.
1434
+ [Figure S7: Paper-Level Novelty Scores vs Paper Success; see caption below.]
+ FIG. S7. Novelty and innovation metrics of an author’s publication record as a function of the
1471
+ number of citations their papers garner.
1472
+ S-11
1473
+
1474
+ [Figure S8: Average novelty measures per year; see caption below.]
+ FIG. S8. All Novelty Measures per Year.
1534
+ [Figure S9: Concept Diversity and Similarities of Inspired Groups; see caption below.]
+ FIG. S9. (A) Reference Diversity, (B) Citation Diversity, (C) Within-group paper similarities for
1634
+ the followers of a superstar partitioned by level of inspiration. Upper panel: temporal evolution.
1635
+ Lower panel: averaged in time.
1636
+ S-12
1637
+
1638
+ [Figure: yearly publications vs. t − t0 (yr) for Early Collaborators and Early Innovators, left panel including superstar papers and right panel excluding superstar papers.]
+ FIG. S10. Publication rates of academic groups, LEFT including superstar collaborations and RIGHT excluding superstar collaborations.
1674
+
6NE0T4oBgHgl3EQfewDQ/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
6NE4T4oBgHgl3EQf1g37/content/2301.05292v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e1cee2983f7fd812c2867320eb5e7c069305ce96a456d0d9f0e2951486e1b431
3
+ size 2373256
6NE4T4oBgHgl3EQf1g37/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9fa7bf2455072f437c03d335783c83b931e57ce97f93c301ebddd0f6926f2e13
3
+ size 1048621
6NE4T4oBgHgl3EQf1g37/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76d84708d419c91c8af96023665df90600e1fc8c21c871e08825cbaa81b2e3db
3
+ size 48592
6NFKT4oBgHgl3EQfTS23/content/tmp_files/2301.11779v1.pdf.txt ADDED
@@ -0,0 +1,568 @@
 
 
 
 
1
+ Invariant Meta Learning for Out-of-Distribution Generalization
2
+ Penghao Jiang, Ke Xin, Zifeng Wang, Chunxi Li
3
+ The Australian National University, Canberra, Australia *
4
+ Abstract
5
+ Modern deep learning techniques have illustrated their excellent capabilities in many areas, but they rely on large amounts of training data. Optimization-based meta-learning trains a model on a variety of tasks such that it can solve new learning tasks using only a small number of training samples. However, these methods assume that training and test data are identically and independently distributed. To overcome this limitation, in this paper we propose invariant meta learning for out-of-distribution tasks. Specifically, invariant meta learning finds an invariant optimal meta-initialization and fast adapts to out-of-distribution tasks with a regularization penalty. Extensive experiments demonstrate the effectiveness of our proposed invariant meta learning on out-of-distribution few-shot tasks.
20
+ 1. Introduction
21
+ Modern deep learning techniques have illustrated their excellent capabilities in many areas like computer vision, natural language processing and recommendation, etc. [11]. However, these methods rely on large training data. To overcome this limitation, few-shot learning methods such as meta learning have been proposed [6]. The most popular meta learning approaches are optimization-based meta-learning [4, 16], which is model-agnostic and can be applied to various downstream tasks. However, much recent research has revealed the vulnerability of machine learning models when exposed to data with different distributions.
32
+ Such a massive gap is induced by the violation of a fundamental assumption that training and test data are identically and independently distributed (the i.i.d. assumption), upon which most of the existing meta learning models are developed [4, 16]. In many real cases where the i.i.d. assumption can hardly be satisfied, especially in high-stake applications such as healthcare, military and autonomous driving, the ability to generalize under distribution shift is of more critical significance than generalization within the training distribution. As shown in Figure 1, given training data where dogs are on the grass, the model cannot make accurate predictions on testing data where dogs are in water, in a cage or in the street. The reason is that the spurious correlation between grass and dog in the training data hampers the performance of the model. Due to this spurious correlation, the model tends to focus on both grass and dog, which leads to failed predictions in other distributions, such as dogs in water, in a cage or in the street, as shown in Figure 2. However, recent meta learning methods cannot overcome the distribution shifts between training and testing data. In this paper, we consider a realistic scenario where tasks come from different distributions (out-of-distribution, OOD).
+ * The first two authors contributed equally as joint first authorship. The last two authors contributed equally as joint second authorship.
+ Figure 1. Illustration example of how the distribution shifts between training data and testing data hamper the performance of model predictions. [Panels show dogs at home, on the beach, eating, in a cage, in water, lying, on grass, in the street and running, split into training data, model and testing data.]
+ Figure 2. Causal framework of the dog prediction task. Due to the spurious correlation, the model tends to focus on both grass and dog, which leads to failed predictions in other distributions. [Diagram: T: grass, X: dog nose, Y: label; grass-label shows strong correlation but weak causation, while dog nose-label shows strong correlation and strong causation.]
+ arXiv:2301.11779v1 [cs.LG] 26 Jan 2023
+ In this paper, to overcome the problem mentioned above, we propose Invariant Meta Learning (IML) for out-of-distribution tasks, a general learning framework that jointly adjusts gradient magnitudes and directions. Specifically, invariant meta learning finds an invariant optimal meta-initialization and fast adapts to out-of-distribution tasks with a regularization penalty. To summarize, our main contributions are:
+ • We consider the challenge of out-of-distribution tasks faced by few-shot learning, and show a natural idea to jointly adjust gradient magnitudes and directions of all tasks in the meta optimization process;
+ • We propose Invariant Meta Learning (IML) for out-of-distribution tasks, a general learning framework that jointly adjusts gradient magnitudes and directions;
+ • We conduct extensive experiments and analysis to demonstrate that our approach effectively improves the performance and generalization ability under both in-distribution and out-of-distribution few-shot settings, and thus it can be regarded as a better baseline.
105
+ 2. Method
106
+ In this section, we introduce our proposed Invariant Meta Learning (IML) to address the out-of-distribution problem in few-shot tasks. IML learns invariant optimal predictors based on an optimization-based meta learning framework. To learn an invariant optimal meta-initialization in optimization-based meta learning, the main challenge is that the OOD problem exacerbates the inconsistency in both task-gradient magnitudes and directions. To overcome this problem, IML finds an invariant optimal initialization and adapts to out-of-distribution tasks with a regularization penalty.
118
+ Model-agnostic meta-learning (MAML) [4] is an ap-
119
+ proach to optimization-based meta-learning that is related
120
+ to our work. For some parametric model f_θ, MAML aims to find a single set of parameters θ which, using a few optimization steps, can be successfully adapted to any novel task sampled from the same distribution. For a particular task instance T_i = (D^tr, D^val), the parameters are adapted to task-specific model parameters θ′_i by applying some differentiable function, typically an update rule of the form:
+ θ′_i = G(θ, D^tr),    (1)
+ where G is typically implemented as a step of gradient descent on the few-shot training set D^tr, i.e. θ′_i = θ − α ∇_θ L^tr_{T_i}(f_θ). Generally, multiple sequential adaptation steps can be applied. The learning rate α can also be meta-learned concurrently, in which case we refer to this algorithm as Meta-SGD [13]. During meta-training, the parameters θ are updated by back-propagating through the adaptation procedure, in order to reduce errors on the validation set D^val:
+ θ ← θ − η ∇_θ Σ_{T_i ∼ p(T)} L^val_{T_i}(f_{θ′_i}).    (2)
160
+ The approach includes the main ingredients of optimization-based meta-learning with neural networks: initialization is done by maintaining an explicit set of model parameters θ; the adaptation procedure, or “inner loop”, takes θ as input and returns θ′_i adapted specifically for task instance T_i, by iteratively using gradient descent (Eq. 1); and termination, which is handled simply by choosing a fixed number of optimization steps in the “inner loop”. MAML updates θ by differentiating through the “inner loop” in order to minimize errors of instance-specific adapted models f_{θ′_i} on the corresponding validation set (Eq. 2). We refer to this process as the “outer loop” of meta-learning. We use the same stages to describe IML.
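+ As a concrete illustration of the inner/outer loop described in Eqs. (1)-(2), the following is a minimal sketch of a one-step MAML-style update in PyTorch. It is not the paper's implementation: the toy sine-regression task sampler, the tiny two-layer model and the hyper-parameter values are hypothetical stand-ins for a real few-shot setup.
+ import torch
+
+ def sample_task(n=10):
+     # hypothetical toy task: regress y = a*sin(x + b) from a few points
+     a, b = 1 + 4 * torch.rand(1), 3 * torch.rand(1)
+     xt, xv = torch.randn(n, 1), torch.randn(n, 1)
+     return (xt, a * torch.sin(xt + b)), (xv, a * torch.sin(xv + b))
+
+ def forward(params, x):
+     w1, b1, w2, b2 = params
+     return torch.tanh(x @ w1 + b1) @ w2 + b2
+
+ def mse(params, batch):
+     x, y = batch
+     return ((forward(params, x) - y) ** 2).mean()
+
+ # meta-initialization theta; Eq. (1) adapts copies of it per task
+ theta = [(0.1 * torch.randn(1, 32)).requires_grad_(),
+          torch.zeros(32, requires_grad=True),
+          (0.1 * torch.randn(32, 1)).requires_grad_(),
+          torch.zeros(1, requires_grad=True)]
+ alpha, eta = 0.01, 0.001                  # inner / outer learning rates
+
+ for step in range(100):                   # outer loop (Eq. 2)
+     outer_loss = 0.0
+     for _ in range(4):                    # batch of tasks T_i ~ p(T)
+         train_set, val_set = sample_task()
+         # inner loop (Eq. 1): one gradient step on the task's train split
+         grads = torch.autograd.grad(mse(theta, train_set), theta, create_graph=True)
+         adapted = [p - alpha * g for p, g in zip(theta, grads)]
+         outer_loss = outer_loss + mse(adapted, val_set)
+     meta_grads = torch.autograd.grad(outer_loss, theta)
+     with torch.no_grad():                 # gradient step on the meta-initialization
+         for p, g in zip(theta, meta_grads):
+             p -= eta * g
+ Because the inner step is built with create_graph=True, the final meta-gradient differentiates through the adaptation, which is exactly the “outer loop” back-propagation referred to above.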
183
+ Invariant Meta Learning (IML) finds an invariant optimal meta-initialization, and fast adapts to out-of-distribution tasks with a regularization penalty. MAML fast adapts the network to a new task during the inner loop and learns a universal meta-initialization in the outer loop. Similarly, in IML, we update the network with a bi-level update, optimizing the classifier in the inner loop and learning the feature representation in the outer loop. For the inner-level optimization, the parameters θ of the predictor become θ_i while adapting to the task t_i ∈ T^tr. This corresponds to the inner optimization of MAML, except that each task t_i has a corresponding network θ_i. The optimization in the inner loop can be defined as follows:
+ θ′_i = θ − α ∇_θ L^tr_{T_i}(f_θ),    (3)
+ where α is the learning rate of the inner optimization. With the inner-optimized network f_{θ′_i}, we have the outer loop objective function with a variance penalty regularizer:
+ L^val = Σ_{T_i ∼ p(T^tr)} Σ_{T_j ∼ p(T^val)} L^val_{T_j}(f_{θ′_i}),    (4)
+ θ ← θ − η ∇_θ L^val − βλ trace(Var_{T^val}(∇_θ L^val)),    (5)
+ where η and β are the learning rates of the outer loop optimization, t_j is the task j used in the outer loop optimization for the network θ′_i, and L is the loss function of the outer loop optimization. Note that the inner-optimized network f_{θ′_i} is used to update the meta-initialization in the outer loop with t_j, whereas it is updated from the meta-initialization with t_i in the inner loop. IML learns an invariant meta-initialization from the discrepancy among different training tasks via the variance penalty regularizer.
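+ To make Eqs. (3)-(5) concrete, the sketch below continues the toy setup from the previous sketch (reusing sample_task, forward, mse, theta and alpha) and performs one IML-style outer step. It follows one plausible reading of Eq. (5), in which the trace of the variance of the per-task validation gradients is added to the outer objective before differentiating; beta and lam are hypothetical values, not taken from the paper.
+ beta, lam, eta = 0.1, 1.0, 0.001          # hypothetical hyper-parameters
+
+ train_batches = [sample_task()[0] for _ in range(4)]   # D^tr of tasks T_i ~ p(T^tr)
+ val_batches = [sample_task()[1] for _ in range(4)]     # D^val of tasks T_j ~ p(T^val)
+
+ per_task_val_losses = []
+ for tr in train_batches:
+     # Eq. (3): adapt the shared meta-initialization theta to task T_i
+     grads = torch.autograd.grad(mse(theta, tr), theta, create_graph=True)
+     adapted = [p - alpha * g for p, g in zip(theta, grads)]
+     # Eq. (4): evaluate the adapted network on every validation task T_j
+     per_task_val_losses += [mse(adapted, va) for va in val_batches]
+
+ outer_loss = torch.stack(per_task_val_losses).sum()
+
+ # Eq. (5): variance penalty = trace of the variance of per-task gradients of L^val
+ per_task_grads = [torch.cat([g.reshape(-1) for g in
+                              torch.autograd.grad(l, theta, create_graph=True)])
+                   for l in per_task_val_losses]
+ penalty = torch.stack(per_task_grads).var(dim=0).sum()
+
+ meta_grads = torch.autograd.grad(outer_loss + beta * lam * penalty, theta)
+ with torch.no_grad():                     # outer update of the meta-initialization
+     for p, g in zip(theta, meta_grads):
+         p -= eta * g
+ Penalizing the variance of the per-task gradients pushes the meta-initialization toward updates that are consistent across tasks, which matches the stated goal of jointly adjusting gradient magnitudes and directions.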
233
+ 3. Experiments
234
+ Datasets.
235
+ In this paper, we address the few-shot classification problem under both in-distribution and out-of-distribution FSL settings. These settings are conducted on three benchmark datasets: miniImageNet [23], Caltech-UCSD-Birds 200-2011 (CUB) [25], and SUN Attribute Database (SUN) [15].
241
+
242
+ Method               | miniImageNet 5-way 1-shot / 5-shot | CUB 5-way 1-shot / 5-shot | SUN 5-way 1-shot / 5-shot
+ Meta-Learner LSTM    | 24.99 / 29.79 | 36.23 / 44.39 | 30.99 / 44.86
+ MAML                 | 45.69 / 60.90 | 48.87 / 63.99 | 57.75 / 71.45
+ Reptile              | 26.59 / 39.87 | 27.21 / 42.35 | 28.30 / 51.62
+ Matching Network     | 47.63 / 56.28 | 53.06 / 62.19 | 55.02 / 62.57
+ Prototypical Network | 46.15 / 65.56 | 48.21 / 57.80 | 55.70 / 67.32
+ Relation Network     | 47.64 / 63.65 | 52.76 / 64.71 | 58.29 / 72.15
+ Baseline             | 23.84 / 32.09 | 25.14 / 35.35 | 27.44 / 34.54
+ Baseline++           | 30.15 / 41.19 | 32.48 / 42.43 | 35.56 / 44.42
+ IML                  | 48.35 / 67.21 | 54.18 / 65.85 | 59.24 / 74.18
+ Table 1. Average accuracy (%) comparison to state-of-the-arts with 95% confidence intervals on 5-way classification tasks under the in-distribution FSL setting. Best results are displayed in boldface.
317
+ Method               | miniImageNet→CUB 5-way 1-shot / 5-shot | miniImageNet→SUN 5-way 1-shot / 5-shot | CUB→miniImageNet 5-way 1-shot / 5-shot
+ Meta-Learner LSTM    | 23.77 / 30.58 | 25.52 / 32.14 | 22.58 / 28.18
+ MAML                 | 40.29 / 53.01 | 46.07 / 59.08 | 33.36 / 41.58
+ Reptile              | 24.66 / 40.86 | 32.15 / 50.38 | 24.56 / 40.60
+ Matching Network     | 38.34 / 47.64 | 39.58 / 53.20 | 26.23 / 32.90
+ Prototypical Network | 36.60 / 54.36 | 46.31 / 66.21 | 29.22 / 38.73
+ Relation Network     | 39.33 / 50.64 | 44.55 / 61.45 | 28.64 / 38.01
+ Baseline             | 24.16 / 32.73 | 25.49 / 37.15 | 22.98 / 28.41
+ Baseline++           | 29.40 / 40.48 | 30.44 / 41.71 | 23.41 / 25.82
+ IML                  | 41.27 / 57.34 | 50.42 / 69.15 | 34.26 / 44.17
+ Table 2. Average accuracy (%) comparison to state-of-the-arts with 95% confidence intervals on 5-way classification tasks under the out-of-distribution (cross-domain) FSL setting. Best results are displayed in boldface.
392
+ Baselines.
393
+ To evaluate the effectiveness of the proposed
394
+ framework, we consider the following representative meta
395
+ learning methods on the few-shot image classification task:
396
+ MAML [5], Reptile [14], Matching Network [23], Proto-
397
+ typical Network [20], Relation Network [21], Baseline and
398
+ Baseline++ [3].
399
+ Experimental Settings.
400
+ We conduct experiments on 5-way 1-shot and 5-way 5-shot settings; there are 15 query samples per class in each task. We report the average accuracy (%) and the corresponding 95% confidence interval over 2000 tasks randomly sampled from the novel classes. To fairly evaluate the original performance of each method, we use the same 4-layer ConvNet [23] as the backbone for all methods and do not adopt any data augmentation during training. All methods are trained via stochastic gradient descent using Adam [10], and the initial learning rate is set to 1e−3. For each method, models are trained for 40,000 tasks at most, and the best model on the validation classes is used to evaluate the final reported performance in the meta-test phase.
414
+ Table 1
415
+ shows the comparative results under the in-distribution FSL
416
+ setting on three benchmark datasets.
417
+ It is observed that
418
+ IML outperforms the original MAML in all in-distribution
419
+ FSL scenarios.
420
+ For 1-shot and 5-shot on miniImageNet
421
+ → miniImageNet, IML achieves about 1% higher perfor-
422
+ mance than Prototypical Network. However, IML achieves
423
+ 5% and 10% higher performance for 1-shot and 5-shot
424
+ on CUB → CUB, and 3% and 6% higher performance
425
+ on SUN → SUN. As the latter two scenarios are con-
426
+ ducted on finegrained classification datasets, we attribute
427
+ the promising improvement to that the categories in these
428
+ fine-grained datasets share more local concepts than those
429
+ in coarsegrained datasets, and thus a more discriminative
430
+ space can be rapidly learned with a few steps of adaptation.
431
+ Moreover, IML achieves the best performance among all
432
+ baselines in all in-distribution FSL scenarios, which shows
433
+ that our approach can be considered as a better baseline op-
434
+ tion under the in-distribution FSL setting.
435
+ Evaluation Using the Out-of-Distribution Setting.
436
+ We
437
+ also conduct out-of-distribution FSL experiments and re-
438
+ port the comparative results in Table 2. Compared to the re-
439
+ sults under the in-distribution setting, it can be observed that
440
+ all approaches suffer from a larger discrepancy between the
441
+
442
+ distributions of training and testing tasks, which results in
443
+ a performance decline in all scenarios. However, IML still
444
+ outperforms the original MAML in all out-of-distribution
445
+ FSL scenarios, demonstrating that the bilevel optimization
446
+ strategy for adaptation and the learning of transferable la-
447
+ tent factors can be utilized to improve simple meta learning
448
+ approaches. Also, IML achieves all the best results, indicat-
449
+ ing that our approach can be regarded as a promising base-
450
+ line under the out-of-distribution setting.
451
+ 4. Conclusion
452
+ In this paper,
453
+ we consider the challenge of out-
454
+ ofdistribution tasks faced by few-shot learning. We propose
455
+ Invariant Meta Learning (IML) for out-of-distribution tasks,
456
+ a general learning framework that jointly adjusts gradient
457
+ magnitudes and directions. Extensive experiments demon-
458
+ strate that our approach effectively improves the perfor-
459
+ mance and generalization ability under both in-distribution
460
+ and out-of-distribution few-shot settings, and thus it can be
461
+ regarded as a better baseline.
462
+ References
463
+ [1] Yoshua Bengio, Samy Bengio, and Jocelyn Cloutier. Learn-
464
+ ing a synaptic learning rule. Citeseer, 1990.
465
+ [2] Fei Chen, Mi Luo, Zhenhua Dong, Zhenguo Li, and
466
+ Xiuqiang He.
467
+ Federated meta-learning with fast con-
468
+ vergence and efficient communication.
469
+ arXiv preprint
470
+ arXiv:1802.07876, 2018.
471
+ [3] Wei-Yu Chen, Yen-Cheng Liu, Zsolt Kira, Yu-Chiang Frank
472
+ Wang, and Jia-Bin Huang. A closer look at few-shot classi-
473
+ fication. arXiv preprint arXiv:1904.04232, 2019. 3
474
+ [4] Chelsea Finn, Pieter Abbeel, and Sergey Levine.
475
+ Model-
476
+ agnostic meta-learning for fast adaptation of deep networks.
477
+ In International conference on machine learning, pages
478
+ 1126–1135. PMLR, 2017. 1, 2
479
+ [5] Chelsea Finn, Pieter Abbeel, and Sergey Levine.
480
+ Model-
481
+ agnostic meta-learning for fast adaptation of deep networks.
482
+ In Proceedings of the 34th International Conference on Ma-
483
+ chine Learning, pages 1126–1135. PMLR, 2017. 3
484
+ [6] Chelsea Finn and Sergey Levine.
485
+ Meta-learning and
486
+ universality:
487
+ Deep representations and gradient descent
488
+ can approximate any learning algorithm.
489
+ arXiv preprint
490
+ arXiv:1710.11622, 2017. 1
491
+ [7] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and
492
+ Amos Storkey. Meta-learning in neural networks: A survey.
493
+ arXiv preprint arXiv:2004.05439, 2020.
494
+ [8] Simon Jenni and Paolo Favaro. Deep bilevel learning. In
495
+ Proceedings of the European conference on computer vision
496
+ (ECCV), pages 618–633, 2018.
497
+ [9] Taewon Jeong and Heeyoung Kim.
498
+ Ood-maml:
499
+ Meta-
500
+ learning for few-shot out-of-distribution detection and clas-
501
+ sification. Advances in Neural Information Processing Sys-
502
+ tems, 33:3907–3916, 2020.
503
+ [10] Diederik P Kingma and Jimmy Ba. Adam: A method for
504
+ stochastic optimization.
505
+ arXiv preprint arXiv:1412.6980,
506
+ 2014. 3
507
+ [11] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple
508
+ layers of features from tiny images. 2009. 1
509
+ [12] Hae Beom Lee, Hayeon Lee, Donghyun Na, Saehoon Kim,
510
+ Minseop Park, Eunho Yang, and Sung Ju Hwang. Learn-
511
+ ing to balance: Bayesian meta-learning for imbalanced and
512
+ out-of-distribution tasks. arXiv preprint arXiv:1905.12917,
513
+ 2019.
514
+ [13] Zhenguo Li, Fengwei Zhou, Fei Chen, and Hang Li. Meta-
515
+ sgd: Learning to learn quickly for few-shot learning. arXiv
516
+ preprint arXiv:1707.09835, 2017. 2
517
+ [14] Alex Nichol, Joshua Achiam, and John Schulman.
518
+ On
519
+ first-order
520
+ meta-learning
521
+ algorithms.
522
+ arXiv
523
+ preprint
524
+ arXiv:1803.02999, 2018. 3
525
+ [15] Genevieve Patterson, Chen Xu, Hang Su, and James Hays.
526
+ The sun attribute database: Beyond categories for deeper
527
+ scene understanding. International Journal of Computer Vi-
528
+ sion, 108(1):59–81, 2014. 2
529
+ [16] Aravind Rajeswaran, Chelsea Finn, Sham M Kakade, and
530
+ Sergey Levine. Meta-learning with implicit gradients. Ad-
531
+ vances in neural information processing systems, 32, 2019.
532
+ 1
533
+ [17] Sachin Ravi and Hugo Larochelle. Optimization as a model
534
+ for few-shot learning. 2016.
535
+ [18] Andrei A Rusu, Dushyant Rao, Jakub Sygnowski, Oriol
536
+ Vinyals, Razvan Pascanu, Simon Osindero, and Raia Had-
537
+ sell.
538
+ Meta-learning with latent embedding optimization.
539
+ arXiv preprint arXiv:1807.05960, 2018.
540
+ [19] Amrith Setlur, Oscar Li, and Virginia Smith.
541
+ Is support
542
+ set diversity necessary for meta-learning?
543
+ arXiv preprint
544
+ arXiv:2011.14048, 2020.
545
+ [20] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical
546
+ networks for few-shot learning. Advances in neural informa-
547
+ tion processing systems, 30, 2017. 3
548
+ [21] Flood Sung, Yongxin Yang, Li Zhang, Tao Xiang, Philip HS
549
+ Torr, and Timothy M Hospedales. Learning to compare: Re-
550
+ lation network for few-shot learning. In Proceedings of the
551
+ IEEE conference on computer vision and pattern recogni-
552
+ tion, pages 1199–1208, 2018. 3
553
+ [22] Sebastian Thrun and Lorien Pratt.
554
+ Learning to learn.
555
+ Springer Science & Business Media, 2012.
556
+ [23] Oriol Vinyals, Charles Blundell, Timothy Lillicrap, Daan
557
+ Wierstra, et al. Matching networks for one shot learning. Ad-
558
+ vances in neural information processing systems, 29, 2016.
559
+ 2, 3
560
+ [24] Risto Vuorio, Shao-Hua Sun, Hexiang Hu, and Joseph J
561
+ Lim.
562
+ Multimodal model-agnostic meta-learning via task-
563
+ aware modulation. Advances in Neural Information Process-
564
+ ing Systems, 32, 2019.
565
+ [25] Catherine Wah, Steve Branson, Peter Welinder, Pietro Per-
566
+ ona, and Serge Belongie. The caltech-ucsd birds-200-2011
567
+ dataset. 2011. 2
568
+
6NFKT4oBgHgl3EQfTS23/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,301 @@
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf,len=300
2
+ page_content='Invariant Meta Learning for Out-of-Distribution Generalization Penghao Jiang, Ke Xin, Zifeng Wang, Chunxi Li The Australian National University, Canberra, Australia * Abstract Modern deep learning techniques have illustrated their excellent capabilities in many areas, but relies on large training data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
3
+ page_content=' Optimization-based meta-learning train a model on a variety tasks, such that it can solve new learn- ing tasks using only a small number of training samples.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
4
+ page_content=' However, these methods assumes that training and test data are identically and independently distributed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
5
+ page_content=' To overcome such limitation, in this paper, we propose invariant meta learning for out-of-distribution tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
6
+ page_content=' Specifically, invari- ant meta learning find invariant optimal meta-initialization, and fast adapt to out-of-distribution tasks with regulariza- tion penalty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
7
+ page_content=' Extensive experiments demonstrate the effec- tiveness of our proposed invariant meta learning on out- ofdistribution few-shot tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
8
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
9
+ page_content=' Introduction Modern deep learning techniques have illustrated their excellent capabilities in many areas like computer vision, natural language processing and recommendation, etc [11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
10
+ page_content=' However, these methods relies on large training data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
11
+ page_content=' To overcome this limitation, few-shot learning methods such as meta learning has been proposed [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
12
+ page_content=' Most popular meta learning approaches is the optimization-based metalearning [4, 16], which is model-agnostic and can be applied to var- ious downstream tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
13
+ page_content=' However, many recent researches have revealed the vulnerability of machine learning model when exposed to data with different distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
14
+ page_content=' Such massive gap is induced by the violation of a funda- mental assumption that training and test data are identically and independently distributed (a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
15
+ page_content='k.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
16
+ page_content='a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
17
+ page_content=' i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
18
+ page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
19
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
20
+ page_content=' assumption), upon which most of the existing meta learning models are developed [4, 16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
21
+ page_content=' In many real cases where i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
22
+ page_content='i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
23
+ page_content='d.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
24
+ page_content=' assump- tion can hardly be satisfied, especially those high-stake ap- plications such as healthcare, military and autonomous driv- ing, instead of generalization within the training distribu- tion, the ability to generalize under distribution shift is of more critical significance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
25
+ page_content=' As shown in Figure 1, given tran- The first two authors contributed equally as joint first authorship.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
26
+ page_content=' The last two authors contributed equally as joint second authorship.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
27
+ page_content=' Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
28
+ page_content=' Illustration example of how the distribution shifts be- tween training data and testing data hamper the performance of model predictions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
29
+ page_content=' Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
30
+ page_content=' Causal framework of dog perdiction task.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
31
+ page_content=' Due to the spurious correlation, the model tends to focus on both grass and dog, which lead to failed prediction in other distributions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
32
+ page_content=' ing data where dogs are on the grass, model could not make accurate predictions in testing data where dogs are in water, cage or street.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
33
+ page_content=' The reason is that the supurious correlation between grass and dog in traning data hamper the perfor- mance of model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
34
+ page_content=' Due to the spurious correlation, the model tends to focus on both grass and dog, which lead to failed prediction in other distribution such as dogs are in water, cage or street as shown in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
35
+ page_content=' However, recent meta learning methods could not overcome the distribution shifts between training and testing data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
36
+ page_content=' In this paper, we con- sider a realistic scenario where tasks come from different distributions (out-of-distribution, OOD).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
37
+ page_content=' In this paper, to overcome the problem mentioned above, we propose Invariant Meta Learning (IML) for out-of- dis- arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
38
+ page_content='11779v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
39
+ page_content='LG] 26 Jan 2023 Athome onbeach eating incage inwater lying ongrass instreet running Training data Model Testing dataGrass--Label:Strongcorrelation CausalFramework Weakcausation Dog noseLabel:Strong correlation X Strong causation T: grass X: dog nose Y:labeltribution tasks, a general learning framework that jointly ad- justs gradient magnitudes and directions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
40
+ page_content=' Specifically, in- variant meta learning find invariant optimal metainitializa- tion, and fast adapt to out-of-distribution tasks with regular- ization penalty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
41
+ page_content=' To summarize, our main contributions are: We consider the challenge of out-of-distribution tasks faced by few-shot learning, we show a natural idea to jointly adjust gradient magnitudes and directions of all tasks in the meta optimization process;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
42
+ page_content=' We propose Invariant Meta Learning (IML) for out- ofdistribution tasks, a general learning framework that jointly adjusts gradient magnitudes and directions;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
43
+ page_content=' We conduct extensive experiments and analysis to demonstrate that our approach effectively improves the performance and generalization ability under both in- distribution and out-of-distribution few-shot settings, and thus it can be regarded as a better baseline.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
44
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
45
+ page_content=' Method In this section, we introduce our proposed Invariant Meta Learning (IML) to address the out-of-distribution problem in few-shot tasks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
46
+ page_content=' IML learns invariant optimal predic- tors based on optimization based meta learning framework.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
47
+ page_content=' To learn invariant optimal meta-initialization in optimiza- tion based meta learning, the main challenge is that OOD problem exacerbates the inconsistency in both task-gradient magnitudes and directions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
48
+ page_content=' To overcome such problem, IML finds invariant optimal initialization, and adapt to outof- distribution tasks with regularization penalty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
49
+ page_content=' Model-agnostic meta-learning (MAML) [4] is an ap- proach to optimization-based meta-learning that is related to our work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
50
+ page_content=' For some parametric model fθ, MAML aims to find a single set of parameters θ which, using a few op- timization steps, can be successfully adapted to any novel task sampled from the same distribution.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
51
+ page_content=' For a particular task instance Ti = � Dtr, Dval� , the parameters are adapted to task-specific model parameters θ′ i by applying some dif- ferentiable function, typically an update rule of the form: θ′ i = G � θ, Dtr� , (1) where G is typically implemented as a step of gradi- ent descent on the few-shot training set Dtr , θ′ i = θ− α∇θLtr Ti (fθ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
52
+ page_content=' Generally, multiple sequential adaptation steps can be applied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
53
+ page_content=' The learning rate α can also be met- alearned concurrently, in which case we refer to this algo- rithm as Meta-SGD [13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
54
+ page_content=' During meta-training, the param- eters θ are updated by back-propagating through the adap- tation procedure, in order to reduce errors on the validation set Dval : θ ← θ − η∇θ � Ti∼p(T ) Lval Ti � fθ′ i � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
55
+ page_content=' (2) The approach includes the main ingredients of optimization-based meta-learning with neural networks: initialization is done by maintaining an explicit set of model parameters θ;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
56
+ page_content=' the adaptation procedure, or “inner loop”, takes θ as input and returns θ′ i adapted specifically for task instance Ti, by iteratively using gradient descent (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
57
+ page_content=' 1);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
58
+ page_content=' and termination, which is handled simply by choosing a fixed number of optimization steps in the “inner loop”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
59
+ page_content=' MAML updates θ by differentiating through the “inner loop” in order to minimize errors of instance-specific adapted models fθ′ i on the corresponding validation set (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
60
+ page_content=' 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
61
+ page_content=' We refer to this process as the “outer loop” of meta-learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
62
+ page_content=' We use the same stages to describe IML.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
63
+ page_content=' Invariant Meta Learning (IML) finds invariant opti- mal meta-initialization, and fast adapt to out-of-distribution tasks with regularization penalty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
64
+ page_content=' MAML fast adapt net- work to new task during the inner loop and learns univer- sal meta-initialization in outer loop.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
65
+ page_content=' Similarly, in IML, we update network with the bi-level update, optimizing clas- sifier in the inner loop and learning feature representation in the outer loop.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
66
+ page_content=' For the inner-level optimization, the pa- rameters θ of the predictor become θi while adapting to the task ti ∈ Ttr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
67
+ page_content=' This correspond to the inner optimization of MAML, except that each task ti has a corresponding net- work θi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
68
+ page_content=' The optimization in the inner loop can be defined as follows: θ′ i = θ − α∇θLtr Ti (fθ) (3) where α is a learning rate of the inner optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
69
+ page_content=' With inner optimized network fθ′ i, we have outer loop objective function with variance penalty regularizer: Lval = � Ti∼p(T tr) � Tj∼p(T val) Lval Tj � fθ′ i � (4) θ ← θ − η∇θLval − βλ trace � VarT val � ∇θLval�� (5) where η, β are the learning rate of the outer loop optimiza- tion, tj is task j for outer loop optimization for the net- work θ′ i, L is the loss function for outer loop optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
70
+ page_content=' Note that the inner optimized network fθ′ i is used to up- date meta-initialization in outer loop with tj whereas it is updated from meta-initialization with ti in ther inner loop.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
71
+ page_content=' IML learn invariant meta-initialization obtained from the discrepancy among different training tasks with variance penalty regularizer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
72
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
73
+ page_content=' Experiments Datasets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
74
+ page_content=' In this paper, we address the few-shot clas- sification problem under both in-distribution and out- ofdistribution FSL settings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
75
+ page_content=' These settings are conducted on three benchmark datasets: miniImageNet [23], Caltech- UCSD-Birds 200-2011 (CUB) [25], and SUN Attribute Database (SUN) [15].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
76
+ page_content=' Method miniImageNet CUB SUN 5-way 1-shot 5-way 5-shot 5-way 1-shot 5-way 5-shot 5-way 1-shot 5-way 5-shot Meta-Learner LSTM 24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
77
+ page_content='99 29.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
78
+ page_content='79 36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
79
+ page_content='23 44.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
80
+ page_content='39 30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
81
+ page_content='99 44.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
82
+ page_content='86 MAML 45.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
83
+ page_content='69 60.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
84
+ page_content='90 48.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
85
+ page_content='87 63.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
86
+ page_content='99 57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
87
+ page_content='75 71.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
88
+ page_content='45 Reptile 26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
89
+ page_content='59 39.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
90
+ page_content='87 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
91
+ page_content='21 42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
92
+ page_content='35 28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
93
+ page_content='30 51.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
94
+ page_content='62 Matching Network 47.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
95
+ page_content='63 56.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
96
+ page_content='28 53.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
97
+ page_content='06 62.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
98
+ page_content='19 55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
99
+ page_content='02 62.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
100
+ page_content='57 Prototypical Network 46.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
101
+ page_content='15 65.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
102
+ page_content='56 48.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
103
+ page_content='21 57.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
104
+ page_content='80 55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
105
+ page_content='70 67.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
106
+ page_content='32 Relation Network 47.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
107
+ page_content='64 63.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
108
+ page_content='65 52.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
109
+ page_content='76 64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
110
+ page_content='71 58.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
111
+ page_content='29 72.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
112
+ page_content='15 Baseline 23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
113
+ page_content='84 32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
114
+ page_content='09 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
115
+ page_content='14 35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
116
+ page_content='35 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
117
+ page_content='44 34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
118
+ page_content='54 Baseline++ 30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
119
+ page_content='15 41.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
120
+ page_content='19 32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
121
+ page_content='48 42.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
122
+ page_content='43 35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
123
+ page_content='56 44.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
124
+ page_content='42 IML 48.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
125
+ page_content='35 67.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
126
+ page_content='21 54.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
127
+ page_content='18 65.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
128
+ page_content='85 59.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
129
+ page_content='24 74.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
130
+ page_content='18 Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
131
+ page_content=' Average accuracy (%) comparison to state-of-the-arts with 95% confidence intervals on 5-way classification tasks under the in-distribution FSL setting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
132
+ page_content=' Best results are displayed in boldface.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf'}
133
Source: /home/zjlab/wf/langchain-ChatGLM/knowledge_base/6NFKT4oBgHgl3EQfTS23/content/2301.11779v1.pdf

Method               | miniImageNet→CUB     | miniImageNet→SUN     | CUB→miniImageNet
                     | 1-shot     5-shot    | 1-shot     5-shot    | 1-shot     5-shot
Meta-Learner LSTM    | 23.77      30.58     | 25.52      32.14     | 22.58      28.18
MAML                 | 40.29      53.01     | 46.07      59.08     | 33.36      41.58
Reptile              | 24.66      40.86     | 32.15      50.38     | 24.56      40.60
Matching Network     | 38.34      47.64     | 39.58      53.20     | 26.23      32.90
Prototypical Network | 36.60      54.36     | 46.31      66.21     | 29.22      38.73
Relation Network     | 39.33      50.64     | 44.55      61.45     | 28.64      38.01
Baseline             | 24.16      32.73     | 25.49      37.15     | 22.98      28.41
Baseline++           | 29.40      40.48     | 30.44      41.71     | 23.41      25.82
IML                  | 41.27      57.34     | 50.42      69.15     | 34.26      44.17

Table 2. Average accuracy (%) comparison to state-of-the-arts with 95% confidence intervals on 5-way classification tasks under the in-distribution FSL setting. Best results are displayed in boldface.
Baselines. To evaluate the effectiveness of the proposed framework, we consider the following representative meta learning methods on the few-shot image classification task: MAML [5], Reptile [14], Matching Network [23], Prototypical Network [20], Relation Network [21], Baseline and Baseline++ [3].

Experimental Settings. We conduct experiments on 5-way 1-shot and 5-way 5-shot settings; there are 15 query samples per class in each task. We report the average accuracy (%) and the corresponding 95% confidence interval over 2000 tasks randomly sampled from the novel classes. To fairly evaluate the original performance of each method, we use the same 4-layer ConvNet [23] as the backbone for all methods and do not adopt any data augmentation during training. All methods are trained with the Adam optimizer [10], and the initial learning rate is set to 1e-3. For each method, models are trained for 40,000 tasks at most, and the best model on the validation classes is used to evaluate the final reported performance in the meta-test phase.
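The evaluation protocol just described (5-way tasks, 15 queries per class, 2000 meta-test episodes, accuracy reported with a 95% confidence interval) is straightforward to implement. The sketch below is an illustration of that evaluation loop rather than code from the paper; dataset.sample_episode and model.adapt_and_predict are hypothetical interfaces standing in for whatever episode sampler and meta-learner are actually used.

import numpy as np

def evaluate_episodes(model, dataset, n_way=5, k_shot=1, n_query=15, n_tasks=2000, seed=0):
    """Meta-test loop: sample n_way/k_shot episodes from the novel classes and
    report mean accuracy (%) with a 95% confidence interval over the tasks."""
    rng = np.random.default_rng(seed)
    accuracies = []
    for _ in range(n_tasks):
        # support: n_way * k_shot labelled examples, query: n_way * n_query examples
        sx, sy, qx, qy = dataset.sample_episode(n_way=n_way, k_shot=k_shot,
                                                n_query=n_query, rng=rng)
        predictions = model.adapt_and_predict(sx, sy, qx)  # adapt on support, predict queries
        accuracies.append((predictions == qy).mean())
    accuracies = np.asarray(accuracies)
    mean_acc = 100.0 * accuracies.mean()
    ci95 = 100.0 * 1.96 * accuracies.std(ddof=1) / np.sqrt(n_tasks)  # normal approximation
    return mean_acc, ci95

Each cell of the table above corresponds to one run of this kind, e.g. k_shot=5 on the miniImageNet→CUB split for the 5-way 5-shot column.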
Evaluation Using the In-Distribution Setting. Table 1 shows the comparative results under the in-distribution FSL setting on three benchmark datasets. It is observed that IML outperforms the original MAML in all in-distribution FSL scenarios. For 1-shot and 5-shot on miniImageNet → miniImageNet, IML achieves about 1% higher performance than Prototypical Network. However, IML achieves 5% and 10% higher performance for 1-shot and 5-shot on CUB → CUB, and 3% and 6% higher performance on SUN → SUN. As the latter two scenarios are conducted on fine-grained classification datasets, we attribute the promising improvement to the fact that the categories in these fine-grained datasets share more local concepts than those in coarse-grained datasets, and thus a more discriminative space can be rapidly learned with a few steps of adaptation. Moreover, IML achieves the best performance among all baselines in all in-distribution FSL scenarios, which shows that our approach can be considered a better baseline option under the in-distribution FSL setting.

Evaluation Using the Out-of-Distribution Setting. We also conduct out-of-distribution FSL experiments and report the comparative results in Table 2. Compared to the results under the in-distribution setting, it can be observed that all approaches suffer from a larger discrepancy between the distributions of training and testing tasks, which results in a performance decline in all scenarios. However, IML still outperforms the original MAML in all out-of-distribution FSL scenarios, demonstrating that the bilevel optimization strategy for adaptation and the learning of transferable latent factors can be utilized to improve simple meta learning approaches. Also, IML achieves all the best results, indicating that our approach can be regarded as a promising baseline under the out-of-distribution setting.

4. Conclusion

In this paper, we consider the challenge of out-of-distribution tasks faced by few-shot learning. We propose Invariant Meta Learning (IML) for out-of-distribution tasks, a general learning framework that jointly adjusts gradient magnitudes and directions. Extensive experiments demonstrate that our approach effectively improves the performance and generalization ability under both in-distribution and out-of-distribution few-shot settings, and thus it can be regarded as a better baseline.
References

[1] Yoshua Bengio, Samy Bengio, and Jocelyn Cloutier. Learning a synaptic learning rule. Citeseer, 1990.
[2] Fei Chen, Mi Luo, Zhenhua Dong, Zhenguo Li, and Xiuqiang He. Federated meta-learning with fast convergence and efficient communication. arXiv preprint arXiv:1802.07876, 2018.
[3] Wei-Yu Chen, Yen-Cheng Liu, Zsolt Kira, Yu-Chiang Frank Wang, and Jia-Bin Huang. A closer look at few-shot classification. arXiv preprint arXiv:1904.04232, 2019.
[4] Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In International Conference on Machine Learning, pages 1126–1135. PMLR, 2017.
[5] Chelsea Finn, Pieter Abbeel, and Sergey Levine. Model-agnostic meta-learning for fast adaptation of deep networks. In Proceedings of the 34th International Conference on Machine Learning, pages 1126–1135. PMLR, 2017.
[6] Chelsea Finn and Sergey Levine. Meta-learning and universality: Deep representations and gradient descent can approximate any learning algorithm. arXiv preprint arXiv:1710.11622, 2017.
[7] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. Meta-learning in neural networks: A survey. arXiv preprint arXiv:2004.05439, 2020.
[8] Simon Jenni and Paolo Favaro. Deep bilevel learning. In Proceedings of the European Conference on Computer Vision (ECCV), pages 618–633, 2018.
[9] Taewon Jeong and Heeyoung Kim. OOD-MAML: Meta-learning for few-shot out-of-distribution detection and classification. Advances in Neural Information Processing Systems, 33:3907–3916, 2020.
[10] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
[11] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009.
[12] Hae Beom Lee, Hayeon Lee, Donghyun Na, Saehoon Kim, Minseop Park, Eunho Yang, and Sung Ju Hwang. Learning to balance: Bayesian meta-learning for imbalanced and out-of-distribution tasks. arXiv preprint arXiv:1905.12917, 2019.
[13] Zhenguo Li, Fengwei Zhou, Fei Chen, and Hang Li. Meta-SGD: Learning to learn quickly for few-shot learning. arXiv preprint arXiv:1707.09835, 2017.
[14] Alex Nichol, Joshua Achiam, and John Schulman. On first-order meta-learning algorithms. arXiv preprint arXiv:1803.02999, 2018.
[15] Genevieve Patterson, Chen Xu, Hang Su, and James Hays. The SUN attribute database: Beyond categories for deeper scene understanding. International Journal of Computer Vision, 108(1):59–81, 2014.
[16] Aravind Rajeswaran, Chelsea Finn, Sham M Kakade, and Sergey Levine. Meta-learning with implicit gradients. Advances in Neural Information Processing Systems, 32, 2019.
[17] Sachin Ravi and Hugo Larochelle. Optimization as a model for few-shot learning. 2016.
[18] Andrei A Rusu, Dushyant Rao, Jakub Sygnowski, Oriol Vinyals, Razvan Pascanu, Simon Osindero, and Raia Hadsell. Meta-learning with latent embedding optimization. arXiv preprint arXiv:1807.05960, 2018.
[19] Amrith Setlur, Oscar Li, and Virginia Smith. Is support set diversity necessary for meta-learning? arXiv preprint arXiv:2011.14048, 2020.
[20] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. Advances in Neural Information Processing Systems, 30, 2017.
[21] Flood Sung, Yongxin Yang, Li Zhang, Tao Xiang, Philip HS Torr, and Timothy M Hospedales. Learning to compare: Relation network for few-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1199–1208, 2018.
[22] Sebastian Thrun and Lorien Pratt. Learning to learn. Springer Science & Business Media, 2012.
[23] Oriol Vinyals, Charles Blundell, Timothy Lillicrap, Daan Wierstra, et al. Matching networks for one shot learning. Advances in Neural Information Processing Systems, 29, 2016.
[24] Risto Vuorio, Shao-Hua Sun, Hexiang Hu, and Joseph J Lim. Multimodal model-agnostic meta-learning via task-aware modulation. Advances in Neural Information Processing Systems, 32, 2019.
[25] Catherine Wah, Steve Branson, Peter Welinder, Pietro Perona, and Serge Belongie. The Caltech-UCSD Birds-200-2011 dataset. 2011.
7NE2T4oBgHgl3EQfPQZw/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6686ae2509b51d0f7e491138107b3bf9830e28fe0329d08bf85e63195c4e4234
3
+ size 232162
89E0T4oBgHgl3EQfwgGc/content/2301.02634v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec76062bdd94914f1b63f1a6c25edf2029673c24f6e641ee2943c96661e20f49
3
+ size 238676
89E0T4oBgHgl3EQfwgGc/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:295dae4f778029a89ff3992a1d5ebc746d383d6e53e1abf3ce95c418083b79f2
3
+ size 1966125
89E0T4oBgHgl3EQfwgGc/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9ee97a0bb5cc35a3100c16769c7def0879699feb79cd3a9ad933353e34bfc4fb
3
+ size 86167
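The entries above are Git LFS pointer files rather than the binary objects themselves: each records the pointer spec version, the SHA-256 object id, and the object size in bytes. A minimal sketch for reading and checking such a pointer is shown below (an illustration, not tooling from this repository; the paths in the usage comment are placeholders, not paths from this commit).

import hashlib
from pathlib import Path

def parse_lfs_pointer(pointer_path):
    """Parse a Git LFS pointer file (version / oid sha256:<hex> / size <bytes>)."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {"version": fields["version"],
            "sha256": fields["oid"].split(":", 1)[1],
            "size": int(fields["size"])}

def verify_lfs_object(pointer_path, object_path):
    """Check that a downloaded object matches the oid and size recorded in its pointer."""
    meta = parse_lfs_pointer(pointer_path)
    data = Path(object_path).read_bytes()
    return len(data) == meta["size"] and hashlib.sha256(data).hexdigest() == meta["sha256"]

# Hypothetical usage:
# verify_lfs_object("pointers/index.pkl", "downloads/index.pkl")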
8dAyT4oBgHgl3EQfp_jS/content/tmp_files/2301.00536v1.pdf.txt ADDED
@@ -0,0 +1,5015 @@
arXiv:2301.00536v1 [math.PR] 2 Jan 2023

$L_p$-SOLVABILITY AND HÖLDER REGULARITY FOR STOCHASTIC TIME FRACTIONAL BURGERS' EQUATIONS DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE

BEOMSEOK HAN

Abstract. We present the $L_p$-solvability for stochastic time fractional Burgers' equations driven by multiplicative space-time white noise:
$$ \partial_t^\alpha u = a^{ij}u_{x^ix^j} + b^iu_{x^i} + cu + \bar b^i u u_{x^i} + \partial_t^\beta \int_0^t \sigma(u)\,dW_t, \quad t>0; \qquad u(0,\cdot)=u_0, $$
where $\alpha\in(0,1)$, $\beta<3\alpha/4+1/2$, and $d<4-2(2\beta-1)_+/\alpha$. The operators $\partial_t^\alpha$ and $\partial_t^\beta$ are the Caputo fractional derivatives of order $\alpha$ and $\beta$, respectively. The process $W_t$ is an $L_2(\mathbb{R}^d)$-valued cylindrical Wiener process, and the coefficients $a^{ij}$, $b^i$, $c$, and $\sigma(u)$ are random.
In addition to the existence and uniqueness of a solution, we also suggest the Hölder regularity of the solution. For example, for any constant $T<\infty$, small $\varepsilon,\delta>0$, and almost sure $\omega\in\Omega$, we have
$$ \sup_{x\in\mathbb{R}^d}|u(\omega,\cdot,x)|_{C^{\left[\frac{\alpha}{2}\left(\left(2-\frac{(2\beta-1)_+}{\alpha}-\frac{d}{2}\right)\wedge 1\right)+\frac{(2\beta-1)_-}{2}\right]\wedge 1-\varepsilon}([\delta,T])} < \infty $$
and
$$ \sup_{t\le T}|u(\omega,t,\cdot)|_{C^{\left(2-\frac{(2\beta-1)_+}{\alpha}-\frac{d}{2}\right)\wedge 1-\varepsilon}(\mathbb{R}^d)} < \infty. $$
Moreover, $\delta$ can be $0$ if the initial data $u_0=0$. Additionally, the Hölder regularity of the solution in time changes behavior at $\beta=1/2$. Furthermore, if $\beta\ge 1/2$, then the Hölder regularity of the solution in time is $\alpha/2$ times the one in space.

1. Introduction

This article investigates the existence, uniqueness, $L_p$-regularity, and maximal Hölder regularity of a solution to stochastic time fractional Burgers' equations (STFBEs) driven by space-time white noise. We consider
$$ \partial_t^\alpha u = Lu + \bar b^i u u_{x^i} + \partial_t^\beta \int_0^t \sigma(u)\,dW_t, \quad (\omega,t,x)\in\Omega\times(0,\infty)\times\mathbb{R}^d; \qquad u(0,\cdot)=u_0, \tag{1.1} $$
where $\alpha\in(0,1)$, $\beta<\frac{3}{4}\alpha+\frac{1}{2}$, and $d<4-\frac{2(2\beta-1)_+}{\alpha}$. The operators $\partial_t^\alpha$ and $\partial_t^\beta$ are the Caputo fractional derivatives of order $\alpha$ and $\beta$, and the operator $L$ is the second order random differential operator defined as follows:
$$ (Lu)(\omega,t,x) = a^{ij}(\omega,t,x)u_{x^ix^j} + b^i(\omega,t,x)u_{x^i} + c(\omega,t,x)u. $$

2020 Mathematics Subject Classification. 35R11, 26A33, 60H15, 35R60.
Key words and phrases. Stochastic partial differential equation, Time fractional derivative, Stochastic Burgers' equation, Time fractional Burgers' equation, Space-time white noise, Hölder regularity.
This work was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government (MSIT) (No. NRF-2021R1C1C2007792) and the BK21 Fostering Outstanding Universities for Research (FOUR) funded by the Ministry of Education (MOE, Korea) and the National Research Foundation of Korea (NRF).
The random coefficients $a^{ij}$, $b^i$, and $c$ are predictable, differentiable (or continuous), and bounded functions. The diffusion coefficient $\sigma(u)=\sigma(\omega,t,x,u)$ is a predictable and measurable function satisfying growth conditions and Lipschitz continuity in $u$. The detailed conditions on $a^{ij}$, $b^i$, $c$, and $\sigma$ are described in Assumptions 3.1 and 3.3. The random measure $dW_t$ is induced from an $L_2(\mathbb{R}^d)$-valued cylindrical Wiener process $W_t$.

When $\alpha=\beta=1$ in equation (1.1), the equation is said to be a stochastic Burgers' equation (SBE) of the form
$$ \partial_t u = Lu + \bar b u u_x + \sigma(u)\dot W, \quad (\omega,t,x)\in\Omega\times(0,\infty)\times\mathbb{R}; \qquad u(0,\cdot)=u_0, \tag{1.2} $$
where $\dot W$ is the space-time white noise. Numerous studies have been conducted on equation (1.2), but we only refer the reader to [13, 14, 29]. In [13], the author proved the uniqueness, existence, and continuity of a solution to a semilinear equation, including an equation of type (1.2) on the unit interval $(0,1)$. Additionally, the same properties of a solution on $\mathbb{R}$ were obtained in [14] when $L_2$ boundedness conditions on $\sigma(u)$ were imposed. In [29], the authors investigated the Hölder regularity and moment estimates of the random field solution to (1.2) with $L=\Delta$ and $\bar b=-1$.

In contrast, (deterministic) partial differential equations with Caputo fractional derivatives have been used in many fields, such as electrochemical processes [5, 19], dielectric polarization [33], viscoelastic materials [32], biology [31], and physics [11, 18]. Especially, equation (1.1) with $\alpha\in(0,1)$ and $\sigma(u)=0$ is called a time fractional Burgers' equation (TFBE), which describes the propagation of waves through viscous media ([1, 2]). Indeed, various studies have been conducted on numerical analysis for the TFBE (see [3, 9, 10, 20, 30]). From a mathematical standpoint, it is reasonable to wonder whether it is possible to demonstrate the uniqueness and existence of a solution to STFBE (1.1), and also to obtain the Hölder regularity of the solution. To the best of our knowledge, [36] is the only study that answers this question. The authors of [36] demonstrate the existence, uniqueness, and regularity of the mild solution to SBEs with fractional derivatives in time and space on a bounded domain $D\subset\mathbb{R}^d$.

In this paper, we provide the $L_p$ uniqueness, existence, and regularity of a strong solution to equation (1.1) with a random second order differential operator $L$ on the whole spatial domain $\mathbb{R}^d$. Additionally, we achieve the Hölder regularity of the solution in time and space. In detail, if $u(\omega,t,x)$ denotes the solution to equation (1.1), then for any bounded stopping time $\tau\le T$ and small constants $\varepsilon,\delta>0$, almost surely,
$$ \sup_{x\in\mathbb{R}^d}|u(\omega,\cdot,x)|_{C^{\left[\frac{\alpha}{2}\left(\left(2-\frac{(2\beta-1)_+}{\alpha}-\frac{d}{2}\right)\wedge 1\right)+\frac{(2\beta-1)_-}{2}\right]\wedge 1-\varepsilon}([\delta,\tau])} < \infty, \qquad \sup_{t\le\tau}|u(\omega,t,\cdot)|_{C^{\left(2-\frac{(2\beta-1)_+}{\alpha}-\frac{d}{2}\right)\wedge 1-\varepsilon}(\mathbb{R}^d)} < \infty, \tag{1.3} $$
where $a_+=(|a|+a)/2$, $a_-=(|a|-a)/2$, and $C^\gamma(D)$ are the Hölder spaces. Observe that the behavior of the Hölder regularity of the solution in time changes from $\beta=1/2$. For example, if $\beta\ge 1/2$, then the Hölder regularity of the solution in time is $\alpha/2$ times that of the regularity in space. Additionally, we can recover the Hölder regularity results of SBEs by letting $\alpha,\beta\uparrow 1$. These results are consistent with the well-known results of stochastic heat equations driven by space-time white noise (e.g. [26, Remark 8.7] or [16, Corollary 3.1]). In contrast, if $\beta<1/2$, the Hölder regularity in time gains additional regularity by as much as $1/2-\beta$ (Remark 3.12). Finally, $\delta=0$ is allowed if the initial data $u_0$ is $0$ (Remark 3.6).
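As a quick sanity check on the exponents in (1.3), the classical limit mentioned above can be computed directly (a worked example added here, not taken from the paper). With $\alpha=\beta=1$ and $d=1$:

% space exponent in (1.3):
\[ \Big(2-\tfrac{(2\beta-1)_+}{\alpha}-\tfrac{d}{2}\Big)\wedge 1 = \Big(2-1-\tfrac{1}{2}\Big)\wedge 1 = \tfrac{1}{2}, \]
% time exponent in (1.3):
\[ \Big[\tfrac{\alpha}{2}\Big(\big(2-\tfrac{(2\beta-1)_+}{\alpha}-\tfrac{d}{2}\big)\wedge 1\Big)+\tfrac{(2\beta-1)_-}{2}\Big]\wedge 1 = \tfrac{1}{2}\cdot\tfrac{1}{2}+0 = \tfrac{1}{4}, \]
% so the solution is (1/2 - eps)-Hölder in space and (1/4 - eps)-Hölder in time,
% matching the classical space-time white noise regularity quoted from [26] and [16].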
Several remarks about the proof are made. The proof strategy for the main theorem (Theorem 3.5) is based on [16]. However, some differences exist because it is not certain that Itô's formula and the maximum principle hold for STFBE (1.1).

Thus, the proof proceeds as follows. As in [16], we focus on proving the uniqueness and existence of the $L_p$ solution for each (large) $p>2$, and the main difficulty is demonstrating the existence of the solutions. Hence, we consider the cut-off form of equation (1.1) to obtain local solutions. Afterward, we construct a global solution candidate $u$ by pasting the local solutions together (Lemma 4.3 and Remark 4.4). A uniform $L_p$ bound of $u$ is required to show that the candidate $u$ is a global solution; thus, we divide the local solution into two parts: the noise-dominating part and the nonlinear-dominating part. To estimate the noise-dominating part, we employ the $L_p$ bound of the diffusion coefficient $\sigma(u)$ (Lemma 4.5). In contrast, to control the nonlinear-dominating part (Lemma 4.8), we employ an inequality similar to the chain rule (Lemma 4.6) and a version of the Grönwall inequality including the Caputo fractional derivatives (Theorem 4.7).

To obtain the maximal Hölder regularity of the solution to equation (1.1), we require two components: the Hölder embedding theorem for the solution space $\mathbb{H}_p^\gamma(\tau)$ (Theorem 2.16) and the uniqueness of the solution in $p$ (Theorem 3.10). Indeed, when the $L_p$ existence and uniqueness of a solution are given, we have the Hölder regularity of the solution for each (large) $p>2$ by employing the Hölder embedding theorem for the solution space (Theorems 2.16 and 3.5). The Hölder regularity of the solution becomes larger as a larger $p$ is chosen; thus, we have to select $p$ as large as possible. Therefore, we require the uniqueness of solutions in $p$ because $p$ varies.

This article is organized as follows. Section 2 introduces the definitions and properties of space-time white noise, fractional calculus, and stochastic Banach spaces. Additionally, we present the Hölder embedding theorem for the solution space $\mathbb{H}_p^\gamma(\tau)$. Section 3 states the main results of this article and suggests some remarks. The proof of the main results is presented in Section 4. Next, Section 5 proves the Hölder embedding theorem for the solution space $\mathbb{H}_p^\gamma(\tau)$.

We finish this section with an introduction to the notation used in this paper. The sets $\mathbb{N}$ and $\mathbb{R}$ are the sets of natural and real numbers, respectively. The set $\mathbb{R}^d$ denotes the $d$-dimensional Euclidean space of points $x=(x^1,\dots,x^d)$ for $x^i\in\mathbb{R}$. Throughout this paper, we assume Einstein's summation convention on $i,j,k\in\mathbb{N}$. We use $:=$ to denote a definition. For a real-valued function $f$, we set the following:
$$ f_+ := \frac{|f|+f}{2} \quad\text{and}\quad f_- := \frac{|f|-f}{2}. $$
For a normed space $F$, a measure space $(X,\mathcal{M},\mu)$, and $p\in[1,\infty)$, the space $L_p(X,\mathcal{M},\mu;F)$ is the set of $F$-valued $\mathcal{M}^\mu$-measurable functions such that
$$ \|u\|_{L_p(X,\mathcal{M},\mu;F)} := \left(\int_X \|u(x)\|_F^p\,\mu(dx)\right)^{1/p} < \infty. $$
The set $\mathcal{M}^\mu$ is the completion of $\mathcal{M}$ with respect to the measure $\mu$. For $\gamma\in(0,1]$ and $k=0,1,2,\dots$, the set $C^{k+\gamma}(\mathbb{R}^d)$ is the set of $\mathbb{R}$-valued continuous functions $u=u(x)$ such that
$$ |u|_{C^{k+\gamma}(\mathbb{R}^d)} := \sup_{x\in\mathbb{R}^d,\,|\beta|=k}|D^\beta u(x)| + \sup_{\substack{x,y\in\mathbb{R}^d,\,x\ne y\\ |\beta|=k}}\frac{|D^\beta u(x)-D^\beta u(y)|}{|x-y|^\gamma} < \infty, $$
where $\beta$ is a multi-index. Similarly, for $\gamma\in(0,1]$ and $0\le\delta<T<\infty$, the set $C^\gamma([\delta,T];F)$ is the set of $F$-valued continuous functions $u$ such that
$$ |u|_{C^\gamma([\delta,T];F)} := \sup_{t\in[\delta,T]}|u(t)|_F + \sup_{\substack{t,s\in[\delta,T]\\ s\ne t}}\frac{|u(t)-u(s)|_F}{|t-s|^\gamma} < \infty. $$
For $a,b\in\mathbb{R}$, we set $a\wedge b:=\min\{a,b\}$ and $a\vee b:=\max\{a,b\}$. Let $\mathcal{S}=\mathcal{S}(\mathbb{R}^d)$ denote the set of Schwartz functions on $\mathbb{R}^d$. Let $N=N(a_1,a_2,\dots,a_k)$ be a generic constant if $N$ depends only on $a_1,a_2,\dots,a_k$. The constant $N$ can vary line by line. For functions depending on $\omega$, $t$, and $x$, the argument $\omega\in\Omega$ is omitted. Finally, for $x\in\mathbb{R}^d$, $\bar x^i:=(x^1,\dots,x^{i-1},x^{i+1},\dots,x^d)$.

2. Preliminaries

In this section, we introduce the definitions and properties of space-time white noise, fractional calculus, and stochastic Banach spaces. Throughout this paper, $(\Omega,\mathcal{F},P)$ is a complete probability space equipped with a filtration $\{\mathcal{F}_t\}_{t\ge 0}$. Let $\{\mathcal{F}_t\}_{t\ge 0}$ denote a filtration satisfying the usual conditions. Let $\mathcal{P}$ be the predictable $\sigma$-field related to $\{\mathcal{F}_t\}_{t\ge 0}$. First, we present the space-time white noise $\dot W$ to understand the stochastic part of (1.1).

Definition 2.1 (Space-time white noise). A generalized random field $\dot W$ is said to be the space-time white noise if it is a centered Gaussian random field such that its covariance is given by
$$ \mathbb{E}\,\dot W(h)\dot W(g) = \int_0^\infty\!\!\int_{\mathbb{R}^d} h(t,x)g(t,x)\,dx\,dt, \qquad \forall h,g\in L_2((0,\infty)\times\mathbb{R}^d). $$

Remark 2.2. We employ a series of Itô stochastic integrals to interpret the stochastic part of equation (1.1). More precisely, let $\{\eta^k: k\in\mathbb{N}\}$ be an orthonormal basis on $L_2(\mathbb{R}^d)$. If we define
$$ w_t^k := \int_0^t\!\!\int_{\mathbb{R}^d}\eta^k(x)\,\dot W(ds,dx) $$
using the Walsh integral (see [35]), then $\{w_t^k: k\in\mathbb{N}\}$ is a set of one-dimensional independent Wiener processes. Then, if we set (see [26, Section 8.3] and [23, Section 7])
$$ W_t := \sum_{k=1}^\infty \eta^k w_t^k, $$
then $W_t$ is an $L_2(\mathbb{R}^d)$-valued cylindrical Wiener process and $dW_t=\sum_k \eta^k\,dw_t^k$. Thus, equation (1.1) can be rewritten as
$$ \partial_t^\alpha u = Lu + \bar b^i u u_{x^i} + \partial_t^\beta\int_0^t \sigma(u)\eta^k\,dw_t^k, \quad (\omega,t,x)\in\Omega\times(0,\infty)\times\mathbb{R}^d; \qquad u(0,\cdot)=u_0. $$
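The series representation in Remark 2.2 can be visualized by truncating it at K terms. The sketch below (an illustration added here, not from the paper) does this in d = 1 on the interval [0, 1] with the sine basis; the bounded interval and the particular basis are assumptions made only for this illustration, since the paper works with an arbitrary orthonormal basis of L_2(R^d).

import numpy as np

rng = np.random.default_rng(0)
K, T, n_t, n_x = 50, 1.0, 200, 256          # truncation level and grid sizes

t = np.linspace(0.0, T, n_t + 1)
x = np.linspace(0.0, 1.0, n_x)
dt = t[1] - t[0]

# Orthonormal basis of L_2(0, 1): eta_k(x) = sqrt(2) * sin(k * pi * x), shape (K, n_x).
eta = np.sqrt(2.0) * np.sin(np.pi * np.outer(np.arange(1, K + 1), x))

# Independent one-dimensional Wiener processes w^k_t, shape (n_t + 1, K).
dw = np.sqrt(dt) * rng.standard_normal((n_t, K))
w = np.vstack([np.zeros((1, K)), np.cumsum(dw, axis=0)])

# Truncated field W_t(x) ~ sum_{k <= K} eta_k(x) * w^k_t.  The full series does not
# converge in L_2, which is exactly why W_t is only a *cylindrical* Wiener process
# and why the noise enters the equation through the one-dimensional integrals dw^k_t.
W = w @ eta
print(W.shape)   # (201, 256): one realization on the time-space grid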
Next, we review facts from fractional calculus. For more information, we refer the reader to [6, 17, 21, 32].

Definition 2.3. Let $\alpha>0$. For $\varphi\in L_1((0,T))$, the Riemann-Liouville fractional integral of order $\alpha$ is defined as
$$ I_t^\alpha\varphi(t) := (I_t^\alpha\varphi)(t) := \frac{1}{\Gamma(\alpha)}\int_0^t (t-s)^{\alpha-1}\varphi(s)\,ds \quad\text{for all } t\in(0,T), $$
where $\Gamma(\alpha):=\int_0^\infty t^{\alpha-1}e^{-t}\,dt$.

Remark 2.4. For any $q\in[1,\infty]$, by Jensen's inequality,
$$ \|I^\alpha\varphi\|_{L_q((0,T))} \le N(\alpha,q,T)\|\varphi\|_{L_q((0,T))}. \tag{2.1} $$
Therefore, $I_t^\alpha\varphi(t)$ is well-defined and finite for almost all $t\le T$. Additionally, Fubini's theorem implies that, for $\alpha,\beta\ge 0$, we have
$$ I^{\alpha+\beta}\varphi(t) = I^\alpha I^\beta\varphi(t). $$
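The Riemann-Liouville integral of Definition 2.3 is easy to approximate numerically by treating φ as piecewise constant and integrating the kernel (t−s)^(α−1) exactly on each subinterval. The sketch below is an illustration added here (not from the paper); it checks the approximation against the closed form I^α t = t^(1+α)/Γ(2+α).

import numpy as np
from math import gamma

def riemann_liouville_integral(phi_vals, t_grid, alpha):
    """Approximate (I^alpha phi)(t_n) = (1/Gamma(alpha)) * int_0^{t_n} (t_n - s)^(alpha-1) phi(s) ds
    on a uniform grid, with phi taken piecewise constant (left endpoint) on each subinterval."""
    n = len(t_grid)
    out = np.zeros(n)
    for i in range(1, n):
        tn = t_grid[i]
        left, right = t_grid[:i], t_grid[1:i + 1]
        # exact integral of (t_n - s)^(alpha - 1) over each [t_k, t_{k+1}]
        weights = ((tn - left) ** alpha - (tn - right) ** alpha) / alpha
        out[i] = (weights * phi_vals[:i]).sum() / gamma(alpha)
    return out

alpha = 0.6
t = np.linspace(0.0, 1.0, 2001)
approx = riemann_liouville_integral(t, t, alpha)     # phi(s) = s
exact = t ** (1 + alpha) / gamma(2 + alpha)          # closed form for phi(s) = s
print("max abs error:", np.abs(approx - exact).max())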
Definition 2.5. For $\alpha>0$, let $n\in\mathbb{N}$ be a nonnegative integer such that $n-1\le\alpha<n$. Suppose $\varphi(t)$ is a real-valued function on $[0,T]$ such that $\varphi$ is $(n-1)$-times differentiable and $(\frac{d}{dt})^{n-1}\varphi$ is absolutely continuous on $[0,T]$.
(i) The Riemann-Liouville fractional derivative $D_t^\alpha\varphi$ is defined as
$$ D_t^\alpha\varphi(t) := \frac{1}{\Gamma(n-\alpha)}\frac{d^n}{dt^n}\int_0^t (t-s)^{n-\alpha-1}\varphi(s)\,ds. $$
(ii) The Caputo fractional derivative $\partial_t^\alpha\varphi$ is defined as
$$ \partial_t^\alpha\varphi := \frac{1}{\Gamma(n-\alpha)}\int_0^t (t-s)^{n-\alpha-1}\varphi^{(n)}(s)\,ds := \frac{1}{\Gamma(n-\alpha)}\frac{d}{dt}\int_0^t (t-s)^{n-\alpha-1}\big(\varphi^{(n-1)}(s)-\varphi^{(n-1)}(0)\big)\,ds. $$

Remark 2.6.
(i) For any $\alpha,\beta\ge 0$, $D_t^\alpha D_t^\beta\varphi = D_t^{\alpha+\beta}\varphi$ and
$$ D_t^\alpha I_t^\beta\varphi = D_t^{\alpha-\beta}\varphi\,1_{\alpha>\beta} + I_t^{\beta-\alpha}\varphi\,1_{\alpha\le\beta}. $$
Additionally, if $\alpha\in(0,1)$, $I_t^{1-\alpha}\varphi$ is absolutely continuous, and $I_t^{1-\alpha}\varphi(0)=0$, then the following equality holds:
$$ I_t^\alpha D_t^\alpha\varphi(t) = \varphi(t). $$
(ii) By the definition of fractional derivatives, if $\varphi(0)=\varphi^{(1)}(0)=\cdots=\varphi^{(n-1)}(0)=0$, then $D_t^\alpha\varphi = \partial_t^\alpha\varphi$.
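A short worked example (added here, not taken from the paper) with $\alpha\in(0,1)$ and power functions makes Remark 2.6(ii) concrete:

\[ I_t^{\alpha} t^{\gamma} = \frac{\Gamma(\gamma+1)}{\Gamma(\gamma+\alpha+1)}\,t^{\gamma+\alpha}, \qquad \partial_t^{\alpha} t^{\gamma} = D_t^{\alpha} t^{\gamma} = \frac{\Gamma(\gamma+1)}{\Gamma(\gamma-\alpha+1)}\,t^{\gamma-\alpha} \quad (\gamma>0), \]
% the two derivatives agree here because t^gamma vanishes at t = 0 (Remark 2.6(ii)).
% For a constant, however, \partial_t^{\alpha} 1 = 0 while D_t^{\alpha} 1 = t^{-\alpha}/\Gamma(1-\alpha),
% so the Riemann-Liouville and Caputo derivatives differ when \varphi(0) \neq 0.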
Below we recall the definitions and properties of stochastic Banach spaces (for more detail, see [12, 25, 26, 27]). The solution space $\mathbb{H}_p^\gamma(T)$ and embedding theorems for $\mathbb{H}_p^\gamma(T)$ are suggested.

Definition 2.7. Let $p>1$ and $\gamma\in\mathbb{R}$. The space $H_p^\gamma = H_p^\gamma(\mathbb{R}^d)$ is the set of all tempered distributions $u$ on $\mathbb{R}^d$ such that
$$ \|u\|_{H_p^\gamma} := \big\|(1-\Delta)^{\gamma/2}u\big\|_{L_p} = \big\|\mathcal{F}^{-1}\big[(1+|\xi|^2)^{\gamma/2}\mathcal{F}(u)(\xi)\big]\big\|_{L_p} < \infty. $$
Similarly, $H_p^\gamma(l_2) = H_p^\gamma(\mathbb{R}^d; l_2)$ is a space of $l_2$-valued functions $g=(g^1,g^2,\dots)$ such that
$$ \|g\|_{H_p^\gamma(l_2)} := \Big\|\,\big|(1-\Delta)^{\gamma/2}g\big|_{l_2}\Big\|_{L_p} = \Big\|\,\big|\mathcal{F}^{-1}\big[(1+|\xi|^2)^{\gamma/2}\mathcal{F}(g)(\xi)\big]\big|_{l_2}\Big\|_{L_p} < \infty. $$

Remark 2.8. Let $d\in\mathbb{N}$ and $\gamma\in(0,\infty)$. A nonnegative smooth function $R_\gamma(x)$ exists on $\mathbb{R}^d$ such that, for $u\in C_c^\infty(\mathbb{R}^d)$,
$$ \big((1-\Delta)^{-\gamma/2}u\big)(x) = \int_{\mathbb{R}^d} R_\gamma(y)u(x-y)\,dy $$
and
$$ |R_\gamma(x)| \le N A_{\gamma,d}(x)1_{|x|\le 2} + N e^{-|x|/2}1_{|x|\ge 2}, $$
where $N=N(\gamma,d)$ is a positive constant and
$$ A_{\gamma,d}(x) = \begin{cases} |x|^{\gamma-d}+1+O(|x|^{\gamma-d+2}) & \text{for } 0<\gamma<d, \\ \log(2/|x|)+1+O(|x|^2) & \text{for } \gamma=d, \\ 1+O(|x|^{\gamma-d}) & \text{for } \gamma>d. \end{cases} $$
For more detail, see [12, Proposition 1.2.5].
We introduce the space of point-wise multipliers in $H_p^\gamma$.

Definition 2.9. Fix $\gamma\in\mathbb{R}$ and $\alpha\in[0,1)$ such that $\alpha=0$ if $\gamma\in\mathbb{Z}$ and $\alpha>0$ if $|\gamma|+\alpha$ is not an integer. Define
$$ B^{|\gamma|+\alpha} = \begin{cases} B(\mathbb{R}) & \text{if } \gamma=0, \\ C^{|\gamma|-1,1}(\mathbb{R}) & \text{if } \gamma \text{ is a nonzero integer}, \\ C^{|\gamma|+\alpha}(\mathbb{R}) & \text{otherwise}, \end{cases} \qquad B^{|\gamma|+\alpha}(\ell_2) = \begin{cases} B(\mathbb{R},\ell_2) & \text{if } \gamma=0, \\ C^{|\gamma|-1,1}(\mathbb{R},\ell_2) & \text{if } \gamma \text{ is a nonzero integer}, \\ C^{|\gamma|+\alpha}(\mathbb{R},\ell_2) & \text{otherwise}, \end{cases} $$
where $B(\mathbb{R})$ is the space of bounded Borel functions on $\mathbb{R}$, $C^{|\gamma|-1,1}(\mathbb{R})$ is the space of $|\gamma|-1$ times continuously differentiable functions whose derivatives of the $(|\gamma|-1)$th order are Lipschitz continuous, and $C^{|\gamma|+\alpha}$ are the real-valued Hölder spaces. The space $B(\ell_2)$ denotes the corresponding function space of $\ell_2$-valued functions instead of real-valued functions.

Below we collect the properties of Bessel potential spaces.

Lemma 2.10. Let $\gamma\in\mathbb{R}$ and $p>1$.
(i) The space $C_c^\infty(\mathbb{R}^d)$ is dense in $H_p^\gamma$.
(ii) Let $\gamma-d/p = n+\nu$ for some $n=0,1,\dots$ and $\nu\in(0,1]$. Then, for any $k\in\{0,1,\dots,n\}$, we have
$$ |D^k u|_{C(\mathbb{R}^d)} + |D^n u|_{\mathcal{C}^\nu(\mathbb{R}^d)} \le N\|u\|_{H_p^\gamma}, \tag{2.2} $$
where $\mathcal{C}^\nu(\mathbb{R}^d)$ is the Zygmund space.
(iii) The operator $D_i: H_p^{\gamma+1}\to H_p^\gamma$ is bounded. Moreover, for any $u\in H_p^{\gamma+1}$,
$$ \|D_i u\|_{H_p^\gamma} \le N\|u\|_{H_p^{\gamma+1}}, $$
where $N=N(\gamma,p)$.
(iv) For $\gamma_1,\gamma_2\in\mathbb{R}$ and $u\in H_p^{\gamma_1+\gamma_2}$, we have
$$ \|\Delta^{\gamma_1/2}u\|_{H_p^{\gamma_2}} \le N\|u\|_{H_p^{\gamma_1+\gamma_2}}, $$
where $N=N(\gamma_1,\gamma_2)$.
(v) For $\gamma\in(0,1)$ and $u\in H_p^{2\gamma}$, we have
$$ \|(1-\Delta)^{\gamma}u\|_{L_p} \le N\big(\|u\|_{L_p} + \|(-\Delta)^{\gamma}u\|_{L_p}\big), $$
where $N=N(\gamma,p)$.
(vi) For any $\mu,\gamma\in\mathbb{R}$, the operator $(1-\Delta)^{\mu/2}: H_p^\gamma\to H_p^{\gamma-\mu}$ is an isometry.
(vii) Let
$$ \varepsilon\in[0,1], \quad p_i\in(1,\infty), \quad \gamma_i\in\mathbb{R}, \quad i=0,1, \quad \gamma=\varepsilon\gamma_1+(1-\varepsilon)\gamma_0, \quad 1/p=\varepsilon/p_1+(1-\varepsilon)/p_0. $$
Then, we have
$$ \|u\|_{H_p^\gamma} \le \|u\|_{H_{p_1}^{\gamma_1}}^\varepsilon\|u\|_{H_{p_0}^{\gamma_0}}^{1-\varepsilon}. $$
(viii) Let $u\in H_p^\gamma$. Then, we have
$$ \|au\|_{H_p^\gamma} \le N\|a\|_{B^{|\gamma|+\alpha}}\|u\|_{H_p^\gamma} \quad\text{and}\quad \|bu\|_{H_p^\gamma(\ell_2)} \le N\|b\|_{B^{|\gamma|+\alpha}(\ell_2)}\|u\|_{H_p^\gamma}, $$
where $N=N(\gamma,p)$ and $B^{|\gamma|+\alpha}$, $B^{|\gamma|+\alpha}(\ell_2)$ are introduced in Definition 2.9.

Proof. The above results are well known. For (i), (iii), (vi), and (vii), see Theorems 13.3.7 (i), 13.8.1, 13.3.7 (ii), and Exercise 13.3.20 of [27], respectively. In the case of (ii) and (iv), see [34]. For (v), see Theorems 1.3.6 and 1.3.8 of [12]. For (viii), we refer the reader to [26, Lemma 5.2]. □
+ Definition 2.11 (Stochastic Banach spaces). Let τ ≤ T be a bounded stopping time, p ≥ 2,
547
+ and γ ∈ R. Set |(0, τ]] := {(ω, t) : 0 < t ≤ τ(ω)} and define
548
+
549
+ p(τ) := Lp
550
+
551
+ |(0, τ]], P, dP × dt; Hγ
552
+ p
553
+
554
+ ,
555
+
556
+ p(τ, l2) := Lp
557
+
558
+ |(0, τ]], P, dP × dt; Hγ
559
+ p (l2)
560
+
561
+ ,
562
+ U α,γ
563
+ p
564
+ := Lp
565
+
566
+ Ω, F0, H
567
+ γ− 2
568
+ αp
569
+ p
570
+
571
+ .
572
+ We write u ∈ Hγ
573
+ p if u ∈ Hγ
574
+ p(τ) exists for any bounded stopping time τ. Additionally, if
575
+ γ = 0, then we use L instead of H, ∥f∥Lp(τ) := ∥f∥H0p(τ). The norm of each space is defined
576
+ naturally, for example,
577
+ ∥f∥Hγ
578
+ p(τ) :=
579
+
580
+ E
581
+ ˆ τ
582
+ 0
583
+ ∥f(t, ·)∥p
584
+
585
+ p dt
586
+ �1/p
587
+ .
588
+ Lemma 2.12 exhibits the relation between the stochastic and fractional integrals, which
589
+ is employed when Iα
590
+ t or Dα
591
+ t is applied to the stochastic part of the SPDEs.
592
+ Lemma 2.12. Let T < ∞ be a constant.
593
+ (i) Let α ≥ 0 and h ∈ L2(Ω × [0, T], P; l2). Then, the equality
594
+
595
+ � ∞
596
+
597
+ k=1
598
+ ˆ ·
599
+ 0
600
+ hk(s)dwk
601
+ s
602
+
603
+ (t) =
604
+
605
+
606
+ k=1
607
+
608
+
609
+ ˆ ·
610
+ 0
611
+ hk(s)dwk
612
+ s
613
+
614
+ (t)
615
+ holds for all t ≤ T almost surely and in L2(Ω × [0, T]), where the series on both sides
616
+ converge in probability.
617
+ (ii) If α ≥ 0 and hn → h in L2(Ω × [0, T], P; l2) as n → ∞, then
618
+
619
+
620
+ k=1
621
+
622
+
623
+ ˆ ·
624
+ 0
625
+ hk
626
+ ndwk
627
+ s
628
+
629
+ (t) →
630
+
631
+
632
+ k=1
633
+
634
+
635
+ ˆ ·
636
+ 0
637
+ hkdwk
638
+ s
639
+
640
+ (t)
641
+ in probability uniformly on [0, T].
642
+
643
+ 8
644
+ BEOMSEOK HAN
645
+ (iii) If α > 1/2 and h ∈ L2(Ω×[0, T], P; l2), then
646
+
647
+ Iα �∞
648
+ k=1
649
+ ´ ·
650
+ 0 hk(s)dwk
651
+ s
652
+
653
+ (t) is differentiable
654
+ in t and
655
+
656
+ ∂t
657
+
658
+
659
+
660
+
661
+ k=1
662
+ ˆ ·
663
+ 0
664
+ hk(s)dwk
665
+ s
666
+
667
+ (t) =
668
+ 1
669
+ Γ(α)
670
+
671
+
672
+ k=1
673
+ ˆ t
674
+ 0
675
+ (t − s)α−1hk(s)dwk
676
+ s
677
+ (a.e.) on Ω × [0, T].
678
+ Proof. See Lemmas 3.1 and 3.3 of [7].
679
+
680
+ Fix a small κ0 > 0. For α ∈ (0, 1) and β < α + 1/2, set
681
+ c0 := (2β − 1)+
682
+ α
683
+ + κ01β=1/2.
684
+ (2.3)
685
+ Next, we introduce the solution spaces (for more detail, see Definitions 2.9 and 2.12 in
686
+ [25]).
687
+ Definition 2.13 (Solution spaces). Let τ ≤ T be a bounded stopping time, α ∈ (0, 1),
688
+ β < α + 1/2, γ ∈ R, and p ≥ 2.
689
+ (i) For u ∈ Hγ
690
+ p(τ), we write u ∈ Hγ
691
+ p(τ) if u0 ∈ U α,γ
692
+ p
693
+ , f ∈ Hγ−2
694
+ p
695
+ (τ), and g ∈ Hγ−2+c0
696
+ p
697
+ (τ, l2)
698
+ such that
699
+ ∂α
700
+ t u(t, x) = f(t, x) + ∂β
701
+ t
702
+ ˆ t
703
+ 0
704
+ gk(s, x)dwk
705
+ s ,
706
+ 0 < t ≤ τ;
707
+ u(0, ·) = u0
708
+ in the sense of distribution. In other words, for any φ ∈ S, the equality
709
+ (u(t, ·), φ) = (u0, φ) + Iα
710
+ t (f, φ) + Iα−β
711
+ t
712
+
713
+
714
+ k=1
715
+ ˆ t
716
+ 0
717
+ (gk(s, ·), φ)dwk
718
+ s
719
+ (2.4)
720
+ holds for a.e. (ω, t) ∈ Ω × [0, τ]. If α − β ∈ (−1/2, 0), we regard Iα−β
721
+ t
722
+ as
723
+
724
+ ∂tIα−β+1
725
+ t
726
+ .
727
+ The norm in Hγ
728
+ p(τ) is defined as follows:
729
+ ∥u∥Hγ
730
+ p(τ) := ∥u∥Hγ
731
+ p(τ) + ∥u0∥Uα,γ
732
+ p
733
+ + inf
734
+ f,g
735
+
736
+ ∥f∥Hγ−2
737
+ p
738
+ (τ) + ∥g∥Hγ−2+c0
739
+ p
740
+ (τ,l2)
741
+
742
+ .
743
+ (2.5)
744
+ (ii) We say u ∈ Hγ
745
+ p,loc(τ) if there exists a sequence τn ↑ τ such that u ∈ Hγ
746
+ p(τn) for
747
+ each n. We write u = v in Hγ
748
+ p,loc(τ) if a sequence of bounded stopping times τn ↑ τ
749
+ exists such that u = v in Hγ
750
+ p(τn) for each n. We omit τ if τ = ∞. In other words,
751
+
752
+ p,loc = Hγ
753
+ p,loc(∞).
754
+ Remark 2.14. If α − β ≥ 0, the stochastic part of (2.4) is considered
755
+ Iα−β
756
+ t
757
+
758
+
759
+ k=1
760
+ ˆ t
761
+ 0
762
+ (gk(s, ·), φ)dwk
763
+ s =
764
+
765
+
766
+ k=1
767
+ Iα−β
768
+ t
769
+ ˆ t
770
+ 0
771
+ (gk(s, ·), φ)dwk
772
+ s .
773
+ Otherwise, if α − β ∈ (−1/2, 0), we regard Iα−β
774
+ t
775
+ as
776
+
777
+ ∂tIα−β+1
778
+ t
779
+ . Then, by Lemma 2.12 (iii),
780
+ the stochastic part of (2.4) is
781
+ Iα−β
782
+ t
783
+ � ∞
784
+
785
+ k=1
786
+ ˆ t
787
+ 0
788
+ (gk(s, ·), φ)dwk
789
+ s
790
+
791
+ = ∂
792
+ ∂t
793
+
794
+ Iα−β+1
795
+
796
+
797
+ k=1
798
+ ˆ t
799
+ 0
800
+ (gk(s, ·), φ)dwk
801
+ s
802
+
803
+ =
804
+ 1
805
+ Γ(α − β + 1)
806
+
807
+
808
+ k=1
809
+ ˆ t
810
+ 0
811
+ (t − s)α−β+1(gk(s, ·), φ)dwk
812
+ s .
813
+
814
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
815
+ 9
816
+ Below, we provide the properties of the solution space Hγ
817
+ p(τ).
818
+ Theorem 2.15. Let τ ≤ T be a bounded stopping time.
819
+ (i) For ν ∈ R, the map (1 − ∆)ν/2 : Hγ+2
820
+ p
821
+ (τ) → Hγ−ν+2
822
+ p
823
+ (τ) is an isometry.
824
+ (ii) If γ ∈ R, α ∈ (0, 1), β < α + 1/2, and p ≥ 2, then Hγ
825
+ p(τ) is a Banach space with the
826
+ norm ∥ · ∥Hγ
827
+ p(τ).
828
+ Proof. The proof is a repeat of [25, Theorem 2.14] with τ instead of T.
829
+
830
+ Next, we suggest the H¨older embedding theorems for u ∈ Hγ
831
+ p(τ). The proof of Theorem
832
+ 2.16 is contained in Section 5.
833
+ Theorem 2.16. Let τ ≤ T be the bounded stopping time, γ ∈ R, α ∈ (0, 1), β < α + 1/2,
834
+ and
835
+ p > 2 ∨ 1
836
+ α ∨
837
+ 1
838
+ α − β + 1/2.
839
+ (2.6)
840
+ Suppose u ∈ Hγ
841
+ p(τ).
842
+ (i) Assume ν satisfies
843
+ 1
844
+ αp < ν < 1 − c0
845
+ 2 ,
846
+ (2.7)
847
+ where c0 is the constant introduced in (2.3). Then, u ∈ C([0, τ]; Hγ−2ν
848
+ p
849
+ ) almost surely
850
+ and
851
+ E sup
852
+ t≤τ
853
+ ∥u(t, ·)∥p
854
+ Hγ−2ν
855
+ p
856
+ ≤ N∥u∥p
857
+
858
+ p(τ),
859
+ (2.8)
860
+ where N = N(α, β, γ, d, p, T).
861
+ (ii) Assume α, β, µ, and ν satisfy
862
+ 1
863
+ αp < µ < (α(ν + c0/2) − β) ∧ 1/2 + 1/2
864
+ α
865
+ and
866
+ 1
867
+ αp < ν < 1 − c0
868
+ 2 ,
869
+ (2.9)
870
+ where c0 is the constant introduced in (2.3). Then, for δ ∈ (0, T), u ∈ Cαµ−1/p([δ, τ]; Hγ−2ν
871
+ p
872
+ )
873
+ almost surely and
874
+ E∥u∥p
875
+ Cαµ−1/p([δ,τ];Hγ−2ν
876
+ p
877
+ ) ≤ N∥u∥p
878
+
879
+ p(τ),
880
+ (2.10)
881
+ where N = N(α, β, γ, δ, d, p, T).
882
+ Remark 2.17. Theorem 2.16 is consistent with the previous results ([26, Theorem 7.2]).
883
+ In other words, if we let α, β ↑ 1 in Theorem 2.16, conditions (2.6), (2.7), and (2.9), and
884
+ the results in (2.8) and (2.10) approach those of the case of α = β = 1.
885
+ Remark 2.18. As stated in Theorem 2.16 (ii), the H¨older regularity of solution in time is
886
+ given on [δ, T], where δ ∈ (0, T) (see Remark 5.6). Moreover, if u0 = 0, Theorem 2.16 (ii)
887
+ holds for δ = 0 (see Remark 5.8).
888
+ By combining Lemma 2.10 (ii) and Theorem 2.16, we have the H¨older embedding results
889
+ of solution space H(2−c0−d/2)∧1
890
+ p
891
+ (τ) which is a preparation to obtain the maximum H¨older
892
+ regularity of solutions.
893
+ Corollary 2.19. Let τ ≤ T be a bounded stopping time, α ∈ (0, 1), β < α + 1/2, and
894
+ 0 < γ < (2 − c0 − d/2) ∧ 1, where c0 is introduced in (2.3). Suppose p satisfies (2.6) and
895
+ u ∈ Hγ
896
+ p(τ).
897
+
898
+ 10
899
+ BEOMSEOK HAN
900
+ (i) If α, β, γ, ν, d, and p satisfy (2.7) and
901
+ ν < 1
902
+ 2
903
+
904
+ γ − d
905
+ p
906
+
907
+ ,
908
+ (2.11)
909
+ then u ∈ C([0, τ]; Cγ−2ν−d/p) almost surely and
910
+ E sup
911
+ t≤τ
912
+ ∥u(t, ·)∥p
913
+ Cγ−2ν−d/p(Rd) ≤ N∥u∥p
914
+
915
+ p(τ),
916
+ where N = N(α, β, γ, d, p, T).
917
+ (ii) If α, β, γ, µ, ν, d and p satisfy (2.9) and (2.11), then for a small δ > 0, we have
918
+ u ∈ Cαµ−1/p([δ, τ]; Cγ−2ν−d/p)
919
+ almost surely and
920
+ E∥u∥p
921
+ Cαµ−1/p([δ,τ];Cγ−2ν−d/p(Rd)) ≤ N∥u∥p
922
+
923
+ p(τ),
924
+ where N = N(α, β, γ, δ, d, p, T).
925
+ Proof. To demonstrate (i), we employ Lemma 2.10 (ii) and Theorem 2.16 (i). Then, we
926
+ have
927
+ E sup
928
+ t≤τ
929
+ ∥u(t, ·)∥p
930
+ Cγ−2ν−d/p(Rd) ≤ NE sup
931
+ t≤τ
932
+ ∥u(t, ·)∥p
933
+ Hγ−2ν
934
+ p
935
+ (Rd) ≤ N∥u∥p
936
+
937
+ p(τ).
938
+ In the case of (ii), Lemma 2.10 (ii) and Theorem 2.16 (ii) imply
939
+ E∥u∥p
940
+ Cαµ−1/p([δ,τ];Cγ−2ν−d/p(Rd)) ≤ E∥u∥p
941
+ Cαµ−1/p([δ,τ];Hγ−2ν
942
+ p
943
+ (Rd)) ≤ N∥u∥p
944
+
945
+ p(τ).
946
+ Thus, the corollary is proved.
947
+
948
+ 3. Main Results
949
+ This section presents the uniqueness, existence, Lp-regularity, and H¨older regularity of
950
+ the solution to the following equation:
951
+ ∂α
952
+ t u = Lu + ¯biuuxi + ∂β
953
+ t
954
+
955
+ k
956
+ ˆ t
957
+ 0
958
+ σ(u)ηkdwk
959
+ s,
960
+ t > 0;
961
+ u(0, ·) = u0,
962
+ (3.1)
963
+ where Lu = aijuxixj + biuxi + cu. The coefficients aij, bi, and c are P × B(Rd)-measurable,
964
+ ¯bi is P × B(Rd−1)-measurable, and aij, bi, c, and ¯bi (and their derivatives) are uniformly
965
+ bounded (see Assumption 3.1). Additionally, we assume the coefficient ¯bi is independent
966
+ of xi. Indeed, because ¯bi is independent of xi, we can employ the fundamental theorem
967
+ of calculus to control the nonlinear term ¯biuuxi (see Remark 3.2). Moreover, the diffusion
968
+ coefficient σ(u) is dominated by an Lp function h (see Assumption 3.3) and it is used to
969
+ obtain a uniform Lp bound of the local solutions (see Remark 3.4).
970
+ In Theorem 3.5, we obtain the existence and uniqueness of a solution in Hγ
971
+ p, where
972
+ γ ∈ (0, 2 − c0 − d/2) ∧ 1. The components of equation (3.1) affect the properties of the
973
+ solution u. For example, if α, β, d, and p are given, the regularity γ is determined. Remarks
974
+ 3.7, 3.8, and 3.9 provide explanations for these relations.
975
+ Additionally, in Corollary 3.11, we have the maximal H¨older regularity of the solution by
976
+ employing the H¨older embedding theorem for solution spaces and the H¨older regularity of
977
+ the solution is given in (1.3). Observe that (1.3) derives from Corollary 2.19, and we have
978
+ the following. The constant δ can be taken as 0 if the initial data u0 = 0 (see Remarks 2.18
979
+ and 3.6). Furthermore, depending on the range of β, the behavior of the H¨older regularity
980
+
981
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
982
+ 11
983
+ of the solution in time varies. In detail, when β ≥ 1/2, then the H¨older regularity of the
984
+ solution in space is α/2 times of the H¨older regularity of the solution in time. Moreover,
985
+ if we consider the case α, β ↑ 1, then the H¨older regularity in time and space approaches
986
+ 1/4 and 1/2, which are the results of the SPDEs driven by space-time white noise (e.g. [26,
987
+ Remark 8.7] or [16, Corollary 3.1]). In the case of β < 1/2, 1/2− β of the H¨older regularity
988
+ in time is obtained due to the regularity of the stochastic integral (Remark 3.12).
989
+ The following are assumptions on coefficients.
990
+ Assumption 3.1.
991
+ (i) The coefficients aij = aij(t, x), bi = bi(t, x), and c = c(t, x) are
992
+ P × B(Rd)-measurable.
993
+ (ii) The coefficient ¯bi(t, ¯xi) = ¯bi(t, x1, . . . , xi−1, xi+1, . . . , xd) is P × B(Rd−1)-measurable.
994
+ (iii) There exists K > 0 such that
995
+ K−1|ξ|2 ≤ aij(t, x)ξiξj ≤ K|ξ|2
996
+ for all
997
+ (ω, t, x) ∈ Ω × [0, ∞) × Rd,
998
+ ξ ∈ Rd,
999
+ (3.2)
1000
+ and
1001
+
1002
+ i,j
1003
+ ��aij(t, ·)
1004
+ ��
1005
+ C2(Rd) +
1006
+
1007
+ i
1008
+ ��bi(t, ·)
1009
+ ��
1010
+ C2(Rd) + |c(t, ·)|C2(Rd) +
1011
+
1012
+ i
1013
+ ��¯bi(t, ·)
1014
+ ��
1015
+ C2(Rd−1) ≤ K
1016
+ (3.3)
1017
+ for all (ω, t) ∈ Ω × [0, ∞).
1018
+ Remark 3.2. To prove the existence of a global solution, we need to acquire a uniform Lp
1019
+ bound of the local solutions. Thus, we separate the local solutions into two parts: noise-
1020
+ dominating and nonlinear-dominating parts.
1021
+ In this remark, we consider the nonlinear-
1022
+ dominating parts related to ¯biuuxi.
1023
+ If coefficient ¯bi is independent of xi, coefficient ¯bi can be taken out of the integral for
1024
+ xi. Then, by the fundamental theorem of calculus to xi, the nonlinear term ¯biuuxi is elimi-
1025
+ nated in the Lp estimate of the nonlinear-dominating part of the local solutions. Thus, the
1026
+ nonlinear-dominating parts are controlled by the initial data and diffusion coefficient σ(u)
1027
+ (for more information, see Lemma 4.8).
1028
+ To introduce the assumptions on the diffusion coefficient, we may assume p ≥ 2.
1029
+ Assumption 3.3 (p).
1030
+ (i) The coefficient σ(t, x, u) is P × B(Rd) × B(R)-measurable.
1031
+ (ii) There exists a constant K such that
1032
+ |σ(t, x, u) − σ(t, x, v)| ≤ K|u − v|
1033
+ for all
1034
+ (ω, t, x) ∈ Ω × [0, ∞) × Rd,
1035
+ u, v ∈ R.
1036
+ (iii) There exists a P × B(Rd)-measurable function h ∈ Lp such that
1037
+ |σ(t, x, u)| ≤ |h(t, x)|
1038
+ for all
1039
+ (ω, t, x) ∈ Ω × [0, ∞) × Rd,
1040
+ u ∈ R.
1041
+ (3.4)
1042
+ Remark 3.4. As mentioned in Remark 3.2, we divide the local solutions into two parts, and
1043
+ the nonlinear-dominating parts are controlled by the initial data u0 and diffusion coefficients
1044
+ σ(u). Then, to deal with the noise-dominating term and the terms including σ(u), we employ
1045
+ the function h(t, x) introduced in Assumption 3.3 (p) (iii). Indeed, the terms related to the
1046
+ diffusion coefficient σ(u) are controlled by the initial data and h so that a uniform Lp bound
1047
+ of u is obtained (see Lemmas 4.5 and 4.8).
1048
+ Next, we introduce the main results.
1049
+
1050
+ 12
1051
+ BEOMSEOK HAN
1052
+ Theorem 3.5. Let
1053
+ α ∈ (0, 1),
1054
+ β < 3
1055
+ 4α + 1
1056
+ 2,
1057
+ d < 4 − 2c0,
1058
+ 0 < γ < (2 − c0 − d/2) ∧ 1
1059
+ (3.5)
1060
+ and
1061
+ p = 2k
1062
+ for some
1063
+ k ∈ N
1064
+ and
1065
+ p > 2 ∨ 1
1066
+ α ∨
1067
+ 1
1068
+ α − β + 1/2 ∨ 2 + αd
1069
+ αγ
1070
+
1071
+ d
1072
+ 1 − γ ,
1073
+ (3.6)
1074
+ where c0 are the constants introduced in (2.3). Suppose Assumptions 3.1 and 3.3 (p) hold.
1075
+ If u0 ∈ U α,γ
1076
+ p
1077
+ , then there exists a unique solution u ∈ Hγ
1078
+ p,loc satisfying (3.1). Furthermore,
1079
+ for ν satisfying (2.7) and (2.11), and for any T ∈ (0, ∞) and bounded stopping time τ ≤ T,
1080
+ we have
1081
+ u ∈ C([0, τ]; Cγ−2ν−d/p)
1082
+ and
1083
+ sup
1084
+ t≤τ
1085
+ ∥u(t, ·)∥Cγ−2ν−d/p < ∞
1086
+ (3.7)
1087
+ almost surely. Additionally, for µ and ν satisfying (2.9) and (2.11), and for any T ∈ (0, ∞),
1088
+ bounded stopping time τ ≤ T, and small δ > 0, we have
1089
+ u ∈ Cαµ−1/p([δ, τ]; Cγ−2ν−d/p)
1090
+ and
1091
+ ∥u∥
1092
+ Cαµ− 1
1093
+ p ([δ,τ];Cγ−2ν−d/p) < ∞
1094
+ (3.8)
1095
+ almost surely. If initial data u0 = 0, (3.8) holds with δ = 0.
1096
+ Proof. See Proof of Theorem 3.5 in Section 4.
1097
+
1098
+ Remark 3.6. If the initial data u0 = 0, we can consider the case δ = 0 because we employ
1099
+ Theorem 2.16 to obtain (3.8) (see Theorem 2.16 and Remark 2.18).
1100
+ Remark 3.7.
1101
+ (i) We assume
1102
+ α ∈ (0, 1)
1103
+ because an inequality acting like the chain rule is employed to deal with the nonlinear-
1104
+ dominating part of the local solution (see Lemma 4.6).
1105
+ (ii) The conditions
1106
+ β < 3α/4 + 1/2
1107
+ and
1108
+ d < 4 − 2c0
1109
+ are expected to obtain the uniqueness and existence of solutions to SPDEs with Caputo
1110
+ time fractional derivatives and space-time white noise even for the semilinear case. For
1111
+ example, see [23, Section 7]. Additionally, observe that the choice of α and β allows
1112
+ d = 1, 2, 3, where c0 is the constant introduced in (2.3).
1113
+ Remark 3.8.
1114
+ (i) For the existence and uniqueness of local solutions, we impose
1115
+ γ ∈ (0, 2 − c0 − d/2).
1116
+ (3.9)
1117
+ Heuristically, if u is a measurable, continuous, and bounded solution to equation (3.1),
1118
+ then for given T < ∞, we can define a bounded stopping time as follows:
1119
+ τm := inf
1120
+
1121
+ t ≥ 0 : sup
1122
+ x∈Rd |u(t, x)| ≥ m
1123
+
1124
+ ∧ T.
1125
+ Then, the solution u satisfies the localized version of equation (3.1) on (0, τm). In
1126
+ other words,
1127
+ ∂α
1128
+ t u = Lu + 1
1129
+ 2
1130
+ ¯bi �
1131
+ (|u| ∧ m)2�
1132
+ xi + ∂β
1133
+ t
1134
+
1135
+ k
1136
+ ˆ t
1137
+ 0
1138
+ σ(u)ηkdwk
1139
+ s
1140
+ (3.10)
1141
+
1142
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
1143
+ 13
1144
+ holds on 0 < t < τm with u(0, ·) = u0. Then, as (3.10) is a semilinear equation, (3.9)
1145
+ has to be satisfied by [23, Theorem 7.1] (for more detail, see [23, Section 7] and [25,
1146
+ Section 5].
1147
+ (ii) The following condition
1148
+ γ ∈ (0, 1)
1149
+ (3.11)
1150
+ is assumed due to the nonlinear term ¯biuuxi lowering the regularity of the solution.
1151
+ Even for SBEs (α = β = 1), the condition in (3.11) is required (for more information,
1152
+ see [13, 14, 15, 16, 29]).
1153
+ Remark 3.9.
1154
+ (i) To obtain the local solution, we employ the Lp theory for the semilinear
1155
+ equation (see [26, Theorem 5.1]). When we control the nonlinear term ¯biuuxi in the
1156
+ Lp estimate, the kernel of (1 − ∆)− γ−1
1157
+ 2
1158
+ has to be controlled. Hence,
1159
+ p >
1160
+ d
1161
+ 1 − γ
1162
+ is imposed (see Lemma 4.3).
1163
+ (ii) We require R-valued continuous solutions to consider the cut-off version of equation
1164
+ (3.1). Therefore, we assume
1165
+ p > 2 ∧ 1
1166
+ α ∧
1167
+ 1
1168
+ α − β + 1/2 ∧ 2 + αd
1169
+ αγ
1170
+ which is required to apply the H¨older embedding theorem for Hγ
1171
+ p (see Theorem 2.16
1172
+ and Corollary 2.19).
1173
+ (iii) As mentioned in Remark 3.7 (i), we employ an inequality similar to the chain rule.
1174
+ To apply (4.13) instead of chain rule for the Caputo fractional derivative, we assume
1175
+ p = 2k
1176
+ for some k ∈ N.
1177
+ To achieve the maximal H¨older regularity, we require the uniqueness of the solution in p.
1178
+ Theorem 3.10. Suppose all the conditions of Theorem 3.5 hold. Let u ∈ Hγ
1179
+ p,loc be the
1180
+ solution of equation (3.1) introduced in Theorem 3.5. If q > p, u0 ∈ U α,γ
1181
+ q
1182
+ , and Assumption
1183
+ 3.3 (q) hold, then u ∈ Hγ
1184
+ q,loc.
1185
+ Proof. See Proof of Theorem 3.10 in Section 4.
1186
+
1187
+ Finally, we obtain the maximal H¨older regularity of the solution by combining Theorems
1188
+ 3.5 and 3.10. Recall that c0 is introduced in (2.3).
1189
+ Corollary 3.11. Suppose α, β, d, and γ satisfy (3.5), and u0 ∈ ∩p>2U α,(2−c0−d/2)∧1
1190
+ p
1191
+ , and
1192
+ h ∈ ∩p>2Lp satisfies (3.4). Then, for T > 0, (1.3) holds almost surely.
1193
+ Proof. When α, β, d, and γ are given in (3.5), we choose p as in (3.6). For each p, there
1194
+ exists a unique solution up ∈ Hγ
1195
+ p,loc to equation (3.1). Due to Theorem 3.10, up ∈ Hγ
1196
+ q,loc for
1197
+ any q ≥ p so that we write u instead of up and u is independent of p. Thus, by letting p
1198
+ large in (3.7) and (3.8), we have (1.3). Thus, the corollary is proved.
1199
+
1200
+ Remark 3.12.
1201
+ (i) If 1/2 ≤ β < α + 1/2, the H¨older regularity in space is α/2 times that
1202
+ in time. Furthermore, we can recover the H¨older regularity results of SBEs (α = β = 1)
1203
+ by considering the case α, β ↑ 1. We cite [29, Proposition 5.1] or [16, Corollary 3.1]
1204
+ for reader’s convenience.
1205
+
1206
+ 14
1207
+ BEOMSEOK HAN
1208
+ (ii) If β < 1/2, then the H¨older regularity in time obtains additional regularity by as much
1209
+ as 1/2 − β. This phenomenon is caused by the stochastic integral of equation (3.1)
1210
+ adding the H¨older regularity of noise in time almost 1/2, and ∂β
1211
+ t reducing the regularity
1212
+ of the noise by β.
1213
+ 4. Proof of Theorems 3.5 and 3.10
1214
+ We assume that all conditions in Theorem 3.5 hold for the remainder of this section.
1215
+ To establish the existence of a global solution, we need to obtain the uniqueness and
1216
+ existence of local solutions (Lemma 4.3). With these local solutions, we build a candidate
1217
+ for a global solution. More precisely, we paste the local solutions and demonstrate that the
1218
+ local existence time explodes almost surely (Lemma 4.9). To prove that the local existence
1219
+ time explodes almost surely, we demonstrate that a uniform Lp bound of local solutions
1220
+ exists. In detail, we separate the local solution into noise- and nonlinear-dominating parts.
1221
+ The noise-dominating part is affected by the stochastic part of the equation, and the other
1222
+ part is influenced by the nonlinear term biuuxi. When we deal with the noise-dominating
1223
+ part of the solution, the dominating function of the diffusion coefficient provides a uniform
1224
+ Lp bound for the noise-dominating part of the local solutions (see Assumption 3.3 (p) (iii)
1225
+ and Lemma 4.5). The other part is controlled by employing a version of the chain rule and
1226
+ Gr¨onwall inequality (see Lemmas 4.6 and 4.8 and Theorem 4.7).
1227
+ First, we introduce the uniqueness and existence theorem for semilinear SPDEs.
1228
+ Assumption 4.1 (τ).
1229
+ (i) The functions f(t, x, u) and gk(t, x, u) are P × B(Rd) × B(R)-
1230
+ measurable functions satisfying the following:
1231
+ f(t, x, 0) ∈ Hγ
1232
+ p(τ)
1233
+ and
1234
+ g(t, x, 0) = (g1(t, x, 0), g2(t, x, 0), . . . ) ∈ Hγ+1
1235
+ p
1236
+ (τ, l2).
1237
+ (ii) For any ε > 0, there exists a constant Nε such that for any u, v ∈ Hγ
1238
+ p(τ),
1239
+ ∥f(u) − f(v)∥p
1240
+ Hγ−2
1241
+ p
1242
+ (τ) + ∥g(u) − g(v)∥p
1243
+ Hγ−2+c0
1244
+ p
1245
+ (τ,l2) ≤ ε∥u − v∥p
1246
+
1247
+ p(τ) + Nε∥u − v∥p
1248
+ Hγ−2
1249
+ p
1250
+ (τ),
1251
+ where c0 is the constant introduced in (2.3).
1252
+ Lemma 4.2. Let τ ≤ T be a bounded stopping time. Suppose Assumption 4.1 (τ) hold.
1253
+ Then, for initial data u0 ∈ U α,γ
1254
+ p
1255
+ , the following equation:
1256
+ ∂α
1257
+ t u = Lu + f(u) + ∂β
1258
+ t
1259
+ ˆ t
1260
+ 0
1261
+ gk(u)dwk
1262
+ t ,
1263
+ 0 < t ≤ τ;
1264
+ u(0, ·) = u0
1265
+ (4.1)
1266
+ has a unique solution u ∈ Hγ
1267
+ p(τ). Moreover,
1268
+ ∥u∥p
1269
+
1270
+ p(τ) ≤ N
1271
+
1272
+ ∥u0∥p
1273
+ Uα,γ
1274
+ p
1275
+ + ∥f(0)∥p
1276
+ Hγ−2
1277
+ p
1278
+ (τ) + ∥g(0)∥p
1279
+ Hγ−2+c0
1280
+ p
1281
+ (τ,l2)
1282
+
1283
+ ,
1284
+ (4.2)
1285
+ where N = N(α, β, γ, d, p, K, T) and c0 is the constant introduced in (2.3).
1286
+ Proof. Theorem 5.1 of [26] is the motivation of the proof. The case τ ≡ T is obtained by
1287
+ [25, Theorem 2.18]; thus, we only consider the case τ ≤ T.
1288
+ (Step 1). (Existence) Set
1289
+ ¯f(t, u) := 1t≤τf(t, u)
1290
+ and
1291
+ ¯g(t, u) := 1t≤τg(t, u).
1292
+ Additionally, ¯f(u) and ¯g(u) satisfy Assumption 4.1 (T). Then, by [25, Theorem 2.18], there
1293
+ exists a unique solution u ∈ Hγ
1294
+ p(T) such that u satisfies equation (4.1) with ¯f and ¯g, instead
1295
+
1296
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
1297
+ 15
1298
+ of f and g, respectively. As τ ≤ T, we have u ∈ Hγ
1299
+ p(τ) and u satisfies equation (4.1) and
1300
+ estimate (4.2) with f and g.
1301
+ (Step 2). (Uniqueness) Let u, v ∈ Hγ
1302
+ p(τ) be two solutions of equation (4.1). Then, [25,
1303
+ Theorem 2.18] yields there exists a unique solution ¯v ∈ Hγ
1304
+ p(T) satisfying
1305
+ ∂α
1306
+ t ¯v = L¯v + ¯f(v) +
1307
+
1308
+
1309
+ k=1
1310
+ ∂β
1311
+ t
1312
+ ˆ t
1313
+ 0
1314
+ ¯gk(v)dwk
1315
+ t ,
1316
+ 0 < t ≤ T ;
1317
+ ¯v(0, ·) = u0.
1318
+ (4.3)
1319
+ Notice that in (4.3), ¯f(v) and ¯g(v) are used instead of ¯f(¯v) and ¯g(¯v), respectively. Set
1320
+ ˜v := v − ¯v. Then, for fixed ω ∈ Ω, we have
1321
+ ∂α
1322
+ t ˜v = L˜v,
1323
+ 0 < t ≤ τ ;
1324
+ ˜v(0, ·) = 0.
1325
+ By the deterministic version of [25, Theorem 2.18], we have ˜v = 0 in Lp((0, τ] × Rd) almost
1326
+ surely. Additionally, it implies v(t, ·) = ¯v(t, ·) in Lp((0, τ] × Rd) almost surely. Thus, in
1327
+ equation (4.3), we can replace ¯f(v) and ¯g(v) with ¯f(¯v) and ¯g(¯v). Therefore, ¯v ∈ Hγ
1328
+ p(T)
1329
+ satisfies equation (4.1) on (0, T] with ¯f, ¯g instead of f, g, respectively. Similarly, by following
1330
+ word for word, there exists ¯u ∈ Hγ
1331
+ p(T) such that ¯u satisfies equation (4.1) on (0, T] with
1332
+ ¯f and ¯g instead of f and g. Thus, by the uniqueness result in Hγ
1333
+ p(T), we have ¯u = ¯v in
1334
+
1335
+ p(T), which implies u = v in Hγ
1336
+ p(τ). Thus, the lemma is proved.
1337
+
1338
+ Next, we provide the uniqueness and existence of a local solution to equation (3.1). As an
1339
+ auxiliary function, we choose ρ(·) ∈ C∞
1340
+ c (R) such that ρ(z) ≥ 0 on z ∈ (−∞, ∞), ρ(z) = 1
1341
+ on |z| ≤ 1, ρ(z) = 0 on |z| ≥ 2, and
1342
+ d
1343
+ dzρ(z) ≤ 0 on z ≥ 0. We define the following:
1344
+ ρm(z) := ρ(z/m).
1345
+ (4.4)
1346
+ Lemma 4.3. Let τ ≤ T be a bounded stopping time. For m ∈ N, there exists um ∈ Hγ
1347
+ p(τ)
1348
+ such that
1349
+ ∂α
1350
+ t um = Lum + ¯bi �
1351
+ u2
1352
+ mρm(um)
1353
+
1354
+ xi + ∂β
1355
+ t
1356
+ ˆ t
1357
+ 0
1358
+ σ(t, x, um)ηk(x)dwk
1359
+ t , 0 < t ≤ τ; um(0, ·) = u0,
1360
+ where ρm is the function introduced in (4.4). Furthermore, um ∈ C([0, τ]; C(Rd)) almost
1361
+ surely and
1362
+ E sup
1363
+ t≤τ
1364
+ sup
1365
+ x∈Rd |um(t, x)|p ≤ N∥um∥p
1366
+
1367
+ p(τ) < ∞
1368
+ (4.5)
1369
+ almost surely.
1370
+ Proof. Due to Lemma 4.2 and Corollary 2.19, it suffices to show that Assumption 4.1 (τ)
1371
+ holds. Because σ(t, x, 0) ≤ h(t, x) for all ω, t, x and h ∈ Lp, Assumption 4.1 (i) is satisfied.
1372
+ In the case of Assumption 4.1 (ii), notice that for u, v ∈ R, we have
1373
+ ��u2ρm(u) − v2ρm(v)
1374
+ �� ≤ Nm|u − v|.
1375
+ Then, for u, v ∈ Hγ
1376
+ p(τ), by Remark 2.8 and Lemmas 2.10 (viii) and (iii), we have
1377
+ ��¯bi �
1378
+ (u(t, ·))2ρm(u(t, ·)) − (v(t, ·))2ρm(v(t, ·))
1379
+
1380
+ xi
1381
+ ��p
1382
+ Hγ−2
1383
+ p
1384
+ ≤ N
1385
+ ��(u(t, ·))2ρm(u(t, ·)) − (v(t, ·))2ρm(v(t, ·))
1386
+ ��p
1387
+ Hγ−1
1388
+ p
1389
+ ≤ N
1390
+ ˆ
1391
+ Rd
1392
+ �ˆ
1393
+ Rd |R1−γ(x − y)|
1394
+
1395
+ (u(·))2ρm(·, u(·)) − (v(·))2ρm(·, v(·))
1396
+
1397
+ (t, y)dy
1398
+ �p
1399
+ dx
1400
+ ≤ Nm
1401
+ �ˆ
1402
+ Rd |R1−γ(x)|dx
1403
+ �p ˆ
1404
+ Rd |u(t, x) − v(t, x)|pdx
1405
+ (4.6)
1406
+
1407
+ 16
1408
+ BEOMSEOK HAN
1409
+ and
1410
+ ∥σ(u)η − σ(v)η∥p
1411
+ Hγ−2+c0
1412
+ p
1413
+ (l2)
1414
+
1415
+ ˆ
1416
+ Rd
1417
+ ��
1418
+ k
1419
+ �ˆ
1420
+ Rd |R−γ+2−c0(x − y)| (σ(·, u(·)) − σ(·, v(·)))(t, y)ηk(y)dy
1421
+ �2�p/2
1422
+ dx
1423
+
1424
+ ˆ
1425
+ Rd
1426
+ �ˆ
1427
+ Rd |R−γ+2−c0(x − y)|2 (σ(t, y, u(t, y)) − σ(t, y, v(t, y)))2 dy
1428
+ �p/2
1429
+ dx
1430
+ ≤ Kp
1431
+ ˆ
1432
+ Rd
1433
+ �ˆ
1434
+ Rd |R−γ+2−c0(y)|2 (u(t, x − y) − v(t, x − y))2dy
1435
+ �p/2
1436
+ dx
1437
+ ≤ Kp
1438
+ �ˆ
1439
+ Rd |R−γ+2−c0(y)|2 dy
1440
+ �p/2 ˆ
1441
+ Rd |u(t, x) − v(t, x)|pdx
1442
+ (4.7)
1443
+ on almost every (ω, t) ∈ |(0, τ]]. Due to Remark 2.8, we have
1444
+ ˆ
1445
+ Rd |R1−γ(y)| dy +
1446
+ ˆ
1447
+ Rd |R−γ+2−c0(y)|2 dy < ∞.
1448
+ By integrating with respect to (ω, t) to (4.6) and (4.7), employing Lemma 2.10 (vii), and
1449
+ Young’s inequality, we have
1450
+ ��¯bi �
1451
+ u2ρm(u) − v2ρm(v)
1452
+
1453
+ xi
1454
+ ��p
1455
+ Hγ−2
1456
+ p
1457
+ (τ) + ∥σ(u)η − σ(v)η∥p
1458
+ Hγ−2+c0
1459
+ p
1460
+ (τ,l2)
1461
+ ≤ Nm∥u − v∥p
1462
+ Lp(τ)
1463
+ ≤ ε∥u − v∥p
1464
+
1465
+ p(τ) + Nm∥u − v∥p
1466
+ Hγ−2
1467
+ p
1468
+ (τ).
1469
+ (4.8)
1470
+ The lemma is proved.
1471
+
1472
+ Remark 4.4. We introduce a candidate for a global solution. Let T < ∞. For m ∈ N, let
1473
+ um ∈ Hγ
1474
+ p(T) be the solution introduced in Lemma 4.3. Then, for R ∈ {1, 2, . . . , m}, define
1475
+ a stopping time τ R
1476
+ m
1477
+ τ R
1478
+ m := inf
1479
+
1480
+ t ≥ 0 : sup
1481
+ x∈R
1482
+ |um(t, x)| ≥ R
1483
+
1484
+ ∧ T.
1485
+ (4.9)
1486
+ Observe that
1487
+ τ R
1488
+ R ≤ τ m
1489
+ m
1490
+ (4.10)
1491
+ Indeed, if R = m, (4.10) is obvious. If R < m, we have um ∧ m = um ∧ m ∧ R = um ∧ R
1492
+ for t ≤ τ R
1493
+ m. Therefore, um and uR are solutions to equation
1494
+ ∂α
1495
+ t u = Lu + ¯bi �
1496
+ u1+λρR(u)
1497
+
1498
+ xi + σ(u)ηkdwk
1499
+ t ,
1500
+ 0 < t ≤ τ R
1501
+ m ;
1502
+ u(0, ·) = u0.
1503
+ In contrast, uR ∧ R = uR ∧ R ∧ m = uR ∧ m for t ≤ τ R
1504
+ R . Thus, um and uR are solutions to
1505
+ equation
1506
+ ∂α
1507
+ t u = Lu + ¯b
1508
+
1509
+ u1+λρm(u)
1510
+
1511
+ xi + σ(u)ηkdwk
1512
+ t ,
1513
+ 0 < t ≤ τ R
1514
+ R ;
1515
+ u(0, ·) = u0.
1516
+ Observe that the uniqueness and continuity results in Lemma 4.3 yields that um = uR for
1517
+ all t ≤ (τ R
1518
+ m ∨ τ R
1519
+ R ). Therefore, for t ≤ τ R
1520
+ m,
1521
+ sup
1522
+ s≤t
1523
+ sup
1524
+ x∈R
1525
+ |uR(s, x)| = sup
1526
+ s≤t
1527
+ sup
1528
+ x∈R
1529
+ |um(s, x)| ≤ R,
1530
+
1531
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
1532
+ 17
1533
+ and this implies τ R
1534
+ m ≤ τ R
1535
+ R . Similarly, τ R
1536
+ m ≥ τ R
1537
+ R ; thus,
1538
+ τ R
1539
+ R = τ R
1540
+ m
1541
+ almost surely. Moreover, we have τ R
1542
+ m ≤ τ m
1543
+ m because m > R. Therefore, we have (4.10).
1544
+ Further, we define
1545
+ u(t, x) := um(t, x)
1546
+ on
1547
+ t ≤ τ m
1548
+ m
1549
+ and set
1550
+ τ∞ := lim sup
1551
+ m→∞ lim sup
1552
+ T→∞
1553
+ τ m
1554
+ m .
1555
+ (4.11)
1556
+ It should be remarked that u(t, x) is well-defined on Ω × [0, ∞) × Rd and the nontrivial
1557
+ domain of u is Ω × [0, τ∞) × Rd.
1558
+ To obtain a uniform Lp bound of the local solution um, we separate um into noise- and
1559
+ nonlinear-dominating parts. Lemma 4.5 provides the existence, uniqueness, and estimate
1560
+ of noise-dominating parts of um.
1561
+ Lemma 4.5. Let T < ∞. Then, there exists v ∈ Hγ
1562
+ p(T) such that
1563
+ ∂α
1564
+ t v = Lv + ∂β
1565
+ t
1566
+ ˆ t
1567
+ 0
1568
+ σ(s, x, u)ηk(x)dwk
1569
+ s,
1570
+ 0 < t ≤ T,
1571
+ u(0, ·) = u0
1572
+ Furthermore, v ∈ C([0, T]; C(Rd)) almost surely, and
1573
+ E sup
1574
+ t≤T
1575
+ sup
1576
+ x∈Rd |v(t, x)|p + E sup
1577
+ t≤T
1578
+ ∥v(t, ·)∥p
1579
+ Lp ≤ N∥v∥p
1580
+
1581
+ p(T) ≤ N∥u0∥p
1582
+ Uα,γ
1583
+ p
1584
+ + N∥h∥p
1585
+ Lp(τ),
1586
+ where N = N(α, β, γ, d, p, K, T).
1587
+ Proof. Similar to the proof of Lemma 4.3, it is enough to show that Assumption 4.1 (τ)
1588
+ holds. Set η = (η1, η2, . . . ). Then, by Remark 2.8, for t ≤ T
1589
+ ∥σ(t, ·, u(t, ·))η∥p
1590
+ Hγ−2+c0
1591
+ p
1592
+ (l2)
1593
+ =
1594
+ ˆ
1595
+ Rd
1596
+ � ∞
1597
+
1598
+ k=1
1599
+ �ˆ
1600
+ Rd R−γ+2−c0(x − y)σ(t, y, u(t, y))ηk(y)dy
1601
+ �2�p/2
1602
+ dx
1603
+ =
1604
+ �ˆ
1605
+ Rd |R−γ+2−c0(x)|2dx
1606
+ �p/2 ˆ
1607
+ Rd |σ(t, y, u(t, y))|pdy
1608
+
1609
+ �ˆ
1610
+ Rd |R−γ+2−c0(x)|2dx
1611
+ �p/2 ˆ
1612
+ Rd |h(t, y)|pdy
1613
+ ≤ N∥h(t, ·)∥p
1614
+ Lp.
1615
+ (4.12)
1616
+ Therefore,
1617
+ ∥σ(u)η∥p
1618
+ Hγ−2+c0
1619
+ p
1620
+ (T,l2) ≤ E
1621
+ ˆ T
1622
+ 0
1623
+ ∥σ(t, ·, u(t, ·))η∥p
1624
+ Hγ−2+c0
1625
+ p
1626
+ (l2)dt ≤ N∥h∥p
1627
+ Lp.
1628
+ Thus, the lemma is proved by Lemma 4.2.
1629
+
1630
+ Next, we control the nonlinear-dominating parts of the local solutions. The following two
1631
+ lemmas are crucial in obtaining uniform Lp bounds. Lemma 4.6 functions as a chain rule,
1632
+ and Theorem 4.7 is a version of the Gr¨onwall inequality.
1633
+
1634
+ 18
1635
+ BEOMSEOK HAN
1636
+ Lemma 4.6. Suppose α ∈ (0, 1) and k ∈ N. For any ψ ∈ C∞
1637
+ c ((0, ∞) × Rd), we have
1638
+ ∂α
1639
+ t (ψ(·, x))2k(t) ≤ 2kψ(t, x)|ψ(t, x)|2k−2∂α
1640
+ t ψ(t, x),
1641
+ (4.13)
1642
+ for all (t, x) ∈ (0, ∞) × Rd.
1643
+ Proof. We employ the mathematical induction. The results and proof are motivated by
1644
+ (4.2) of [8].
1645
+ (Step 1). First, we consider the case k = 1. Although the proof is in the proof of [8,
1646
+ Proposition 4.1], we include the proof for the completeness of this paper.
1647
+ Let ψ ∈ C∞
1648
+ c ((0, ∞) × Rd) and t ∈ (0, ∞) and x ∈ Rd. For s ∈ (0, t], set
1649
+ F1(s) := 1
1650
+ 2|ψ(s, x)|2,
1651
+ F2(s) := ψ(s, x)ψ(t, x),
1652
+ and
1653
+ F(s) := 1
1654
+ 2
1655
+
1656
+ |ψ(s, x)|2 − |ψ(t, x)|2�
1657
+ − (ψ(s, x) − ψ(t, x))ψ(t, x).
1658
+ Further,
1659
+ F(s) = 1
1660
+ 2|ψ(s, x) − ψ(t, x)|2 ≥ 0
1661
+ on s ≤ t, and the equality holds for s = t. Notice that the integration by parts implies that
1662
+ ˆ t
1663
+ 0
1664
+ (t − s)−α(F ′
1665
+ 1(s) − F ′
1666
+ 2(s))ds =
1667
+ ˆ t
1668
+ 0
1669
+ (t − s)−αF ′(s)ds ≤ 0.
1670
+ Then, by the definition of ∂α
1671
+ t (Definition 2.5), we have (4.13) with k = 1.
1672
+ (Step 2).
1673
+ Let n ∈ N and assume that the results hold for k = 1, 2, . . . , n − 1.
1674
+ Set
1675
+ ˜ψ(t, x) := (ψ(t, x))2. Since ˜ψ(t, x) ∈ C∞
1676
+ c ((0, ∞) × Rd), we have
1677
+ ∂α
1678
+ t (ψ(·, x))2n(t) = ∂α
1679
+ t ( ˜ψ(·, x))2n−1(t)
1680
+ ≤ 2n−1 ˜ψ(t, x)
1681
+ ��� ˜ψ(t, x)
1682
+ ���
1683
+ 2n−1−2
1684
+ ∂α
1685
+ t ˜ψ(t, x)
1686
+ = 2n−1 |ψ(t, x)|2n−2 ∂α
1687
+ t (ψ(t, x))2
1688
+ ≤ 2nψ(t, x) |ψ(t, x)|2n−2 ∂α
1689
+ t ψ(t, x).
1690
+ The lemma is proved.
1691
+
1692
+ Theorem 4.7 (Theorem 8 of [4]). Let ψ(t) be a nonnegative integrable function on [0, T].
1693
+ For a constant N1, if the function ψ satisfies
1694
+ ψ(t) ≤ ψ0 + N1Iα
1695
+ t ψ
1696
+ on t ∈ [0, T], then
1697
+ ψ(t) ≤
1698
+
1699
+ 1 +
1700
+
1701
+
1702
+ k=0
1703
+ N k
1704
+ 1
1705
+ Γ(kα)
1706
+ (Γ(α)tα)k
1707
+
1708
+
1709
+ ψ0
1710
+ on t ∈ [0, T].
1711
+ We consider following lemma to control the remainder of the local solution um.
1712
+
1713
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
1714
+ 19
1715
+ Lemma 4.8. Let um ∈ Hγ
1716
+ p(T) and v ∈ Hγ
1717
+ p(T) be functions introduced in Lemmas 4.3 and
1718
+ 4.5, and τ m
1719
+ m be the stopping time introduced in (4.9). Then,
1720
+ ∥um(t, ·) − v(t, ·)∥p
1721
+ Lp
1722
+ ≤ N sup
1723
+ t≤T
1724
+ sup
1725
+ x∈Rd |v(t, x)|p sup
1726
+ t≤T
1727
+ ∥v(s, ·)∥p
1728
+ Lp
1729
+
1730
+ 1 +
1731
+
1732
+
1733
+ k=0
1734
+
1735
+ 1 + sups≤t,x∈Rd |v(s, x)|2�k
1736
+ Γ(kα)
1737
+ (Γ(α)T α)k
1738
+
1739
+
1740
+ for all t ≤ τ m
1741
+ m almost surely, where N = N(p, K).
1742
+ Proof. Set
1743
+ wm := um − v
1744
+ and
1745
+ fm := Lwm + ¯bi((um)2ρm(um))xi.
1746
+ Then, we have fm ∈ Hγ−2
1747
+ p
1748
+ (T) since wm, um ∈ Hγ
1749
+ p(T) and estimates similar to (4.8). Ad-
1750
+ ditionally, ∂α
1751
+ t wm = fm.
1752
+ Let (ω, t) ∈
1753
+ |(0, τ m
1754
+ m ]].
1755
+ Due to [23, Remark 2.9], there exists
1756
+ wn
1757
+ m ∈ C∞
1758
+ c ((0, ∞) × Rd) such that wn
1759
+ m → wm in Lp((0, t); Hγ
1760
+ p ), and ∂α
1761
+ t wn
1762
+ m is a Cauchy
1763
+ in Lp((0, t); Hγ−2
1764
+ p
1765
+ ). Define
1766
+ f n
1767
+ m := ∂α
1768
+ t wn
1769
+ m.
1770
+ Moreover, fm is the limit of f n
1771
+ m as n → ∞ in Lp((0, t); Hγ−2
1772
+ p
1773
+ ) (see [23, Remark 2.9]).
1774
+ Choose a nonnegative function ζ ∈ C∞
1775
+ c (Rd) with a unit integral and set ζε(x) := ε−dζ(x/ε)
1776
+ for ε > 0. For h ∈ L1,loc(Rd), set h(ε)(x) :=
1777
+ ´
1778
+ Rd h(y)ζε(x − y)dy.
1779
+ Next, let ε > 0 and x ∈ Rd. Since wn(ε)
1780
+ m
1781
+ ∈ C∞
1782
+ c ((0, ∞) × Rd) and p = 2k, Lemma 4.6
1783
+ yields
1784
+ 1
1785
+ p∂α
1786
+ t
1787
+
1788
+ wn(ε)
1789
+ m
1790
+ (·, x)
1791
+ �p
1792
+ (t) ≤ f n(ε)
1793
+ m
1794
+ (t, x)wn(ε)
1795
+ m
1796
+ (t, x)
1797
+ ���wn(ε)
1798
+ m
1799
+ (t, x)
1800
+ ���
1801
+ p−2
1802
+ (4.14)
1803
+ on t ∈ (0, ∞). Additionally, as wn
1804
+ m(0, x) = 0, we have
1805
+ wn(ε)
1806
+ m
1807
+ (0, x) = 0.
1808
+ (4.15)
1809
+ Thus, if we take stochastic integral Iα
1810
+ t on both sides of (4.14), we have
1811
+ 1
1812
+ p
1813
+ ���wn(ε)
1814
+ m
1815
+ (t, x)
1816
+ ���
1817
+ p
1818
+ ≤ Iα
1819
+ t
1820
+
1821
+ f n(ε)
1822
+ m
1823
+ (·, x)wn(ε)
1824
+ m
1825
+ (·, x)
1826
+ ���wn(ε)
1827
+ m
1828
+ (·, x)
1829
+ ���
1830
+ p−2�
1831
+ (4.16)
1832
+ due to
1833
+
1834
+ wn(ε)
1835
+ m
1836
+ �p
1837
+ ∈ C∞
1838
+ c ((0, ∞)×Rd), (4.15), and Remark 2.6. Observe that (2.1) with q = ∞
1839
+ and the H¨older inequality imply that
1840
+ ����Iα
1841
+ ·
1842
+
1843
+ f n(ε)
1844
+ m
1845
+ (·, x)wn(ε)
1846
+ m
1847
+ (·, x)
1848
+ ���wn(ε)
1849
+ m
1850
+ (·, x)
1851
+ ���
1852
+ p−2
1853
+ − f (ε)
1854
+ m (·, x)w(ε)
1855
+ m (·, x)
1856
+ ���w(ε)
1857
+ m (·, x)
1858
+ ���
1859
+ p−2�����
1860
+ L1((0,t))
1861
+
1862
+ ˆ t
1863
+ 0
1864
+ ����f n(ε)
1865
+ m
1866
+ (s, x)wn(ε)
1867
+ m
1868
+ (s, x)
1869
+ ���wn(ε)
1870
+ m
1871
+ (s, x)
1872
+ ���
1873
+ p−2
1874
+ − f (ε)
1875
+ m (s, x)w(ε)
1876
+ m (s, x)
1877
+ ���w(ε)
1878
+ m (s, x)
1879
+ ���
1880
+ p−2���� ds
1881
+ ≤ N
1882
+ ˆ t
1883
+ 0
1884
+ ���f n(ε)
1885
+ m
1886
+ (s, x) − f (ε)
1887
+ m (s, x)
1888
+ ���
1889
+ ���wn(ε)
1890
+ m
1891
+ (s, x)
1892
+ ���
1893
+ p−1
1894
+ ds
1895
+ + N
1896
+ ˆ t
1897
+ 0
1898
+ ����f (ε)
1899
+ m (s, x)
1900
+
1901
+ wn(ε)
1902
+ m
1903
+ (s, x)
1904
+ ���wn(ε)
1905
+ m
1906
+ (s, x)
1907
+ ���
1908
+ p−2
1909
+ − w(ε)
1910
+ m (s, x)
1911
+ ���w(ε)
1912
+ m (s, x)
1913
+ ���
1914
+ p−2����� ds
1915
+ ≤ N
1916
+
1917
+ An
1918
+ ���wn(ε)
1919
+ m
1920
+ (·, x)
1921
+ ���
1922
+ 2
1923
+ Lp(0,t) + BnCn
1924
+ ���f (ε)
1925
+ m (·, x)
1926
+ ���
1927
+ Lp(0,t)
1928
+ � ���wn(ε)
1929
+ m
1930
+ (·, x)
1931
+ ���
1932
+ p−3
1933
+ Lp(0,t) ,
1934
+ (4.17)
1935
+
1936
+ 20
1937
+ BEOMSEOK HAN
1938
+ where
1939
+ An =
1940
+ ���f n(ε)
1941
+ m
1942
+ (·, x) − f (ε)
1943
+ m (·, x)
1944
+ ���
1945
+ Lp(0,t)
1946
+ Bn =
1947
+ ���wn(ε)
1948
+ m
1949
+ (·, x) − w(ε)
1950
+ m (·, x)
1951
+ ���
1952
+ Lp(0,t) ,
1953
+ and
1954
+ Cn =
1955
+ ���wn(ε)
1956
+ m
1957
+ (·, x)
1958
+ ���
1959
+ Lp(0,t) +
1960
+ ���w(ε)
1961
+ m (·, x)
1962
+ ���
1963
+ Lp(0,t) .
1964
+ Moreover,
1965
+ An, Bn → 0
1966
+ and
1967
+ Cn → 2
1968
+ ���w(ε)
1969
+ m (·, x)
1970
+ ���
1971
+ Lp(0,t)
1972
+ as
1973
+ n → ∞
1974
+ (4.18)
1975
+ since wn
1976
+ m → wm and f n
1977
+ m → fm in Lp((0, t); Hγ
1978
+ p ). Then, by applying (4.18) to (4.17), we
1979
+ have
1980
+ ����Iα
1981
+ ·
1982
+
1983
+ f n(ε)
1984
+ m
1985
+ (·, x)wn(ε)
1986
+ m
1987
+ (·, x)
1988
+ ���wn(ε)
1989
+ m
1990
+ (·, x)
1991
+ ���
1992
+ p−2
1993
+ − f (ε)
1994
+ m (·, x)w(ε)
1995
+ m (·, x)
1996
+ ���w(ε)
1997
+ m (·, x)
1998
+ ���
1999
+ p−2�����
2000
+ L1((0,t))
2001
+ → 0
2002
+ as n → ∞. Therefore, there exists a sequence nl such that wnl(ε)
2003
+ m
2004
+ (·, x) → w(ε)
2005
+ m (·, x) and
2006
+
2007
+ ·
2008
+
2009
+ f nl(ε)
2010
+ m
2011
+ wnl(ε)
2012
+ m
2013
+ ���wnl(ε)
2014
+ m
2015
+ ���
2016
+ p−2�
2017
+ → Iα
2018
+ ·
2019
+
2020
+ f (ε)
2021
+ m w(ε)
2022
+ m
2023
+ ���w(ε)
2024
+ m
2025
+ ���
2026
+ p−2�
2027
+ almost everywhere on [0, t]. Further-
2028
+ more, the convergence holds everywhere on [0, t] due to the continuity in t.
2029
+ Then, by
2030
+ considering sequence nl instead of n and letting l → ∞ for (4.16), we have
2031
+ 1
2032
+ p
2033
+ ���w(ε)
2034
+ m (t, x)
2035
+ ���
2036
+ p
2037
+ ≤ Iα
2038
+ t
2039
+
2040
+ f (ε)
2041
+ m (·, x)w(ε)
2042
+ m (·, x)
2043
+ ���w(ε)
2044
+ m (·, x)
2045
+ ���
2046
+ p−2�
2047
+ .
2048
+ Since t ≤ τ m
2049
+ m , ρm(um) = 1. By integrating with respect to x, we have
2050
+ Γ(α)
2051
+ p
2052
+ ˆ
2053
+ Rd
2054
+ ���w(ε)
2055
+ m (t, x)
2056
+ ���
2057
+ p
2058
+ dx
2059
+
2060
+ ˆ t
2061
+ 0
2062
+ (t − s)α−1
2063
+ ˆ
2064
+ Rd(Lwm(s, ·))(ε)(x)w(ε)
2065
+ m (s, x)
2066
+ ���w(ε)
2067
+ m (s, x)
2068
+ ���
2069
+ p−2
2070
+ dxds
2071
+ +
2072
+ ˆ t
2073
+ 0
2074
+ (t − s)α−1
2075
+ ˆ
2076
+ Rd
2077
+
2078
+ ¯bi(s, ¯xi)
2079
+
2080
+ |wm(s, ·) + v(s, ·)|2�(ε)
2081
+ xi (x)
2082
+
2083
+ w(ε)
2084
+ m (s, x)
2085
+ ���w(ε)
2086
+ m (s, x)
2087
+ ���
2088
+ p−2
2089
+ dxds.
2090
+ (4.19)
2091
+ Furthermore, by integration by parts, we obtain
2092
+ ˆ
2093
+ Rd
2094
+
2095
+ (Lwm(s, ·))(ε)(x) + ¯bi �
2096
+ |wm(s, ·) + v(s, ·)|2�(ε)
2097
+ xi (x)
2098
+
2099
+ w(ε)
2100
+ m (s, x)
2101
+ ���w(ε)
2102
+ m (s, x)
2103
+ ���
2104
+ p−2
2105
+ dx
2106
+ ≤ −(p − 1)
2107
+ ˆ
2108
+ Rd
2109
+
2110
+ aijwm
2111
+ �(ε)
2112
+ xj (s, x)
2113
+ ���w(ε)
2114
+ m (s, x)
2115
+ ���
2116
+ p−2
2117
+ w(ε)
2118
+ mxi(s, x)dx
2119
+ + (p − 1)
2120
+ ˆ
2121
+ Rd
2122
+ ��
2123
+ 2aij
2124
+ xj − bi�
2125
+ wm
2126
+ �(ε)
2127
+ (s, x)
2128
+ ���w(ε)
2129
+ m (s, x)
2130
+ ���
2131
+ p−2
2132
+ w(ε)
2133
+ mxi(s, x)dx
2134
+ +
2135
+ ˆ
2136
+ Rd
2137
+ ��
2138
+ aij
2139
+ xixj − bi
2140
+ xi + c
2141
+
2142
+ wm
2143
+ �(ε)
2144
+ (s, x)w(ε)
2145
+ m (s, x)
2146
+ ���w(ε)
2147
+ m (s, x)
2148
+ ���
2149
+ p−2
2150
+ dx
2151
+ − (p − 1)
2152
+ ˆ
2153
+ Rd
2154
+ ¯bi(s, ¯xi)
2155
+
2156
+ (wm(s, ·) + v(s, ·))2�(ε) (x)
2157
+ ���w(ε)
2158
+ m (s, x)
2159
+ ���
2160
+ p−2
2161
+ w(ε)
2162
+ mxi(s, x)dx.
2163
+ (4.20)
2164
+
2165
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
2166
+ 21
2167
+ Additionally, observe that
2168
+ ���
2169
+
2170
+ aijwm
2171
+ �(ε)
2172
+ xj (s, x) − aij(s, x)w(ε)
2173
+ mxj(s, x)
2174
+ ���
2175
+ = ε−1
2176
+ ����
2177
+ ˆ
2178
+ Rd
2179
+
2180
+ aij(s, x − εy) − aij(s, x)
2181
+
2182
+ wm(s, x − εy)ζyj(y)dy
2183
+ ����
2184
+ ≤ N(K)
2185
+ ˆ
2186
+ Rd |wm(s, x − εy)||y||ζy(y)|dy,
2187
+ (4.21)
2188
+ and by (3.2),
2189
+
2190
+ ˆ
2191
+ Rd aij(s, x)w(ε)
2192
+ mxi(s, x)w(ε)
2193
+ mxj(s, x)
2194
+ ���w(ε)
2195
+ m (s, x)
2196
+ ���
2197
+ p−2
2198
+ dx
2199
+ ≤ −K−1
2200
+ ˆ
2201
+ Rd
2202
+ ���w(ε)
2203
+ m (s, x)
2204
+ ���
2205
+ p−2 ���w(ε)
2206
+ mx(s, x)
2207
+ ���
2208
+ 2
2209
+ dx.
2210
+ (4.22)
2211
+ Thus, by combining (4.21) and (4.22)
2212
+
2213
+ ˆ
2214
+ Rd
2215
+
2216
+ aijwm
2217
+ �(ε)
2218
+ xj (s, x)
2219
+ ���w(ε)
2220
+ m (s, x)
2221
+ ���
2222
+ p−2
2223
+ w(ε)
2224
+ mxi(s, x)dx
2225
+ = −
2226
+ ˆ
2227
+ Rd
2228
+ ���w(ε)
2229
+ m (s, x)
2230
+ ���
2231
+ p−2
2232
+ w(ε)
2233
+ mxi(s, x)
2234
+
2235
+ (awm)(ε)
2236
+ xj (s, x) − a(s, x)w(ε)
2237
+ mxj(s, x)
2238
+
2239
+ dx
2240
+
2241
+ ˆ
2242
+ Rd aij(s, x)w(ε)
2243
+ mxi(s, x)w(ε)
2244
+ mxj(s, x)
2245
+ ���w(ε)
2246
+ m (s, x)
2247
+ ���
2248
+ p−2
2249
+ dx
2250
+ ≤ N
2251
+ ˆ
2252
+ Rd
2253
+ ���w(ε)
2254
+ m (s, x)
2255
+ ���
2256
+ p−2 ���w(ε)
2257
+ mxi(s, x)
2258
+ ���
2259
+ ˆ
2260
+ Rd |wm(s, x − εy)||y||ζy(y)|dydx
2261
+ − K−1
2262
+ ˆ
2263
+ Rd
2264
+ ���w(ε)
2265
+ m (s, x)
2266
+ ���
2267
+ p−2 ���w(ε)
2268
+ mx(s, x)
2269
+ ���
2270
+ 2
2271
+ dx
2272
+ ≤ N
2273
+ ˆ
2274
+ Rd
2275
+ ���w(ε)
2276
+ m (s, x)
2277
+ ���
2278
+ p−2 �ˆ
2279
+ Rd |wm(s, x − εy)||y||ζy(y)|dy
2280
+ �2
2281
+ dx
2282
+ − 1
2283
+ 2K−1
2284
+ ˆ
2285
+ Rd
2286
+ ���w(ε)
2287
+ m (s, x)
2288
+ ���
2289
+ p−2 ���w(ε)
2290
+ mx(s, x)
2291
+ ���
2292
+ 2
2293
+ dx,
2294
+ (4.23)
2295
+ where N = N(K). Moreover,
2296
+ ����
2297
+ ��
2298
+ 2aij
2299
+ xj − bi�
2300
+ wm
2301
+ �(ε)
2302
+ (s, x)
2303
+ ���� =
2304
+ ����
2305
+ ˆ
2306
+ Rd
2307
+
2308
+ 2aij
2309
+ yj(s, y) − bi(s, y)
2310
+
2311
+ wm(s, y)ζε(x − y)dy
2312
+ ����
2313
+ ≤ K
2314
+ ˆ
2315
+ Rd |wm(s, y)|ζε(x − y)dy
2316
+ = K(|wm(s, ·)|)(ε)(x)
2317
+ (4.24)
2318
+ and
2319
+ ����
2320
+ ��
2321
+ aij
2322
+ xixj − bi
2323
+ xi + c
2324
+
2325
+ wm
2326
+ �(ε)
2327
+ (s, x)
2328
+ ���� ≤ K(|wm(s, ·)|)(ε)(x).
2329
+ (4.25)
2330
+
2331
+ 22
2332
+ BEOMSEOK HAN
2333
+ Thus, by applying H¨older’s inequality, (4.23), (4.24), and (4.25) to (4.20), we have
2334
+ ˆ
2335
+ Rd
2336
+
2337
+ (Lwm(s, ·))(ε)(x) + ¯bi �
2338
+ |wm(s, ·) + v(s, ·)|2�(ε)
2339
+ xi (x)
2340
+
2341
+ w(ε)
2342
+ m (s, x)
2343
+ ���w(ε)
2344
+ m (s, x)
2345
+ ���
2346
+ p−2
2347
+ dx
2348
+ ≤ N
2349
+ ˆ
2350
+ R
2351
+ ���w(ε)
2352
+ m (s, x)
2353
+ ���
2354
+ p−2 �ˆ
2355
+ R
2356
+ |wm(s, x − εy)||y||ζy(y)|dy
2357
+ �2
2358
+ dx
2359
+ − p − 1
2360
+ 4K
2361
+
2362
+ i
2363
+ ˆ
2364
+ R
2365
+ ���w(ε)
2366
+ m (s, x)
2367
+ ���
2368
+ p−2 ���w(ε)
2369
+ mxi(s, x)
2370
+ ���
2371
+ 2
2372
+ dx
2373
+ + N
2374
+
2375
+ i
2376
+ ˆ
2377
+ Rd
2378
+
2379
+ (|wm(s, ·)|)(ε)(x)
2380
+ �2 ���w(ε)
2381
+ m (s, x)
2382
+ ���
2383
+ p−2
2384
+ dx
2385
+ + N
2386
+ ˆ
2387
+ Rd(|wm(s, ·)|)(ε)(x)
2388
+ ���w(ε)
2389
+ m (s, x)
2390
+ ���
2391
+ p−1
2392
+ dx.
2393
+ − (p − 1)
2394
+
2395
+ i
2396
+ ˆ
2397
+ Rd
2398
+ ¯bi(s, ¯xi)
2399
+
2400
+ (wm(s, ·) + v(s, ·))2�(ε) (x)
2401
+ ���w(ε)
2402
+ m (s, x)
2403
+ ���
2404
+ p−2
2405
+ w(ε)
2406
+ mxi(s, x)dx,
2407
+ (4.26)
2408
+ where N = N(K). Furthermore, note that
2409
+ ˆ
2410
+ Rd
2411
+ ���w(ε)
2412
+ m (s, x)
2413
+ ���
2414
+ p
2415
+ w(ε)
2416
+ mxi(s, x)dx = 0
2417
+ for
2418
+ s ≤ t.
2419
+ (4.27)
2420
+ Indeed, take a nonnegative smooth function φ ∈ C∞
2421
+ c (Rd) such that φ(x) = 1 on |x| < 1,
2422
+ φ(x) = 0 on |x| > 2, and supx∈Rd |φ′(x)| ≤ 2. Then, integration by parts yields
2423
+ ˆ
2424
+ Rd
2425
+ ���w(ε)
2426
+ m (s, x)
2427
+ ���
2428
+ p
2429
+ w(ε)
2430
+ mxi(s, x)φ(x/n)dx
2431
+ = −p
2432
+ ˆ
2433
+ Rd
2434
+ ���w(ε)
2435
+ m (s, x)
2436
+ ���
2437
+ p
2438
+ w(ε)
2439
+ mxi(s, x)φ(x/n)dx − 1
2440
+ n
2441
+ ˆ
2442
+ Rd
2443
+ ���w(ε)
2444
+ m (s, x)
2445
+ ���
2446
+ p
2447
+ w(ε)
2448
+ m (s, x)φ′(x/n)dx.
2449
+ Thus, we have
2450
+ lim sup
2451
+ n→∞
2452
+ ����
2453
+ ˆ
2454
+ Rd
2455
+ ���w(ε)
2456
+ m (s, x)
2457
+ ���
2458
+ p
2459
+ w(ε)
2460
+ mxi(s, x)φ(x/n)dx
2461
+ ���� ≤ lim sup
2462
+ n→∞
2463
+ 2
2464
+ n(p + 1)
2465
+ ˆ
2466
+ Rd
2467
+ ���w(ε)
2468
+ m (s, x)
2469
+ ���
2470
+ p+1
2471
+ dx = 0
2472
+ (4.28)
2473
+ and (4.28) yields (4.27). Then, from the last term of (4.26), by applying (4.27) and the
2474
+ H¨older’s inequality, we have
2475
+ �����
2476
+
2477
+ i
2478
+ ˆ
2479
+ Rd−1
2480
+ ¯bi(s, ¯xi)
2481
+ ˆ
2482
+ R
2483
+
2484
+ |wm(t, ·) + v(t, ·)|2�(ε)
2485
+ (x)
2486
+ ���w(ε)
2487
+ m (t, x)
2488
+ ���
2489
+ p−2
2490
+ w(ε)
2491
+ mxi(t, x)dxid¯xi
2492
+ �����
2493
+ ≤ N
2494
+
2495
+ i
2496
+ ˆ
2497
+ Rd
2498
+ ����
2499
+
2500
+ |wm(t, ·) + v(t, ·)|2�(ε)
2501
+ (x) −
2502
+ ���w(ε)
2503
+ m (t, x)
2504
+ ���
2505
+ 2����
2506
+ ���w(ε)
2507
+ m (t, x)
2508
+ ���
2509
+ p−2 ���w(ε)
2510
+ mxi(t, x)
2511
+ ��� dx
2512
+ ≤ N
2513
+ ˆ
2514
+ Rd
2515
+ ��
2516
+ |wm(t, ·) + v(t, ·)|2�(ε)
2517
+ (x) −
2518
+ ���w(ε)
2519
+ m (t, x)
2520
+ ���
2521
+ 2�2 ���w(ε)
2522
+ m (t, x)
2523
+ ���
2524
+ p−2
2525
+ dx
2526
+ +
2527
+ 1
2528
+ 8KN
2529
+
2530
+ i
2531
+ ˆ
2532
+ Rd
2533
+ ���w(ε)
2534
+ mxi(t, x)
2535
+ ���
2536
+ 2 ���w(ε)
2537
+ m (t, x)
2538
+ ���
2539
+ p−2
2540
+ dx,
2541
+ (4.29)
2542
+
2543
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
2544
+ 23
2545
+ where N = N(K). Then, by applying (4.26) and (4.29) to (4.19), we have
2546
+ ˆ
2547
+ Rd
2548
+ ���w(ε)
2549
+ m (t, x)
2550
+ ���
2551
+ p
2552
+ dx
2553
+ ≤ NIα
2554
+ t
2555
+ ˆ
2556
+ R
2557
+ ���w(ε)
2558
+ m (s, x)
2559
+ ���
2560
+ p−2 �ˆ
2561
+ R
2562
+ |wm(s, x − εy)||y||ζy(y)|dy
2563
+ �2
2564
+ dx
2565
+ + NIα
2566
+ t
2567
+ ˆ
2568
+ Rd
2569
+ ���(|wm(s, ·)|)(ε)(x)
2570
+ ���
2571
+ 2 ���w(ε)
2572
+ m (s, x)
2573
+ ���
2574
+ p−2
2575
+ + (|wm(s, ·)|)(ε)(x)
2576
+ ���w(ε)
2577
+ m (s, x)
2578
+ ���
2579
+ p−1
2580
+ dx
2581
+ + NIα
2582
+ t
2583
+ ˆ
2584
+ Rd
2585
+ ��
2586
+ |wm(t, ·) + v(t, ·)|2�(ε)
2587
+ (x) −
2588
+ ���w(ε)
2589
+ m (t, x)
2590
+ ���
2591
+ 2�2 ���w(ε)
2592
+ m (t, x)
2593
+ ���
2594
+ p−2
2595
+ dx,
2596
+ where N = N(p, K). By letting ε ↓ 0, we have
2597
+ ∥wm(t, ·)∥p
2598
+ Lp
2599
+ ≤ NIα
2600
+ t
2601
+ ˆ
2602
+ R
2603
+ |wm(·, x)|p dx + NIα
2604
+ t
2605
+ ˆ
2606
+ Rd
2607
+
2608
+ |wm(·, x) + v(·, x)|2 − |wm(·, x)|2�2
2609
+ |wm(·, x)|p−2 dx
2610
+ ≤ NIα
2611
+ t
2612
+ ˆ
2613
+ R
2614
+ |wm(·, x)|p dx + NIα
2615
+ t
2616
+ ˆ
2617
+ Rd |v(·, x)|2 |wm(·, x)|p + |v(·, x)|4 |wm(·, x)|p−2 dx
2618
+ ≤ N
2619
+
2620
+ 1 +
2621
+ sup
2622
+ s≤t,x∈Rd |v(s, x)|2
2623
+
2624
+
2625
+ t ∥wm(·, ·)∥p
2626
+ Lp + N sup
2627
+ s≤t
2628
+ ∥v(s, ·)∥2p
2629
+ L2p.
2630
+ for all t ≤ τ m
2631
+ m . Then, by Theorem 4.7, we obtain
2632
+ ∥wm(t, ·)∥p
2633
+ Lp ≤ N sup
2634
+ s≤t
2635
+ ∥v(s, ·)∥2p
2636
+ L2p
2637
+
2638
+ 1 +
2639
+
2640
+
2641
+ k=0
2642
+
2643
+ 1 + sups≤t,x∈Rd |v(s, x)|2�k
2644
+ Γ(kα)
2645
+ (Γ(α)T α)k
2646
+
2647
+
2648
+ for all t ≤ τ m
2649
+ m . The lemma is proved.
2650
+
2651
+ Finally, we demonstrate that the global solution candidate does not explode in a finite
2652
+ time.
2653
+ Lemma 4.9. For any T < ∞, we have
2654
+ lim
2655
+ R→∞ P
2656
+ ��
2657
+ ω ∈ Ω :
2658
+ sup
2659
+ t≤T,x∈Rd |u(t, x)| > R
2660
+ ��
2661
+ = 0.
2662
+ Proof. Let v be the function introduced in Lemma 4.5. Define
2663
+ τ 1(S) := inf
2664
+
2665
+ t ≥ 0 : ∥v(t, ·)∥Lp ≥ S
2666
+
2667
+ ∧ T,
2668
+ τ 2(S) := inf
2669
+
2670
+ t ≥ 0 : sup
2671
+ x∈Rd |v(t, x)| ≥ S
2672
+
2673
+ ∧ T.
2674
+ and
2675
+ τ 0
2676
+ m(S) := τ m
2677
+ m ∧ τ 1(S) ∧ τ 2(S),
2678
+
2679
+ 24
2680
+ BEOMSEOK HAN
2681
+ where τ m
2682
+ m is the stopping time introduced in (4.9). Set r :=
2683
+ p
2684
+ p−1. Then, by Lemmas 4.2 and
2685
+ 2.10, (viii), H¨older inequality, and Minkowski inequality, we have
2686
+ ∥um∥p
2687
+
2688
+ p(τ 0m(S)) − N∥u0∥p
2689
+ Uα,γ
2690
+ p
2691
+ ≤ N
2692
+ �����
2693
+
2694
+ i
2695
+ ¯bi(u2
2696
+ mρm(um))xi
2697
+ �����
2698
+ p
2699
+ Hγ−2
2700
+ p
2701
+ (τ 0m(S))
2702
+ + N∥σ(um)η∥p
2703
+ Hγ−2+c0
2704
+ p
2705
+ (τ 0m(S),l2)
2706
+ ≤ N
2707
+ ��u2
2708
+ m
2709
+ ��p
2710
+ Hγ−1
2711
+ p
2712
+ (τ 0m(S)) + N∥σ(um)η∥p
2713
+ Hγ−2+c0
2714
+ p
2715
+ (τ 0m(S),l2)
2716
+ ≤ NE
2717
+ ˆ τ 0
2718
+ m(S)
2719
+ 0
2720
+ ˆ
2721
+ Rd
2722
+ ����
2723
+ ˆ
2724
+ Rd R1−γ(y)|um(s, x − y)|2dy
2725
+ ����
2726
+ p
2727
+ dxds
2728
+ + NE
2729
+ ˆ τ 0
2730
+ m(S)
2731
+ 0
2732
+ ˆ
2733
+ Rd
2734
+ ����
2735
+ ˆ
2736
+ Rd |R−γ+2−c0(y)|2 |um(s, x − y)|2dy
2737
+ ����
2738
+ p/2
2739
+ dxds
2740
+ ≤ NE
2741
+ ˆ τ 0
2742
+ m(S)
2743
+ 0
2744
+ ˆ
2745
+ Rd
2746
+ ����
2747
+ ˆ
2748
+ Rd |R1−γ(y)|r |um(s, x − y)|rdy
2749
+ ����
2750
+ p/r
2751
+ dx
2752
+ ˆ
2753
+ Rd |um(s, x)|pdxds
2754
+ + NE
2755
+ ˆ τ 0
2756
+ m(S)
2757
+ 0
2758
+ �ˆ
2759
+ Rd |R−γ+2−c0(x)|2 dx
2760
+ �p/2 ˆ
2761
+ Rd |um(s, x)|pdxds
2762
+ ≤ N0E
2763
+ ˆ τ 0
2764
+ m(S)
2765
+ 0
2766
+
2767
+ 1 +
2768
+ ˆ
2769
+ Rd |um(s, x)|pdx
2770
+ � ˆ
2771
+ Rd |um(s, x)|pdxds,
2772
+ (4.30)
2773
+ where N0 = N(α, β, γ, d, p, K, T)
2774
+ ��´
2775
+ Rd |R1−γ(x)|r dx
2776
+ �p/r +
2777
+ �´
2778
+ Rd |R−γ+2−c0(x)|2 dx
2779
+ �p/2�
2780
+ . Note
2781
+ that N0 < ∞ due to r <
2782
+ d
2783
+ d+γ−1 and Remark 2.8. Then, by Lemma 4.8 and the definitions
2784
+ of τ1(S) and τ2(S),
2785
+ ˆ
2786
+ Rd |um(t, x)|pdx
2787
+ ≤ N
2788
+ ˆ
2789
+ Rd |um(t, x) − v(t, x)|p + |v(t, x)|pdx
2790
+ ≤ N sup
2791
+ s≤t
2792
+ sup
2793
+ x∈Rd |v(t, x)| sup
2794
+ s≤t
2795
+ ∥v(s, ·)∥p
2796
+ p
2797
+
2798
+ 1 +
2799
+
2800
+
2801
+ k=0
2802
+
2803
+ 1 + sups≤t,x∈Rd |v(t, x)|2�k
2804
+ Γ(kα)
2805
+ (Γ(α)T α)k
2806
+
2807
+
2808
+ +
2809
+ ˆ
2810
+ Rd |v(t, x)|pdx
2811
+ < N(p, S, K).
2812
+ (4.31)
2813
+ Therefore, by combining (4.30) and (4.31), we have
2814
+ ∥um∥p
2815
+
2816
+ p(τ 0m(S)) ≤ N + N∥u0∥p
2817
+ Uα,γ
2818
+ p
2819
+ ,
2820
+ (4.32)
2821
+
2822
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
2823
+ 25
2824
+ where N = N(α, β, γ, d, p, S, K, T). It should be noted that the right-hand side of (4.32) is
2825
+ independent of m. Therefore, by Chebyshev’s inequality and Lemma 4.3, we have
2826
+ P
2827
+
2828
+ sup
2829
+ t≤τ 0m(S)
2830
+ sup
2831
+ x∈Rd |u(t, x)| > R
2832
+
2833
+ ≤ 1
2834
+ RpE
2835
+ sup
2836
+ t≤τ 0m,x∈Rd |u(t, x)|p
2837
+ ≤ 1
2838
+ RpE
2839
+ sup
2840
+ t≤τ 0m,x∈Rd |um(t, x)|p
2841
+ ≤ 1
2842
+ Rp∥um∥p
2843
+
2844
+ p(τ 0m)
2845
+ ≤ N
2846
+ Rp,
2847
+ where N = N(u0, α, β, γ, d, p, S, K, T). In contrast, by Lemma 4.5,
2848
+ P
2849
+
2850
+ τ 1(S) < T
2851
+
2852
+ + P
2853
+
2854
+ τ 2(S) < T
2855
+
2856
+ ≤ P
2857
+
2858
+ sup
2859
+ t≤T
2860
+ ∥v(t, ·)∥Lp > S
2861
+
2862
+ + P
2863
+
2864
+ sup
2865
+ t≤T
2866
+ sup
2867
+ x∈Rd |v(t, x)| > S
2868
+
2869
+ ≤ 1
2870
+ Sp E sup
2871
+ t≤T
2872
+ ∥v(t, ·)∥p
2873
+ Lp + 1
2874
+ Sp E
2875
+ sup
2876
+ t≤T,x∈Rd |v(t, x)|p
2877
+ ≤ 1
2878
+ Sp N(u0, h, α, β, γ, d, p, K, T).
2879
+ Thus,
2880
+ P
2881
+
2882
+ sup
2883
+ t≤T,x∈Rd |u(t, x)| > R
2884
+
2885
+ ≤ lim inf
2886
+ m→∞ P
2887
+
2888
+ sup
2889
+ t≤τ 0m(S),x∈Rd |u(t, x)| > R
2890
+
2891
+ + P
2892
+
2893
+ τ 1(S) < T
2894
+
2895
+ + P
2896
+
2897
+ τ 2(S) < T
2898
+
2899
+ ≤ N1
2900
+ Rp + N2
2901
+ Sp ,
2902
+ where N1 = N1(u0, α, β, γ, d, p, S, K, T) and N2 = N2(u0, h, α, β, γ, d, p, K, T). The lemma
2903
+ is proved by letting R → ∞ and S → ∞ in order.
2904
+
2905
+ Proof of Theorem 3.5. Step 1. (Uniqueness).
2906
+ Suppose u, ¯u ∈ Hγ
2907
+ p,loc are nonnegative
2908
+ solutions of equation (3.1). By Definition 2.13, there are bounded stopping times τn (n =
2909
+ 1, 2, · · · ) such that
2910
+ τn ↑ ∞
2911
+ and
2912
+ u, ¯u ∈ Hγ
2913
+ p(τn).
2914
+ Fix n ∈ N. Note that u, ¯u ∈ C([0, τn]; C(Rd)) almost surely and
2915
+ E sup
2916
+ t≤τn
2917
+ sup
2918
+ x∈Rd |u(t, x)|p + E sup
2919
+ t≤τn
2920
+ sup
2921
+ x∈Rd |¯u(t, x)|p < ∞.
2922
+ (4.33)
2923
+
2924
+ 26
2925
+ BEOMSEOK HAN
2926
+ Then, for m ∈ N, define
2927
+ τ 1
2928
+ m,n := inf
2929
+
2930
+ t ≥ 0 : sup
2931
+ x∈Rd |u(t, x)| > m
2932
+
2933
+ ∧ τn,
2934
+ τ 2
2935
+ m,n := inf
2936
+
2937
+ t ≥ 0 : sup
2938
+ x∈Rd |¯u(t, x)| > m
2939
+
2940
+ ∧ τn,
2941
+ and
2942
+ τm,n := τ 1
2943
+ m,n ∧ τ 2
2944
+ m,n.
2945
+ (4.34)
2946
+ Due to (4.33), τ 1
2947
+ m,n and τ 2
2948
+ m,n are well-defined stopping times; thus, τm,n is a stopping time.
2949
+ Observe that u, ¯u ∈ Hγ
2950
+ p(τm,n) and τm,n ↑ τn as m → ∞ almost surely. Fix m ∈ N. Notice
2951
+ that u, ¯u ∈ Hγ
2952
+ p(τm,n) are solutions to equation
2953
+ ∂α
2954
+ t v = Lv + ¯bi �
2955
+ v2ρm(v)
2956
+
2957
+ xi + ∂β
2958
+ t
2959
+ ˆ t
2960
+ 0
2961
+ σ(v)dWt,
2962
+ 0 < t ≤ τm,n;
2963
+ v(0, ·) = u0,
2964
+ where Lv = aijvxixj + bivxi + cv. By the uniqueness result in Lemma 4.3, we conclude that
2965
+ u = ¯u in Hγ
2966
+ p(τm,n) for each m ∈ N. The monotone convergence theorem yields u = ¯u in
2967
+
2968
+ p(τn), which implies u = ¯u in Hγ
2969
+ p,loc.
2970
+ Step 2 (Existence.). Let T < ∞. For m ∈ N, define τ m
2971
+ m and u as in Remark 4.4. Observe
2972
+ that
2973
+ P (τ m
2974
+ m < T) ≤ P
2975
+
2976
+ sup
2977
+ t≤T,x∈Rd |u(t, x)| ≥ m
2978
+
2979
+ .
2980
+ Indeed, if τ m
2981
+ m < T, then supt≤τ m
2982
+ m ,x∈Rd |u(t, x)| = supt≤τ m
2983
+ m ,x∈Rd |um(t, x)| = m almost surely.
2984
+ Then, by Lemma 4.9, we have
2985
+ lim sup
2986
+ m→∞ P (τ m
2987
+ m < T) ≤ lim sup
2988
+ m→∞ P
2989
+
2990
+ sup
2991
+ t≤T,x∈Rd |u(t, x)| ≥ m
2992
+
2993
+ = 0
2994
+ Since T < ∞ is arbitrary, τ m
2995
+ m → ∞ in probability. In addition, we conclude that τ m
2996
+ m ↑ ∞
2997
+ almost surely, because τ m
2998
+ m is increasing in m.
2999
+ Last, set τm := τ m
3000
+ m ∧ m. Note that (see Remark 4.4)
3001
+ u(t, x) = um(t, x)
3002
+ for
3003
+ t ∈ [0, τm].
3004
+ Observe that supx∈Rd |um(t, x)| ≤ m for t ∈ [0, τm]; thus, um satisfies (3.1) almost every-
3005
+ where t ∈ [0, τm] almost surely. Because u = um for t ∈ [0, τm] and um ∈ Hγ
3006
+ p(τm), it follows
3007
+ that u ∈ Hγ
3008
+ p(τm) and u satisfies (3.1) for all t ≤ τm almost surely. We have u ∈ Hγ
3009
+ p,loc
3010
+ because τm ↑ ∞ as m → ∞ almost surely. The theorem is proved.
3011
+
3012
+ Proof of Theorem 3.10. The proof of Theorem 3.10 is motivated by [26, Corollarly 5.11].
3013
+ Since q > p, by Theorem 3.5, there exists a unique solution ¯u ∈ Hγ
3014
+ q,loc satisfying equation
3015
+ (3.1). By Definition 2.13, there exists τn such that τn → ∞ almost surely as n → ∞,
3016
+ u ∈ Hγ
3017
+ p(τn) and ¯u ∈ Hγ
3018
+ q(τn). Fix n ∈ N. Because 2+αd
3019
+ αγ
3020
+ < p < q, we can define τm,n (m ∈ N)
3021
+ as in (4.34). Notice that for any p0 > p, we have
3022
+ u ∈ Lp0(τm,n)
3023
+
3024
+ STFBES DRIVEN BY MULTIPLICATIVE SPACE-TIME WHITE NOISE
3025
+ 27
3026
+ since
3027
+ E
3028
+ ˆ τm,n
3029
+ 0
3030
+ ˆ
3031
+ R
3032
+ |u(t, x)|p0dxdt ≤ mp0−pE
3033
+ ˆ τm,n
3034
+ 0
3035
+ ˆ
3036
+ R
3037
+ |u(t, x)|pdxdt < ∞.
3038
+ Observe that ¯biuuxi ∈ Hγ−2
3039
+ q
3040
+ (τm,n) ⊂ H−2
3041
+ q (τm,n). Indeed, similar to (4.6),
3042
+ E
3043
+ ˆ τm,n
3044
+ 0
3045
+ ����
3046
+ 1
3047
+ 2
3048
+ ¯bi(s, ·)
3049
+
3050
+ (u(s, ·))2�
3051
+ xi
3052
+ ����
3053
+ q
3054
+ Hγ−2
3055
+ q
3056
+ ds ≤ NE
3057
+ ˆ τm,n
3058
+ 0
3059
+ ˆ
3060
+ R
3061
+ |u(s, x)|2qdxds < ∞.
3062
+ Additionally, we have
3063
+ auxx ∈ H−2
3064
+ q (τm,n),
3065
+ bux ∈ H−1
3066
+ q (τm,n),
3067
+ and
3068
+ cu ∈ Lq(τm,n).
3069
+ Therefore, because Lq(τm,n) ⊂ H−1
3070
+ q (τm,n) ⊂ H−2
3071
+ q (τm,n),
3072
+ aijuxixj + biuxi + cu + ¯biuuxi ∈ H−2
3073
+ q (τm,n).
3074
+ (4.35)
3075
+ Similar to (4.12), we have
3076
+ ∥σ(u)η∥q
3077
+ Hγ−2+c0
3078
+ q
3079
+ (τm,n,l2) ≤ N
3080
+ ˆ τm,n
3081
+ 0
3082
+ ∥h(t, ·)∥q
3083
+ Lq dt < ∞.
3084
+ (4.36)
3085
+ Thus, we have
3086
+ σ(u)η ∈ Hγ−2+c0
3087
+ q
3088
+ (τm,n, l2) ⊂ H−2+c0
3089
+ q
3090
+ (τm,n, l2).
3091
+ (4.37)
3092
+ Due to (4.35), (4.37), and Lemma 4.2, u is in Lq(τm,n) and u satisfies (3.1) for almost
3093
+ every t ≤ τm,n almost surely. On the other hand, since ¯biuuxi ∈ Hγ−2
3094
+ q
3095
+ (τm,n) and
3096
+ σ(u)η ∈ Hγ−2+c0
3097
+ q
3098
+ (τm,n, l2), Lemma 4.2 implies that there exists v ∈ Hγ
3099
+ q(τm,n) satisfying
3100
+ ∂α
3101
+ t v = Lv + ¯biuuxi + ∂α
3102
+ t
3103
+ ˆ t
3104
+ 0
3105
+ σk(u)dWt,
3106
+ 0 < t ≤ τm,n ;
3107
+ v(0, ·) = u0,
3108
+ (4.38)
3109
+ where Lv = aijvxixj + bivxi + cv. In (4.38), note that ¯biuuxi and σk(u) are used instead of
3110
+ ¯bivvxi and σk(v). Moreover, because u ∈ Lq(τm,n) satisfies equation (4.38), ¯v := u − v ∈
3111
+ Lq(τm,n) satisfies
3112
+ ∂α
3113
+ t ¯v = aij¯vxixj + bi¯vxi + c¯v,
3114
+ 0 < t ≤ τm,n ;
3115
+ ¯v(0, ·) = 0.
3116
+ By the deterministic version of Lemma 4.2, we have ¯v = 0 in Lq(τm,n); thus, u = v in
3117
+ Lq(τm,n). Therefore, u is in Hγ
3118
+ q(τm,n). As ¯u ∈ Hγ
3119
+ q(τm,n) and ¯u satisfies equation (3.1), by
3120
+ Lemma 4.3, we have u = ¯u in Hγ
3121
+ q(τm,n). The theorem is proved.
3122
+
3123
+ 5.
3124
+ Proof of Theorem 2.16
3125
+ This section provides a proof of the embedding theorem for solution spaces Hγ
3126
+ p(τ). Con-
3127
+ sider the following fractional diffusion equation
3128
+ ∂α
3129
+ t u = ∆u
3130
+ t > 0 ;
3131
+ u(0, ·) = u0(·),
3132
+ (5.1)
3133
+ where α ∈ (0, 1) and u0(·) ∈ C∞
3134
+ c (Rd). It turns out that a fundamental solution p(t, x) exists
3135
+ such that
3136
+ p(t, ·) ∈ L1(Rd)
3137
+ and
3138
+ F(p(t, ·))(ξ) = Eα(−tα|ξ|2)
3139
+ (e.g., [24, Theorem 2.1]), and the solution of (5.1) is given by
3140
+ u(t, x) = (u0 ∗ p(t, ·))(x) =
3141
+ ˆ
3142
+ Rd u0(y)p(t, x − y)dy.
3143
+
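+ (Illustrative aside, not part of the original text: the Fourier symbol above can be evaluated numerically through the truncated Mittag-Leffler series E_α(z) = Σ_{k≥0} z^k/Γ(αk + 1). The helper names below are hypothetical, and the truncation is only adequate for moderate |z|.)
+ import math
+
+ def mittag_leffler(alpha, z, terms=200):
+     # truncated series E_alpha(z) = sum_{k>=0} z^k / Gamma(alpha*k + 1);
+     # accurate for moderate |z|, numerically unstable for large negative arguments
+     return sum(z**k / math.gamma(alpha * k + 1) for k in range(terms))
+
+ def p_hat(alpha, t, xi_norm):
+     # Fourier transform of the fundamental solution: F(p(t, .))(xi) = E_alpha(-t^alpha |xi|^2)
+     return mittag_leffler(alpha, -(t ** alpha) * xi_norm ** 2)
+
+ print(p_hat(0.5, 1.0, 1.0))  # ~0.4276; as alpha -> 1 this tends to the heat symbol exp(-t|xi|^2)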
3144
3146
+ For convenience, define
+ q_{α,β}(t, x) := I^{α−β} p(t, x) if α ≥ β,  and  q_{α,β}(t, x) := D^{β−α} p(t, x) if α < β.
3155
+ We gather some facts related to p(t, x) and qα,β(t, x) (for more information, see [22, 23, 24]).
3156
+ Lemma 5.1. Let d ∈ N, α ∈ (0, 1), β < α + 1/2, γ ∈ [0, 2), and σ ∈ R.
3157
+ (i) For all t ̸= 0 and x ̸= 0,
3158
+ ∂α
3159
+ t p(t, x) = ∆p(t, x)
3160
+ and
3161
+ ∂tp(t, x) = ∆qα,1(t, x).
3162
+ Additionally, for each x ̸= 0,
3163
+
3164
+ ∂tp(t, x) → 0 as t ↓ 0. Moreover,
3165
+
3166
+ ∂tp(t, x) is integrable
3167
+ in Rd uniformly on t ∈ [δ, T] for any δ > 0.
3168
+ (ii) There exist constants c = c(α, d) and N = N(α, d) such that if |x|2 ≥ tα,
3169
+ |p(t, x)| ≤ N |x|^{−d} exp( −c |x|^{2/(2−α)} t^{−α/(2−α)} ).
3178
+ (iii) Let n ∈ N. Then, there exists N = N(α, γ, n) such that
3179
+ |D^σ_t D^n_x (−∆)^{γ/2} q_{α,β}(1, x)| ≤ N ( |x|^{−d+2−γ−n} ∧ |x|^{−d−γ−n} ).
3186
+ (iv) The scaling properties hold. In other words,
3187
+ q_{α,β}(t, x) = t^{−αd/2 + α − β} q_{α,β}(1, x t^{−α/2}),
+ (−∆)^{γ/2} q_{α,β}(t, x) = t^{−α(d+γ)/2 + α − β} (−∆)^{γ/2} q_{α,β}(1, x t^{−α/2}).
3194
+ Proof. Assertions (i), (ii), and (iii) follow from Theorems 2.1 and 2.3 of [23]. For (iv), see (5.2)
3195
+ in [23].
3196
+
3197
+ Remark 5.2. To prove Theorem 2.16, we define the operators.
3198
+ Let φ ∈ C∞
3199
+ c (Rd) and
3200
+ f ∈ C∞
3201
+ c ((0, ∞) × Rd). Take a function g = (g1, g2, . . . ) satisfying the form
3202
+ g^k(t, x) = Σ_{i=1}^n 1_{((τ_{i−1},τ_i]]}(t) g^{ik}(x) for k = 1, 2, . . . , n,  and  g^k(t, x) = 0 for k = n + 1, . . .    (5.2)
3211
+ for some n ∈ N, where τi is the bounded stopping time and gik ∈ C∞
3212
+ c (Rd). Further, we set
3213
+ T^1_t φ(x) := ∫_{Rd} p(t, x − y) φ(y) dy,    (5.3)
+ T^2_t f(t, x) := ∫_0^t ∫_{Rd} q_{α,1}(t − r, x − y) f(r, y) dy dr,    (5.4)
+ T^3_t g(t, x) := Σ_k ∫_0^t ∫_{Rd} q_{α,β}(t − r, x − y) g^k(r, y) dy dw^k_r.    (5.5)
3235
+ It is well-known that T 1
3236
+ t φ, T 2
3237
+ t f, and T 3
3238
+ t g are solutions to
3239
+ ∂α
3240
+ t u1 = ∆u1;
3241
+ u1(0, ·) = φ,
3242
+ ∂α
3243
+ t u2 = ∆u2 + f;
3244
+ u2(0, ·) = 0,
3245
+ ∂α
3246
+ t u3 = ∆u3 + ∂β
3247
+ t
3248
+ ˆ t
3249
+ 0
3250
+ gkdwk
3251
+ s;
3252
+ u3(0, ·) = 0,
3253
+
3254
3256
+ respectively (for more information, see [22, 23, 24]).
3257
+ First, we provide a smoothing effect of T 1
3258
+ t , which implies the continuity of T 1
3259
+ t φ in t.
3260
+ Lemma 5.3. Let τ ≤ T be a bounded stopping time and α ∈ (0, 1). If p ∈ (1, ∞), θ ∈ [0, α),
3261
+ φ ∈ C^∞_c(Rd), and t ∈ (0, T), we have
+ e^{−t} ∥T^1_t φ∥_{H^{2θ/α}_p} ≤ N t^{−θ} ∥φ∥_{Lp}    (5.6)
3272
+ where N = N(α, θ, d, p, T).
3273
+ Proof. In the case of θ = 0, by Minkowski’s inequality, we have
3274
+ ∥T 1
3275
+ t φ∥Lp ≤ ∥p(t, ·) ∗ φ∥Lp ≤ ∥p(t, ·)∥L1∥φ∥Lp ≤ ∥φ∥Lp.
3276
+ (5.7)
3277
+ Thus, we have (5.6). For θ ∈ (0, α), observe that
3278
+ ∥e−tT 1
3279
+ t φ∥H2θ/α
3280
+ p
3281
+ = ∥(1 − ∆)θ/α(e−tT 1
3282
+ t φ)∥Lp ≤ ∥e−tT 1
3283
+ t φ∥Lp + ∥(−∆)θ/α(e−tT 1
3284
+ t φ)∥Lp,
3285
+ where the last inequality follows from Lemma 2.10 (v). As e−t ≤ (Nt−θ ∧ 1), we have
3286
+ ��e−tT 1
3287
+ t φ
3288
+ ��
3289
+ H2θ/α
3290
+ p
3291
+ ≤ N
3292
+
3293
+ t−θ ��T 1
3294
+ t φ
3295
+ ��
3296
+ Lp +
3297
+ ���(−∆)θ/αT 1
3298
+ t φ
3299
+ ���
3300
+ Lp
3301
+
3302
+ .
3303
+ (5.8)
3304
+ By inequality (5.7), we have
3305
+ t−θ∥T 1
3306
+ t φ∥Lp ≤ t−θ∥φ∥Lp.
3307
+ (5.9)
3308
+ On the other hand, Minkowski’s inequality yields
3309
+ ∥(−∆)θ/αT 1
3310
+ t h∥Lp = ∥(−∆)θ/α(p(t, ·) ∗ φ)∥Lp ≤ ∥((−∆)θ/αp)(t, ·)∥L1∥φ∥Lp.
3311
+ (5.10)
3312
+ Additionally Lemma 5.1 (iv), (ii), and (iii) imply
3313
+ ∥((−∆)θ/αp)(t, ·)∥L1 =
3314
+ ˆ
3315
+ Rd |((−∆)θ/αp)(t, x)|dx
3316
+ ≤ t−θ
3317
+ ˆ
3318
+ Rd |((−∆)θ/αp)(1, x)|dx
3319
+ ≤ N(α, θ, d, p)t−θ.
3320
+ (5.11)
3321
+ Then, by applying (5.11) to (5.10), we have
3322
+ ∥(−∆)θ/αT 1
3323
+ t φ∥Lp ≤ Nt−θ∥φ∥Lp.
3324
+ (5.12)
3325
+ Thus, by plugging in (5.9) and (5.12) into (5.8), we have (5.6). The lemma is proved.
3326
+
3327
+ To proceed further, we introduce one of the embedding theorems for Slobodetskii’s spaces.
3328
+ Lemma 5.4. If µp > 1 and p ≥ 1, for any continuous Lp-valued function φ and γ ≤ ρ, we
3329
+ have the following:
3330
+ ∥φ(ρ) − φ(γ)∥p
3331
+ Lp ≤ N(ρ − γ)µp−1
3332
+ ˆ ρ
3333
+ γ
3334
+ ˆ ρ
3335
+ γ
3336
+ 1t>s
3337
+ ∥φ(t) − φ(s)∥p
3338
+ Lp
3339
+ |t − s|1+µp
3340
+ dsdt
3341
+ �0
3342
+ 0 := 0
3343
+
3344
+ ,
3345
+ (5.13)
3346
+ where N = N(µ, p). In particular,
3347
+ E
3348
+ sup
3349
+ 0≤s<t≤T
3350
+ ∥φ(t) − φ(s)∥p
3351
+ Lp
3352
+ |t − s|µp−1
3353
+ ≤ N
3354
+ ˆ T
3355
+ 0
3356
+ ˆ T
3357
+ 0
3358
+ 1t>s
3359
+ E ∥φ(t) − φ(s)∥p
3360
+ Lp
3361
+ |t − s|1+µp
3362
+ dsdt.
3363
+ (5.14)
3364
+
3365
3367
+ With the help of Lemma 5.4, we obtain the continuity of T 2
3368
+ t f and T 3
3369
+ t g on t ∈ [0, T], and
3370
+ the H¨older continuity of T 1
3371
+ t φ, T 2
3372
+ t f, and T 3
3373
+ t g on [δ, T].
3374
+ First, we suggest the H¨older continuity of T 1
3375
+ t φ in t.
3376
+ Lemma 5.5. Let T < ∞, δ > 0, and α ∈ (0, 1). If p ∈ (1, ∞) and µ ∈ (0, 1) satisfy
3377
+ 1
3378
+ αp < µ < 1
3379
+ α,
3380
+ and φ ∈ C∞
3381
+ c (Rd), then
3382
+ sup
3383
+ δ≤s<t≤T
3384
+ ��T 1
3385
+ t φ − T 1
3386
+ s ��
3387
+ ��p
3388
+ Lp
3389
+ |t − s|αµp−1
3390
+ ≤ N ∥φ∥p
3391
+ Lp
3392
+ (5.15)
3393
+ where N = N(α, δ, d, p, T).
3394
+ Proof. By (5.14), we have
3395
+ sup
3396
+ δ≤s<t≤T
3397
+ ��T 1
3398
+ t φ − T 1
3399
+ s φ
3400
+ ��p
3401
+ Lp
3402
+ |t − s|αµp−1
3403
+ =
3404
+ sup
3405
+ 0≤s<t≤T−δ
3406
+ ��T 1
3407
+ t+δφ − T 1
3408
+ s+δφ
3409
+ ��p
3410
+ Lp
3411
+ |t − s|αµp−1
3412
+ ≤ N
3413
+ ˆ T−δ
3414
+ 0
3415
+ ˆ T−δ
3416
+ 0
3417
+ 1t>s
3418
+ ��T 1
3419
+ t+δφ − T 1
3420
+ s+δφ
3421
+ ��p
3422
+ Lp
3423
+ |t − s|1+αµp
3424
+ dsdt.
3425
+ By Minkowski’s inequality,
3426
+ ��T 1
3427
+ t+δφ − T 1
3428
+ s+δφ
3429
+ ��
3430
+ Lp ≤
3431
+ ˆ
3432
+ Rd |p(t + δ, y) − p(s + δ, y)| dy ∥φ∥Lp .
3433
+ Then, by the fundamental theorem of calculus, the change of variable, and Lemma 5.1 (i)
3434
+ - (iii),
3435
+ ˆ T−δ
3436
+ 0
3437
+ ˆ T−δ
3438
+ 0
3439
+ 1t>s|t − s|−1−αµp ��T 1
3440
+ t+δφ − T 1
3441
+ s+δφ
3442
+ ��p
3443
+ Lp dsdt
3444
+
3445
+ ˆ T−δ
3446
+ 0
3447
+ ˆ T−δ
3448
+ 0
3449
+ 1t>s|t − s|−1−αµp
3450
+ �ˆ
3451
+ Rd |p(t + δ, y) − p(s + δ, y)| dy ∥φ∥Lp
3452
+ �p
3453
+ dsdt
3454
+ =
3455
+ ˆ T−δ
3456
+ 0
3457
+ t−1−αµp
3458
+ ˆ T−t
3459
+ δ
3460
+ �ˆ
3461
+ Rd |p(t + s, y) − p(s, y)| dy
3462
+ �p
3463
+ dsdt ∥φ∥p
3464
+ Lp
3465
+
3466
+ ˆ T−δ
3467
+ 0
3468
+ t−1−αµp+p
3469
+ ˆ T−t
3470
+ δ
3471
+ �ˆ
3472
+ Rd sup
3473
+ r∈[δ,T]
3474
+ |∂tp(r, y)| dy
3475
+ �p
3476
+ dsdt ∥φ∥p
3477
+ Lp
3478
+ ≤ N ∥φ∥p
3479
+ Lp .
3480
+ (5.16)
3481
+ Thus, we have (5.15). The lemma is proved.
3482
+
3483
+ Remark 5.6. It should be remarked that we assume δ > 0 in Lemma 5.5, and it is a
3484
+ sufficient condition. Indeed, if we try δ = 0, the term
3485
+ ˆ T
3486
+ 0
3487
+ t−1−αµp
3488
+ ˆ T−t
3489
+ 0
3490
+ �ˆ
3491
+ Rd |p(t + s, y) − p(s, y)| dy
3492
+ �p
3493
+ dsdt
3494
+ in (5.16) blows up.
3495
+ Next we introduce the continuities of T 2
3496
+ t f and T 3
3497
+ t g.
3498
+
3499
3501
+ Lemma 5.7. Let α ∈ (0, 1).
3502
+ (i) If p ∈ (1, ∞), θ ∈ (0, α), and µ ∈ (0, 1) satisfy
3503
+ p >
3504
+ 1
3505
+ α − θ,
3506
+ µ ∈
3507
+ � 1
3508
+ αp, α − θ
3509
+ α
3510
+
3511
+ and f ∈ Lp((0, T) × Rd), then for t, s ∈ (0, T),
3512
+ ��T 2
3513
+ t f − T 2
3514
+ s f
3515
+ ��p
3516
+ H2θ/α
3517
+ p
3518
+ ≤ N|t − s|αµp−1
3519
+ ˆ T
3520
+ 0
3521
+ ∥f(r, ·)∥p
3522
+ Lp dr
3523
+ (5.17)
3524
+ where N = N(α, θ, d, p, T). Additionally,
3525
+ sup
3526
+ 0≤s<t≤T
3527
+ ��T 2
3528
+ t f − T 2
3529
+ s f
3530
+ ��p
3531
+ H2θ/α
3532
+ p
3533
+ |t − s|αµp−1
3534
+ ≤ N
3535
+ ˆ T
3536
+ 0
3537
+ ∥f(r, ·)∥p
3538
+ Lp dr,
3539
+ (5.18)
3540
+ where N = N(α, θ, d, p, T).
3541
+ (ii) Let τ ≤ T be a bounded stopping time. If β < α + 1
3542
+ 2, p ∈ [2, ∞), θ ∈ (0, α − β + 1/2),
3543
+ and µ ∈ (0, 1) satisfy
3544
+ p >
3545
+ 1
3546
+ (α − β − θ) ∧ 1/2 + 1/2,
3547
+ µ ∈
3548
+ � 1
3549
+ αp, (α − β − θ) ∧ 1/2 + 1/2
3550
+ α
3551
+
3552
+ ,
3553
+ and g ∈ Lp(τ, l2), then, for t, s ∈ (0, T), we have
3554
+ E
3555
+ ��T 3
3556
+ t∧τg − T 3
3557
+ s∧τg
3558
+ ��p
3559
+ H2θ/α
3560
+ p
3561
+ (l2) ≤ N|t − s|αµp−1∥g∥p
3562
+ Lp(τ,l2),
3563
+ (5.19)
3564
+ where N = N(α, β, θ, d, p, T). Additionally,
3565
+ E
3566
+ sup
3567
+ 0≤s<t≤τ
3568
+ ��T 3
3569
+ t g − T 3
3570
+ s g
3571
+ ��p
3572
+ H2θ/α
3573
+ p
3574
+ (l2)
3575
+ |t − s|αµp−1
3576
+ ≤ N∥g∥p
3577
+ Lp(τ,l2),
3578
+ (5.20)
3579
+ where N = N(α, β, θ, d, p, T).
3580
+ Proof. Proof of (i) Let ρ > 0 and γ > 0. Notice that (5.13) yields
3581
+ ��T 2
3582
+ ρ f − T 2
3583
+ γ f
3584
+ ��p
3585
+ H2θ/α
3586
+ p
3587
+ ≤ N|ρ − γ|αµp−1
3588
+ ˆ T
3589
+ 0
3590
+ ˆ T
3591
+ 0
3592
+ 1t>s
3593
+ ��T 2
3594
+ t f − T 2
3595
+ s f
3596
+ ��p
3597
+ H2θ/α
3598
+ p
3599
+ |t − s|1+αµp
3600
+ dsdt.
3601
+ (5.21)
3602
+ Then, by definition of T 2
3603
+ t f (see (5.4)),
3604
+ ��T 2
3605
+ t f − T 2
3606
+ s f
3607
+ ��
3608
+ H2θ/α
3609
+ p
3610
+
3611
+ ����
3612
+ ˆ t
3613
+ s
3614
+ ˆ
3615
+ Rd qα,1(t − r, y)f(r, · − y)dydr
3616
+ ����
3617
+ H2θ/α
3618
+ p
3619
+ +
3620
+ ����
3621
+ ˆ s
3622
+ 0
3623
+ ˆ
3624
+ Rd (qα,1(t − r, y) − qα,1(s − r, y)) f(r, · − y)dydr
3625
+ ����
3626
+ H2θ/α
3627
+ p
3628
+ .
3629
+ (5.22)
3630
+ Set
3631
+ I1 :=
3632
+ ˆ T
3633
+ 0
3634
+ ˆ T
3635
+ 0
3636
+ 1t>s
3637
+ ���
3638
+ ´ t
3639
+ s
3640
+ ´
3641
+ Rd qα,1(t − r, y)f(r, · − y)dydr
3642
+ ���
3643
+ p
3644
+ H2θ/α
3645
+ p
3646
+ |t − s|1+αµp
3647
+ dsdt,
3648
+ I2 :=
3649
+ ˆ T
3650
+ 0
3651
+ ˆ T
3652
+ 0
3653
+ 1t>s
3654
+ ��´ s
3655
+ 0
3656
+ ´
3657
+ Rd (qα,1(t − r, y) − qα,1(s − r, y)) f(r, · − y)dydr
3658
+ ��p
3659
+ H2θ/α
3660
+ p
3661
+ |t − s|1+αµp
3662
+ dsdt.
3663
+ (5.23)
3664
+
3665
3667
+ Then, apply (5.22) and (5.23) to (5.21),
3668
+ ��T 2
3669
+ t f − T 2
3670
+ s f
3671
+ ��p
3672
+ H2θ/α
3673
+ p
3674
+ ≤ |t − s|αµp−1(I1 + I2).
3675
+ (5.24)
3676
+ To deal with I1, we employ Minkowski’s inequality, the change of variable, and Lemma 2.10
3677
+ (v). Then,
3678
+ I1 ≤
3679
+ ˆ T
3680
+ 0
3681
+ t−1−αµp
3682
+ �ˆ t
3683
+ 0
3684
+ ˆ
3685
+ Rd
3686
+ ���
3687
+
3688
+ (1 − ∆)θ/αqα,1
3689
+
3690
+ (r, y)
3691
+ ��� dydr
3692
+ �p
3693
+ dt
3694
+ ˆ T
3695
+ 0
3696
+ ∥f(s, ·)∥p
3697
+ Lp ds
3698
+ ≤ I11 + I12,
3699
+ (5.25)
3700
+ where
3701
+ I11 :=
3702
+ ˆ T
3703
+ 0
3704
+ t−1−αµp
3705
+ �ˆ t
3706
+ 0
3707
+ ˆ
3708
+ Rd |qα,1(r, y)| dydr
3709
+ �p
3710
+ dt
3711
+ ˆ T
3712
+ 0
3713
+ ∥f(s, ·)∥p
3714
+ Lp ds,
3715
+ I12 :=
3716
+ ˆ T
3717
+ 0
3718
+ t−1−αµp
3719
+ �ˆ t
3720
+ 0
3721
+ ˆ
3722
+ Rd
3723
+ ���
3724
+
3725
+ ∆θ/αqα,1
3726
+
3727
+ (r, y)
3728
+ ��� dydr
3729
+ �p
3730
+ dt
3731
+ ˆ T
3732
+ 0
3733
+ ∥f(s, ·)∥p
3734
+ Lp ds.
3735
+ Because µ < 1, Lemma 5.1 (iv) and (iii) yield
3736
+ I11 =
3737
+ ˆ T
3738
+ 0
3739
+ t−1−αµp+αpdt
3740
+ �ˆ
3741
+ Rd |qα,1(1, y)| dy
3742
+ �p ˆ T
3743
+ 0
3744
+ ∥f(s, ·)∥p
3745
+ Lp ds
3746
+ ≤ N
3747
+ ˆ T
3748
+ 0
3749
+ ∥f(s, ·)∥p
3750
+ Lp ds.
3751
+ (5.26)
3752
+ Similarly, since µ < 1 − θ/α,
3753
+ I12 =
3754
+ ˆ T
3755
+ 0
3756
+ t−1−αµp+αp−θpdt
3757
+ �ˆ
3758
+ Rd
3759
+ ���
3760
+
3761
+ ∆θ/αqα,1
3762
+
3763
+ (1, y)
3764
+ ��� dy
3765
+ �p ˆ T
3766
+ 0
3767
+ ∥f(s, ·)∥p
3768
+ Lp ds
3769
+ ≤ N
3770
+ ˆ T
3771
+ 0
3772
+ ∥f(s, ·)∥p
3773
+ Lp ds.
3774
+ (5.27)
3775
+ Thus, by applying (5.26) and (5.27) to (5.25), we have
3776
+ I1 ≤ N
3777
+ ˆ T
3778
+ 0
3779
+ ∥f(s, ·)∥p
3780
+ Lpds.
3781
+ (5.28)
3782
+ Next, we address I2. Similar to the case for I1, we have
3783
+ I2 ≤
3784
+ ˆ T
3785
+ 0
3786
+ ´ T−t
3787
+ 0
3788
+ �´ s
3789
+ 0
3790
+ ´
3791
+ Rd
3792
+ ��(1 − ∆)θ/α (qα,1(t + r, y) − qα,1(r, y))
3793
+ �� dy∥f(s − r, ·)∥Lpdr
3794
+ �p ds
3795
+ t1+αµp
3796
+ dt
3797
+ ≤ I21 + I22,
3798
+ (5.29)
3799
+ where
3800
+ I21 :=
3801
+ ˆ T
3802
+ 0
3803
+ ´ T−t
3804
+ 0
3805
+ �´ s
3806
+ 0
3807
+ ´
3808
+ Rd |qα,1(t + r, y) − qα,1(r, y)| dy∥f(s − r, ·)∥Lpdr
3809
+ �p ds
3810
+ t1+αµp
3811
+ dt,
3812
+ I22 :=
3813
+ ˆ T
3814
+ 0
3815
+ ´ T−t
3816
+ 0
3817
+ �´ s
3818
+ 0
3819
+ ´
3820
+ Rd
3821
+ ��∆θ/α (qα,1(t + r, y) − qα,1(r, y))
3822
+ �� dy∥f(s − r, ·)∥Lpdr
3823
+ �p ds
3824
+ t1+αµp
3825
+ dt.
3826
+
3827
3829
+ Since µ < 1, by Minkowski’s inequality and the fundamental theorem of calculus, we have
3830
+ I21 ≤
3831
+ ˆ T
3832
+ 0
3833
+ t−1−αµp
3834
+ �ˆ T−t
3835
+ 0
3836
+ ˆ
3837
+ Rd |qα,1(t + r, y) − qα,1(r, y)| dydr
3838
+ �p
3839
+ dt
3840
+ ˆ T
3841
+ 0
3842
+ ∥f(s, ·)∥p
3843
+ Lp(Rd)ds
3844
+
3845
+ ˆ T
3846
+ 0
3847
+ t−1−αµp
3848
+ �ˆ T−t
3849
+ 0
3850
+ ˆ t+r
3851
+ r
3852
+ ˆ
3853
+ Rd |qα,2(s, y)| dydsdr
3854
+ �p
3855
+ dt
3856
+ ˆ T
3857
+ 0
3858
+ ∥f(s, ·)∥p
3859
+ Lp(Rd)ds
3860
+ ≤ N
3861
+ ˆ T
3862
+ 0
3863
+ t−1−αµp
3864
+ �ˆ T−t
3865
+ 0
3866
+ rα−1 − (t + r)α−1dr
3867
+ �p
3868
+ dt
3869
+ ˆ T
3870
+ 0
3871
+ ∥f(s, ·)∥p
3872
+ Lp(Rd)ds
3873
+ ≤ N
3874
+ ˆ T
3875
+ 0
3876
+ t−1−αµp+αpdt
3877
+ ˆ T
3878
+ 0
3879
+ ∥f(s, ·)∥p
3880
+ Lp(Rd)ds
3881
+ ≤ N
3882
+ ˆ T
3883
+ 0
3884
+ ∥f(s, ·)∥p
3885
+ Lp(Rd)ds.
3886
+ (5.30)
3887
+ Additionally, since µ < 1 − θ/α,
3888
+ I22 ≤
3889
+ ˆ T
3890
+ 0
3891
+ t−1−αµp
3892
+ �ˆ T−t
3893
+ 0
3894
+ ˆ
3895
+ Rd
3896
+ ˆ t+r
3897
+ r
3898
+ ���(∆θ/αqα,2(s, y)
3899
+ ��� dsdydr
3900
+ �p
3901
+ dt
3902
+ ˆ T
3903
+ 0
3904
+ ∥f(s, ·)∥p
3905
+ Lpds
3906
+ ≤ N
3907
+ ˆ T
3908
+ 0
3909
+ t−1−αµp+αp−θpdt
3910
+ ˆ T
3911
+ 0
3912
+ ∥f(s, ·)∥p
3913
+ Lpds
3914
+ ≤ N
3915
+ ˆ T
3916
+ 0
3917
+ ∥f(s, ·)∥p
3918
+ Lpds.
3919
+ (5.31)
3920
+ Therefore, by employing (5.30) and (5.31) to (5.29), we have
3921
+ I2 ≤ N
3922
+ ˆ T
3923
+ 0
3924
+ ∥f(s, ·)∥p
3925
+ Lpds,
3926
+ (5.32)
3927
+ and thus by combining (5.28) and (5.32) to (5.24), we have (5.17).
3928
+ To obtain (5.18), employ (5.14) instead of (5.13) and repeat the proof word for word.
3929
+ Proof of (ii) By (5.13), we have
3930
+ E
3931
+ ��T 3
3932
+ ρ g − T 3
3933
+ γ g
3934
+ ��p
3935
+ H2θ/α
3936
+ p
3937
+ (l2) ≤ N|ρ − γ|αµp−1
3938
+ ˆ T
3939
+ 0
3940
+ ˆ T
3941
+ 0
3942
+ 1t>s
3943
+ E
3944
+ ��T 3
3945
+ t g − T 3
3946
+ s g
3947
+ ��p
3948
+ H2θ/α
3949
+ p
3950
+ (l2)
3951
+ |t − s|1+αµp
3952
+ dsdt.
3953
+
3954
3956
+ Notice that the Burkholder-Davis-Gundy and Minkowski’s inequalities imply that
3957
+ E
3958
+ ��T 3
3959
+ t g − T 3
3960
+ s g
3961
+ ��p
3962
+ H2θ/α
3963
+ p
3964
+ (l2)
3965
+ ≤ N
3966
+ ˆ
3967
+ Rd E
3968
+ ��
3969
+ k
3970
+ ����
3971
+ ˆ t
3972
+ s
3973
+ ˆ
3974
+ Rd((1 − ∆)θ/α qα,β)(t − r, y)gk(r, x − y)dydwk
3975
+ r
3976
+ ����
3977
+ 2�p/2
3978
+ +
3979
+ ��
3980
+ k
3981
+ ����
3982
+ ˆ s
3983
+ 0
3984
+ ˆ
3985
+ Rd
3986
+
3987
+ (1 − ∆)θ/α (qα,β(t − r, y) − qα,β(s − r, y))
3988
+
3989
+ gk(r, x − y)dydwk
3990
+ r
3991
+ ����
3992
+ 2�p/2
3993
+ dx
3994
+ ≤ N
3995
+ ˆ
3996
+ Rd E
3997
+ �ˆ t−s
3998
+ 0
3999
+ �ˆ
4000
+ Rd
4001
+ ���((1 − ∆)θ/α qα,β)(t − s − r, y)
4002
+ ��� |g(s + r, x − y)|l2dy
4003
+ �2
4004
+ dr
4005
+ �p/2
4006
+ +
4007
+ �ˆ s
4008
+ 0
4009
+ �ˆ
4010
+ Rd
4011
+ ���(1 − ∆)θ/α (qα,β(t − r, y) − qα,β(s − r, y))
4012
+ ��� |g(r, x − y)|l2dy
4013
+ �2
4014
+ dr
4015
+ �p/2
4016
+ dx
4017
+ ≤ NE
4018
+ �ˆ t−s
4019
+ 0
4020
+ �ˆ
4021
+ Rd
4022
+ ���((1 − ∆)θ/α qα,β)(t − s − r, y)
4023
+ ��� dy
4024
+ �2
4025
+ ∥g(s + r, ·)∥2
4026
+ Lp(l2)dr
4027
+ �p/2
4028
+ + NE
4029
+ �ˆ s
4030
+ 0
4031
+ �ˆ
4032
+ Rd
4033
+ ���(1 − ∆)θ/α (qα,β(t − r, y) − qα,β(s − r, y))
4034
+ ��� dy
4035
+ �2
4036
+ ∥g(r, ·)∥2
4037
+ Lp(l2)dr
4038
+ �p/2
4039
+ .
4040
+ Then, set
4041
+ I3 :=
4042
+ ˆ T
4043
+ 0
4044
+ ˆ T
4045
+ 0
4046
+ 1t>s|t − s|−1−αµpE
4047
+ �ˆ t−s
4048
+ 0
4049
+ A(t, s, r)∥g(s + r, ·)∥2
4050
+ Lp(l2)dr
4051
+ �p/2
4052
+ dsdt,
4053
+ I4 :=
4054
+ ˆ T
4055
+ 0
4056
+ ˆ T
4057
+ 0
4058
+ 1t>s|t − s|−1−αµpE
4059
+ �ˆ s
4060
+ 0
4061
+ B(t, s, r)∥g(r, ·)∥2
4062
+ Lp(l2)dr
4063
+ �p/2
4064
+ dsdt,
4065
+ where
4066
+ A(t, s, r) =
4067
+ �ˆ
4068
+ Rd | (1 − ∆)θ/α qα,β(t − s − r, y)|dy
4069
+ �2
4070
+ ,
4071
+ B(t, s, r) =
4072
+ �ˆ
4073
+ Rd
4074
+ ���(1 − ∆)θ/α (qα,β(t − r, y) − qα,β(s − r, y))
4075
+ ��� dy
4076
+ �2
4077
+ .
4078
+ Note that Minkowski’s inequality and Lemma 2.10 (v) imply that
4079
+ I3 ≤
4080
+ ˆ T
4081
+ 0
4082
+ t−1−αµp
4083
+ �ˆ t
4084
+ 0
4085
+ �ˆ
4086
+ Rd
4087
+ ���((1 − ∆)θ/α qα,β)(t − r, y)
4088
+ ��� dy
4089
+ �2
4090
+ dr
4091
+ �p/2
4092
+ dt∥g∥p
4093
+ Lp(T,l2)
4094
+ ≤ I31 + I32,
4095
+ (5.33)
4096
+ where
4097
+ I31 :=
4098
+ ˆ T
4099
+ 0
4100
+ t−1−αµp
4101
+ �ˆ t
4102
+ 0
4103
+ �ˆ
4104
+ Rd |qα,β(r, y)|dy
4105
+ �2
4106
+ dr
4107
+ �p/2
4108
+ dt∥g∥p
4109
+ Lp(T,l2),
4110
+ I32 :=
4111
+ ˆ T
4112
+ 0
4113
+ t−1−αµp
4114
+ �ˆ t
4115
+ 0
4116
+ �ˆ
4117
+ Rd
4118
+ ���
4119
+
4120
+ ∆θ/αqα,β
4121
+
4122
+ (r, y)
4123
+ ��� dy
4124
+ �2
4125
+ dr
4126
+ �p/2
4127
+ dt∥g∥p
4128
+ Lp(T,l2).
4129
+
4130
4132
+ Since 1
4133
+ α
4134
+
4135
+ α − β + 1
4136
+ 2
4137
+
4138
+ > µ, by Lemma 5.1 (iii), we have
4139
+ I31 =
4140
+ ˆ T
4141
+ 0
4142
+ t−1−αµp
4143
+ �ˆ t
4144
+ 0
4145
+ �ˆ
4146
+ Rd |qα,β(r, y)|dy
4147
+ �2
4148
+ dr
4149
+ �p/2
4150
+ dt∥g∥p
4151
+ Lp(T,l2)
4152
+ =
4153
+ ˆ T
4154
+ 0
4155
+ t−1−αµp
4156
+ �ˆ t
4157
+ 0
4158
+ r2(α−β)dr
4159
+ �p/2
4160
+ dt
4161
+ �ˆ
4162
+ Rd |qα,β(1, y)|dy
4163
+ �p
4164
+ ∥g∥p
4165
+ Lp(T,l2)
4166
+ =
4167
+ ˆ T
4168
+ 0
4169
+ t−1+(α−β+1/2−αµ)pdt
4170
+ �ˆ
4171
+ Rd |qα,β(1, y)|dy
4172
+ �p
4173
+ ∥g∥p
4174
+ Lp(T,l2)
4175
+ ≤ N∥g∥p
4176
+ Lp(T,l2).
4177
+ (5.34)
4178
+ Similarly, as 1
4179
+ α
4180
+
4181
+ α − β − θ + 1
4182
+ 2
4183
+
4184
+ > µ,
4185
+ I32 ≤
4186
+ ˆ T
4187
+ 0
4188
+ t−1+(−αµ+α−β−θ+1/2)pdt
4189
+ �ˆ
4190
+ Rd |(∆θ/αqα,β)(1, y)|dy
4191
+ �p
4192
+ ∥g∥p
4193
+ Lp(T,l2)
4194
+ ≤ N∥g∥p
4195
+ Lp(T,l2).
4196
+ (5.35)
4197
+ Therefore, by employing (5.34) and (5.35) to (5.33), we have
4198
+ I3 ≤ N∥g∥p
4199
+ Lp(T,l2).
4200
+ (5.36)
4201
+ In the case of I4, by Minkowski’s inequality and Lemma 2.10 (v), we have
4202
+ I4 ≤ I41 + I42.
4203
+ Further,
4204
+ I41 :=
4205
+ ˆ T
4206
+ 0
4207
+ t−1−αµpE
4208
+ ˆ T−t
4209
+ 0
4210
+ �ˆ s
4211
+ 0
4212
+ �ˆ
4213
+ Rd |C(t, r, y)|dy
4214
+ �2
4215
+ ∥g(s − r, ·)∥2
4216
+ Lp(l2)dr
4217
+ �p/2
4218
+ dsdt,
4219
+ I42 :=
4220
+ ˆ T
4221
+ 0
4222
+ t−1−αµpE
4223
+ ˆ T−t
4224
+ 0
4225
+ �ˆ s
4226
+ 0
4227
+ �ˆ
4228
+ Rd |∆θ/αC(t, r, y)|dy
4229
+ �2
4230
+ ∥g(s − r, ·)∥2
4231
+ Lp(l2)dr
4232
+ �p/2
4233
+ dsdt,
4234
+ where
4235
+ C(t, r, y) = qα,β(t + r, y) − qα,β(r, y).
4236
+ We address I41.
4237
+ By Minkowski’s inequality, the fundamental theorem of calculus, and
4238
+ Lemma 5.1, we have
4239
+ I41 ≤ N
4240
+ ˆ T
4241
+ 0
4242
+ t−1−αµp
4243
+ �ˆ T−t
4244
+ 0
4245
+ �ˆ
4246
+ Rd |qα,β(t + r, y) − qα,β(r, y)|dy
4247
+ �2
4248
+ dr
4249
+ �p/2
4250
+ dt∥g∥p
4251
+ Lp(T,l2)
4252
+ ≤ N
4253
+ ˆ T
4254
+ 0
4255
+ t−1−αµp
4256
+ �ˆ T−t
4257
+ 0
4258
+ �ˆ t+r
4259
+ r
4260
+ ˆ
4261
+ Rd |qα,β+1(s, y)|dyds
4262
+ �2
4263
+ dr
4264
+ �p/2
4265
+ dt∥g∥p
4266
+ Lp(T,l2)
4267
+ ≤ NH1∥g∥p
4268
+ Lp(T,l2),
4269
+ (5.37)
4270
+ where
4271
+ H1 :=
4272
+ ˆ T
4273
+ 0
4274
+ t−1−αµp
4275
+ �ˆ T−t
4276
+ 0
4277
+ �ˆ t+r
4278
+ r
4279
+ sα−β−1ds
4280
+ �2
4281
+ dr
4282
+ �p/2
4283
+ dt
4284
+ (5.38)
4285
+
4286
4288
+ and N = N(α, β, d, p, T). Next, we claim that
4289
+ H1 < ∞
4290
+ (5.39)
4291
+ To demonstrate (5.39), set
4292
+ χ(t) :=
4293
+ ˆ T−t
4294
+ 0
4295
+ �ˆ t+r
4296
+ r
4297
+ sα−β−1ds
4298
+ �2
4299
+ dr.
4300
+ Furthermore,
4301
+ H1 =
4302
+ ˆ T
4303
+ 0
4304
+ t−1−αµp(χ(t))p/2dt
4305
+ (5.40)
4306
+ Depending on the range of α − β, we consider the following five cases.
4307
+ (Case 1.) −1/2 < α − β < 0
4308
+ For t ∈ (0, T), we have
4309
+ 0 ≤ χ(t) ≤ N
4310
+ ˆ T−t
4311
+ 0
4312
+ r2(α−β) − (t + r)2(α−β)dr ≤ Nt2(α−β)+1,
4313
+ (5.41)
4314
+ where N = N(α, β). Then, since 1
4315
+ α(α − β + 1/2) > µ, by combining (5.40) and (5.41),
4316
+ H1 ≤ N(α, β, p)
4317
+ ˆ T
4318
+ 0
4319
+ t−1+p(−αµ+α−β+1/2)dt < ∞.
4320
+ (Case 2.) α − β = 0
4321
+ Notice that
4322
+ χ(t) =
4323
+ ˆ T−t
4324
+ 0
4325
+ �ˆ t+r
4326
+ r
4327
+ s−1ds
4328
+ �2
4329
+ dr =
4330
+ ˆ T−t
4331
+ 0
4332
+
4333
+ log
4334
+ �t + r
4335
+ r
4336
+ ��2
4337
+ dr.
4338
+ Obviously, χ(0) = 0. Note that
4339
+ χ′(t) ≤ 2
4340
+ ˆ ∞
4341
+ 0
4342
+ 1
4343
+ 1 + r log
4344
+ �1 + r
4345
+ r
4346
+
4347
+ dr = 2
4348
+ ˆ ∞
4349
+ 0
4350
+ x(ex − 1)−1dx = π2/3
4351
+ on t ∈ (0, T/2). Thus,
4352
+ 0 ≤ χ(t) = χ(t) − χ(0) =
4353
+ ˆ t
4354
+ 0
4355
+ χ′(s)ds ≤ π2
4356
+ 3 t
4357
+ (5.42)
4358
+ on t ∈ (0, T/2). Additionally, χ(t) ≤ N on t ∈ (T/2, T). Therefore,
4359
+ H1 ≤ N
4360
+ ˆ T/2
4361
+ 0
4362
+ t−1+(−αµ+1/2)pdt + N < ∞,
4363
+ where N = N(α, β, p).
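+ (Sanity check, illustrative only: expanding (e^x − 1)^{−1} = Σ_{k≥1} e^{−kx} gives ∫_0^∞ x(e^x − 1)^{−1} dx = Σ_{k≥1} k^{−2} = π²/6, which is consistent with the constant π²/3 used in Case 2 after multiplying by 2.)
+ import math
+
+ # partial sum of sum_{k>=1} 1/k^2 = pi^2/6; doubling it recovers the pi^2/3 above
+ partial = sum(1.0 / k ** 2 for k in range(1, 200001))
+ print(2 * partial, math.pi ** 2 / 3)  # both approximately 3.2899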
4364
+ (Case 3.) 0 < α − β < 1/2
4365
+ Observe that χ is twice continuously differentiable, and
4366
+ χ′(t) = (α − β)−2 �
4367
+ −T 2(α−β) − (T − t)2(α−β) + 2T α−β(T − t)α−β�
4368
+ + 2(α − β)−1
4369
+ ˆ T−t
4370
+ 0
4371
+ (t + r)2(α−β)−1 − (t + r)α−β−1rα−βdr,
4372
+
4373
4375
+ and
4376
+ χ′′(t) = 2(α − β)−1(T − t)2(α−β)−1 − 2(α − β)−1T α−β(T − t)α−β−1
4377
+ + 2(α − β)−1T α−β−1(T − t)α−β − 2(α − β)−1t2(α−β)−1
4378
+ − 2(α − β)−1(α − β − 1)
4379
+ ˆ T−t
4380
+ 0
4381
+ (t + r)α−β−2rα−βdr.
4382
+ (5.43)
4383
+ In addition, χ(0) = χ′(0) = 0. Then, by using the fundamental theorem of calculus and
4384
+ α − β ∈ (0, 1/2), we obtain
4385
+ χ(t) =
4386
+ ˆ t
4387
+ 0
4388
+ ˆ s
4389
+ 0
4390
+ χ′′(ρ)dρds
4391
+ ≤ 2(α − β)−1
4392
+ ˆ t
4393
+ 0
4394
+ ˆ s
4395
+ 0
4396
+ (T − ρ)2(α−β)−1dρds
4397
+ − 2(α − β)−1(α − β − 1)
4398
+ ˆ t
4399
+ 0
4400
+ ˆ s
4401
+ 0
4402
+ ˆ T−ρ
4403
+ 0
4404
+ (ρ + r)α−β−2rα−βdrdρds
4405
+ ≤ N
4406
+ ˆ t
4407
+ 0
4408
+
4409
+ T 2(α−β) − (T − s)2(α−β)�
4410
+ ds + N
4411
+ ˆ t
4412
+ 0
4413
+ ˆ s
4414
+ 0
4415
+ ˆ T−ρ
4416
+ 0
4417
+ (ρ + r)2(α−β)−2drdρds
4418
+ ≤ N
4419
+ ˆ t
4420
+ 0
4421
+ s2(α−β)ds + N
4422
+ ˆ t
4423
+ 0
4424
+ ˆ s
4425
+ 0
4426
+ T 2(α−β)−1drdρds
4427
+ ≤ Nt2(α−β)+1,
4428
+ (5.44)
4429
+ where N = N(α, β, T). Thus,
4430
+ H1 ≤ N(α, β, p, T)
4431
+ ˆ T
4432
+ 0
4433
+ t−1+p(−αµ+α−β+1/2)dt < ∞.
4434
+ (Case 4.) α − β = 1/2
4435
+ Because χ(0) = χ′(0) = 0, by the fundamental theorem of calculus, we have
4436
+ χ(t) =
4437
+ ˆ t
4438
+ 0
4439
+ ˆ s
4440
+ 0
4441
+ χ′′(ρ)dρds
4442
+ ≤ N
4443
+ ˆ t
4444
+ 0
4445
+ ˆ s
4446
+ 0
4447
+ (T − ρ)1/2 + N
4448
+ ˆ T−ρ
4449
+ 0
4450
+ (ρ + r)−3/2r1/2drdρds
4451
+ ≤ Nt2(1 + | log t|),
4452
+ where N = N(T). Therefore,
4453
+ H1 ≤ N(p, T)
4454
+ ˆ T
4455
+ 0
4456
+ t−1+p(−αµ+1)(1 + | log t|)p/2dt < ∞.
4457
+ (Case 5.) α − β > 1/2
4458
+
4459
4461
+ Similar to before, χ(0) = χ′(0) = 0. Additionally, as in (5.44), we have
4462
+ χ(t) =
4463
+ ˆ t
4464
+ 0
4465
+ ˆ s
4466
+ 0
4467
+ χ′′(r)drds
4468
+ ≤ N
4469
+ ˆ t
4470
+ 0
4471
+
4472
+ T 2(α−β) − (T − s)2(α−β)�
4473
+ ds + N
4474
+ ˆ t
4475
+ 0
4476
+ ˆ s
4477
+ 0
4478
+ ˆ T−ρ
4479
+ 0
4480
+ (ρ + r)2(α−β)−2drdρds
4481
+ ≤ N
4482
+ ˆ t
4483
+ 0
4484
+ sds + N
4485
+ ˆ t
4486
+ 0
4487
+ ˆ s
4488
+ 0
4489
+ dρds
4490
+ ≤ Nt2,
4491
+ where N = N(α, β, T). Therefore,
4492
+ H1 ≤ N(α, β, p, T)
4493
+ ˆ T
4494
+ 0
4495
+ t−1+p(−αµ+1)dt < ∞.
4496
+ Thus, we have (5.39). Then, by combining (5.39) and (5.37), we have I41 ≤ N∥g∥p
4497
+ Lp(T,l2).
4498
+ Next, we deal with I42. Minkowski’s inequality, the fundamental theorem of calculus,
4499
+ and Lemma 5.1 yield
4500
+ I42 ≤ N
4501
+ ˆ T
4502
+ 0
4503
+ t−1−αµp
4504
+ �ˆ T−t
4505
+ 0
4506
+ �ˆ
4507
+ Rd |∆θ/αqα,β(t + r, y) − ∆θ/αqα,β(r, y)|dy
4508
+ �2
4509
+ dr
4510
+ �p/2
4511
+ dt∥g∥p
4512
+ Lp(T,l2)
4513
+ ≤ N
4514
+ ˆ T
4515
+ 0
4516
+ t−1−αµp
4517
+ �ˆ T−t
4518
+ 0
4519
+ �ˆ t+r
4520
+ r
4521
+ ˆ
4522
+ Rd |∂s∆θ/αqα,β(s, y)|dyds
4523
+ �2
4524
+ dr
4525
+ �p/2
4526
+ dt∥g∥p
4527
+ Lp(T,l2)
4528
+ ≤ NH2∥g∥p
4529
+ Lp(T,l2),
4530
+ where N = N(α, β, d, p, T) and H2 :=
4531
+ ´ T
4532
+ 0 t−1−αµp
4533
+ �´ T−t
4534
+ 0
4535
+ �´ t+r
4536
+ r
4537
+ sα−β−θ−1ds
4538
+ �2
4539
+ dr
4540
+ �p/2
4541
+ dt.
4542
+ Similar to the case of H1 (see (5.38) and (5.39)), we demonstrate that
4543
+ H2 < ∞
4544
+ by considering five cases for α − β − θ instead of α − β. Then, we have I42 ≤ N∥g∥p
4545
+ Lp(T,l2).
4546
+ The lemma is proved.
4547
+
4548
+ Proof of Theorem 2.16. It suffices to show that the assertion holds for τ = T. Indeed,
4549
+ assume the results holds for Hγ
4550
+ p(T), and let τ ≤ T be a bounded stopping time and u ∈
4551
+
4552
+ p(τ). Then, by Definition 2.4 for ε > 0, there exists (f, g) ∈ Hγ−2
4553
+ p
4554
+ (τ)×Hγ−2+c0
4555
+ p
4556
+ (τ, l2) such
4557
+ that
4558
+ ∂α
4559
+ t u = f + ∂β
4560
+ t
4561
+ ˆ t
4562
+ 0
4563
+ gkdwk
4564
+ t ;
4565
+ u(0, ·) = u0(·)
4566
+ and
4567
+ ∥u∥Hγ
4568
+ p(τ) + ∥u0∥Uα,γ
4569
+ p
4570
+ + ∥f∥Hγ−2
4571
+ p
4572
+ (τ) + ∥g∥Hγ−2+c0
4573
+ p
4574
+ (τ,l2) ≤ ∥u∥Hγ
4575
+ p(τ) + ε.
4576
+ Set ¯f := (f − ∆u)1t≤τ and ¯g := g1t≤τ ; thus, u satisfies
4577
+ ∂α
4578
+ t u = ∆u + ¯f + ∂β
4579
+ t
4580
+ ˆ t
4581
+ 0
4582
+ ¯gkdwk
4583
+ t ,
4584
+ 0 < t ≤ τ ;
4585
+ u(0, ·) = u0(·).
4586
+ (5.45)
4587
+
4588
4590
+ In contrast, by [25, Theorem 2.18], there exists v ∈ Hγ
4591
+ p(T) such that v satisfies
4592
+ ∂α
4593
+ t v = ∆v + ¯f + ∂β
4594
+ t
4595
+ ˆ t
4596
+ 0
4597
+ ¯gkdwk
4598
+ t ,
4599
+ 0 < t ≤ τ ;
4600
+ v(0, ·) = u0(·).
4601
+ Additionally,
4602
+ ∥v∥Hγ
4603
+ p (T) ≤ N
4604
+ ��� ¯f
4605
+ ��
4606
+ Hγ−2
4607
+ p
4608
+ (T) + ∥¯g∥Hγ−2+c0
4609
+ p
4610
+ (T,l2) + ∥u0∥Uα,γ
4611
+ p
4612
+
4613
+ ≤ N
4614
+
4615
+ ∥u∥Hγ
4616
+ p(τ) + ∥u0∥Uα,γ
4617
+ p
4618
+ + ∥f∥Hγ−2
4619
+ p
4620
+ (τ) + ∥g∥Hγ−2+c0
4621
+ p
4622
+ (τ,l2)
4623
+
4624
+ ≤ N∥u∥Hγ
4625
+ p(τ) + Nε,
4626
+ (5.46)
4627
+ where N is independent of ε. Because v ∈ Hγ
4628
+ p(T), v ∈ C
4629
+
4630
+ [0, T]; Hγ−2ν
4631
+ p
4632
+
4633
+ almost surely and
4634
+ E sup
4635
+ t≤T
4636
+ ∥v(t, ·)∥p
4637
+ Hγ−2ν
4638
+ p
4639
+ ≤ N∥v∥p
4640
+
4641
+ p(T)
4642
+ (5.47)
4643
+ by the hypothesis. Therefore, due to τ ≤ T, (5.46) and (5.47) yield
4644
+ E sup
4645
+ t≤τ
4646
+ ∥v(t, ·)∥p
4647
+ Hγ−2ν
4648
+ p
4649
+ ≤ E sup
4650
+ t≤T
4651
+ ∥v(t, ·)∥p
4652
+ Hγ−2ν
4653
+ p
4654
+ ≤ N∥v∥p
4655
+
4656
+ p(T) ≤ N∥u∥p
4657
+
4658
+ p(τ) + Nε,
4659
+ (5.48)
4660
+ where N is independent of ε. Note that ¯u := u − v satisfies
4661
+ ∂α
4662
+ t ¯u = ∆¯u,
4663
+ 0 < t ≤ τ;
4664
+ ¯u(0, ·) = 0.
4665
+ Then, by the deterministic version of [25, Theorem 2.18], we have u(t, ·) = v(t, ·) for almost
4666
+ every (ω, t) ∈ Ω × [0, τ]. Thus, v is an Hγ−2ν
4667
+ p
4668
+ -valued continuous version of u. Additionally,
4669
+ from (5.48)
4670
+ E sup
4671
+ t≤τ
4672
+ ∥u(t, ·)∥p
4673
+ Hγ−2ν
4674
+ p
4675
+ ≤ N∥u∥p
4676
+
4677
+ p(τ) + Nε.
4678
+ In addition, as ε > 0 is arbitrary and N is independent of ε, we have (2.8). Furthermore,
4679
+ observe that we have (2.10) similarly. Additionally, notice that (5.46) allows us to prove
4680
+ the assertions with ∥f∥Hγ−2
4681
+ p
4682
+ (T) + ∥g∥Hγ−2+c0
4683
+ p
4684
+ (T,l2) + ∥u0∥Uα,γ
4685
+ p
4686
+ instead of ∥u∥Hγ
4687
+ p(T).
4688
+ Due to Lemma 2.10 (vi), we only consider the case γ = 2ν, where ν ∈ (0, 1) satisfies
4689
+ (2.7). Moreover, by using the approximation to the identity, we may assume that u0 is
4690
+ infinitely differentiable and compactly supported in x. Furthermore, we also assume that
4691
+ f and g = (g1, g2, . . . ) denotes the function of the form satisfying (5.2) (see [26, Theorem
4692
+ 3.10]). Additionally, it should be remarked that u can be written as
4693
+ u(t, x) = T 1
4694
+ t u0 + T 2
4695
+ t f + T 3
4696
+ t g
4697
+ since u satisfies
4698
+ ∂α
4699
+ t u = ∆u + f + ∂β
4700
+ t
4701
+ ˆ t
4702
+ 0
4703
+ gkdwk
4704
+ t ;
4705
+ u(0, ·) = u0(·)
4706
+ for almost every (ω, t) ∈ |(0, τ]] (see [22, 23, 24]).
4707
+
4708
4710
+ First, we prove (i).
4711
+ To obtain the continuity of u, notice that T 1
4712
+ t u0 is a continuous
4713
+ Lp-valued function in t on [0, T]. Indeed, by Remark 5.2 and Lemma 5.3,
4714
+ ∥T 1
4715
+ t u0 − u0∥Lp = N
4716
+ ����
4717
+ ˆ t
4718
+ 0
4719
+ (t − s)α−1T 1
4720
+ s ∆u0ds
4721
+ ����
4722
+ Lp
4723
+ ≤ N
4724
+ ˆ t
4725
+ 0
4726
+ (t − s)α−1∥T 1
4727
+ s ∆u0∥Lpds
4728
+ ≤ N
4729
+ ˆ t
4730
+ 0
4731
+ (t − s)α−1∥∆u0∥Lpds
4732
+ ≤ Ntα∥∆u0∥Lp
4733
+ for t > 0. Then, we have
4734
+ lim
4735
+ t↓0 ∥T 1
4736
+ t u0 − u0∥Lp → 0.
4737
+ (5.49)
4738
+ Additionally, for t, h > 0, Lemma 5.3 applies
4739
+ ��T 1
4740
+ t+hu0 − T 1
4741
+ t u0
4742
+ ��
4743
+ Lp ≤
4744
+ ���
4745
+ T 1
4746
+ t+hu0 − u0
4747
+
4748
+
4749
+
4750
+ T 1
4751
+ t u0 − u0
4752
+ ���
4753
+ Lp
4754
+
4755
+ ����
4756
+ ˆ t+h
4757
+ 0
4758
+ (t + h − s)α−1∆T 1
4759
+ s u0ds −
4760
+ ˆ t
4761
+ 0
4762
+ (t − s)α−1∆T 1
4763
+ s u0ds
4764
+ ����
4765
+
4766
+ ����
4767
+ ˆ t+h
4768
+ t
4769
+ (t + h − s)α−1∆T 1
4770
+ s u0ds
4771
+ ����
4772
+ Lp
4773
+ +
4774
+ ����
4775
+ ˆ t
4776
+ 0
4777
+ (t + h − s)α−1∆T 1
4778
+ s u0ds −
4779
+ ˆ t
4780
+ 0
4781
+ (t − s)α−1∆T 1
4782
+ s u0ds
4783
+ ����
4784
+ Lp
4785
+
4786
+ ˆ t+h
4787
+ t
4788
+ (t + h − s)α−1 ��∆T 1
4789
+ s u0
4790
+ ��
4791
+ Lp ds
4792
+ +
4793
+ ˆ t
4794
+ 0
4795
+
4796
+ (t − s)α−1 − (t + h − s)α−1� ��∆T 1
4797
+ s u0
4798
+ ��
4799
+ Lp ds
4800
+ ≤ α−1 (2hα + tα − (t + h)α) ∥∆u0∥Lp,
4801
+ and thus
4802
+ lim
4803
+ h→0
4804
+ ��T 1
4805
+ t+hu0 − T 1
4806
+ t u0
4807
+ ��
4808
+ Lp = 0.
4809
+ (5.50)
4810
+ Because C∞
4811
+ c (Rd) is dense in Lp, (5.49) and (5.50) imply that T 1
4812
+ t is continuous on Lp.
4813
+ For T 2
4814
+ t f and T 3
4815
+ t g, by combining (5.17), (5.19), Jensen’s inequality, and the Kolmogorov
4816
+ continuity theorem (e.g., [28, Theorem 1.4.8]), we have T 2
4817
+ t f ∈ C([0, T]; Hγ−2ν
4818
+ p
4819
+ ) and T 3
4820
+ t g ∈
4821
+ C([0, T]; Hγ−2ν
4822
+ p
4823
+ (l2)) almost surely.
4824
+ To demonstrate (2.8), we recall that ν is taken as in (2.7). Then, Lemma 5.3 implies that
4825
+ E sup
4826
+ t≤T
4827
+ ��T 1
4828
+ t u0
4829
+ ��p
4830
+ Hγ−2ν
4831
+ p
4832
+ ≤ NE∥u0∥p
4833
+ Lp ≤ NE∥u0∥p
4834
+ H
4835
+ γ− 2
4836
+ αp
4837
+ p
4838
+ .
4839
+ (5.51)
4840
+ Additionally, as (2.7) is assumed, Lemma 2.10 (vi) and (5.17) with θ = α − αν, and
4841
+ lims↓0 ∥T 2
4842
+ s f∥Hγ−2ν
4843
+ p
4844
+ = 0 yield
4845
+ E sup
4846
+ t≤T
4847
+ ��T 2
4848
+ t f
4849
+ ��p
4850
+ Hγ−2ν
4851
+ p
4852
+ ≤ N∥f∥p
4853
+ Hγ−2
4854
+ p
4855
+ (T).
4856
+ (5.52)
4857
+
4858
4860
+ Furthermore, due to (2.7), Lemma 2.10 (vi), (5.19) with θ = α(1 − c0/2) − αν, and
4861
+ lims↓0 ∥T 3
4862
+ s g∥Hγ−2ν
4863
+ p
4864
+ = 0, we have
4865
+ E sup
4866
+ t≤T
4867
+ ��T 3
4868
+ t g
4869
+ ��p
4870
+ Hγ−2ν
4871
+ p
4872
+ ≤ N∥g∥p
4873
+ Hγ−2+c0
4874
+ p
4875
+ (T,l2),
4876
+ (5.53)
4877
+ where c0 is the constant introduced in (2.3). By combining (5.51), (5.52), and (5.53), we
4878
+ obtain (2.8).
4879
+ Next we prove (ii). Due to (5.15), (5.18), and (5.20) and Theorem 2.16 (i), we have
4880
+ u ∈ Cαµ−1/p([δ, T]; Hγ−2ν
4881
+ p
4882
+ ) almost surely. To demonstrate (2.10), choose µ and ν satisfy
4883
+ (2.9). Observe that Lemma 5.5 implies that
4884
+ E
4885
+ sup
4886
+ δ≤s<t≤T
4887
+ ∥T 1
4888
+ t u0 − T 1
4889
+ s u0∥p
4890
+ Hγ−2ν
4891
+ p
4892
+ |t − s|αµp−1
4893
+ ≤ NE ∥u0∥p
4894
+ Hγ−2ν
4895
+ p
4896
+ .
4897
+ (5.54)
4898
+ As (2.7) is assumed, Lemma 2.10 (vi) and (5.18) with θ = α − αν yield the following:
4899
+ E
4900
+ sup
4901
+ 0≤s<t≤τ
4902
+ ∥T 2
4903
+ t f − T 2
4904
+ 2 f∥p
4905
+ Hγ−2ν
4906
+ p
4907
+ |t − s|αµp−1
4908
+ ≤ N∥f∥p
4909
+ Hγ−2
4910
+ p
4911
+ (τ).
4912
+ Furthermore, by (2.7), Lemma 2.10 (vi), and (5.20) with θ = α(1 − c0/2) − αν, we have
4913
+ E
4914
+ sup
4915
+ 0≤s<t≤T
4916
+ ��T 3
4917
+ t g − T 3
4918
+ s g
4919
+ ��p
4920
+ Hγ−2ν
4921
+ p
4922
+ (l2)
4923
+ |t − s|αµp−1
4924
+ ≤ N∥g∥p
4925
+ Hγ−2+c0
4926
+ p
4927
+ (τ,l2).
4928
+ The theorem is proved.
4929
+
4930
+ Remark 5.8. It should be noted that if u0 = 0, we can consider δ = 0 in Theorem 2.16 (ii)
4931
+ since an estimate of T 1
4932
+ t u0 (5.54) is not required.
4933
+ References
4934
+ [1] Garra, Roberto. Fractional-calculus model for temperature and pressure waves in fluid-saturated porous
4935
+ rocks. Physical Review E, 84(3):036605, 2011.
4936
+ [2] Keller, Jakob J. Propagation of simple non-linear waves in gas filled tubes with friction. Zeitschrift f¨ur
4937
+ angewandte Mathematik und Physik ZAMP, 32(2):170–181, 1981.
4938
+ [3] Tayyaba Akram, Muhammad Abbas, Muhammad Riaz, Ahmad Ismail, and Norhashidah Ali. An effi-
4939
+ cient numerical technique for solving time fractional Burgers equation. Alexandria Engineering Journal,
4940
+ 59(4):2201–2220, 2020.
4941
+ [4] Ricardo Almeida. A Gronwall inequality for a general caputo fractional operator. Mathematical Inequal-
4942
+ ities Applications, 20(4), 2017.
4943
+ [5] Ronald Bagley and Peter Torvik. Fractional calculus in the transient analysis of viscoelastically damped
4944
+ structures. AIAA journal, 23(6):918–925, 1985.
4945
+ [6] Dumitru Baleanu, Kai Diethelm, Enrico Scalas, and Juan Trujillo. Fractional calculus: models and
4946
+ numerical methods, volume 3. World Scientific, 2012.
4947
+ [7] Zhen-Qing Chen, Kyeong-Hun Kim, and Panki Kim. Fractional time stochastic partial differential
4948
+ equations. Stochastic Processes and their Applications, 125(4):1470–1499, 2015.
4949
+ [8] Hongjie Dong and Doyoon Kim. Lp-estimates for time fractional parabolic equations with coefficients
4950
+ measurable in time. Advances in Mathematics, 345:289–345, 2019.
4951
+ [9] Talaat El-Danaf and Adel Hadhoud. Parametric spline functions for the solution of the one time frac-
4952
+ tional Burgers’ equation. Applied Mathematical Modelling, 36(10):4557–4564, 2012.
4953
+ [10] Alaattin Esen and O. Tasbozan. Numerical solution of time fractional Burgers equation.
4954
+ Acta Universitatis Sapientiae, Mathematica, 7(2):167–185, 2015.
4955
+ [11] Massimiliano Giona and Hector Eduardo Roman. Fractional diffusion equation for transport phenomena
4956
+ in random media. Physica A: Statistical Mechanics and its Applications, 185(1-4):87–97, 1992.
4957
+ [12] Loukas Grafakos. Modern fourier analysis, volume 250. Springer, 2009.
4958
+
4959
4961
+ [13] Istv´an Gy¨ongy. Existence and uniqueness results for semilinear stochastic partial differential equations.
4962
+ Stochastic Processes and their Applications, 73(2):271–299, 1998.
4963
+ [14] Istv´an Gy¨ongy and David Nualart. On the stochastic Burgers’ equation in the real line. The Annals of
4964
+ Probability, 27(2):782–802, 1999.
4965
+ [15] Beom-Seok Han. Lp-regularity theory for semilinear stochastic partial differential equations with mul-
4966
+ tiplicative white noise. Journal of Mathematical Analysis and Applications, page 126366, 2022.
4967
+ [16] Beom-Seok Han. A regularity theory for stochastic generalized Burgers’ equation driven by a multiplica-
4968
+ tive space-time white noise. Stochastics and Partial Differential Equations: Analysis and Computations,
4969
+ pages 1–41, 2022.
4970
+ [17] Richard Herrmann. Fractional calculus: an introduction for physicists. World Scientific, 2011.
4971
+ [18] Rudolf Hilfer. Applications of fractional calculus in physics. World scientific, 2000.
4972
+ [19] Mitsunojo Ichise, Yutaka Nagayanagi, and Tsugio Kojima. An analog simulation of non-integer or-
4973
+ der transfer functions for analysis of electrode processes. Journal of Electroanalytical Chemistry and
4974
+ Interfacial Electrochemistry, 33(2):253–265, 1971.
4975
+ [20] Mustafa Inc. The approximate and exact solutions of the space-and time-fractional Burgers equations
4976
+ with initial conditions by variational iteration method. Journal of Mathematical Analysis and Applica-
4977
+ tions, 345(1):476–484, 2008.
4978
+ [21] Anatoly Kilbas, Oleg Marichev, and Stefan Samko. Fractional integrals and derivatives (theory and
4979
+ applications), 1993.
4980
+ [22] Ildoo Kim, Kyeong-Hun Kim, and Sungbin Lim. An Lq(Lp)-theory for the time fractional evolution
4981
+ equations with variable coefficients. Advances in Mathematics, 306:123–176, 2017.
4982
+ [23] Ildoo Kim, Kyeong-Hun Kim, and Sungbin Lim. A Sobolev space theory for stochastic partial differential
4983
+ equations with time-fractional derivatives. The Annals of Probability, 47(4):2087–2139, 2019.
4984
+ [24] Kyeong-Hun Kim and Sungbin Lim. Asymptotic behaviors of fundamental solution and its derivatives
4985
+ related to space-time fractional differential equations. Journal of the Korean Mathematical Society,
4986
+ 53(4):929–967, 2015.
4987
+ [25] Kyeong-Hun Kim and Daehan Park. A Sobolev space theory for the time-fractional stochastic partial
+ differential equations driven by Lévy processes. arXiv preprint arXiv:2006.05050, 2020.
4989
+ [26] Nicolai Krylov. An analytic approach to SPDEs. Stochastic partial differential equations: six perspec-
4990
+ tives, 64:185–242, 1999.
4991
+ [27] Nicolai Krylov. Lectures on elliptic and parabolic equations in Sobolev spaces, volume 96. American
4992
+ Mathematical Society, 2008.
4993
+ [28] Nicolai Krylov. Introduction to the theory of random processes, volume 43. American Mathematical
4994
+ Society, 2002.
4995
+ [29] Peter Lewis and David Nualart. Stochastic Burgers’ equation on the real line: regularity and moment
4996
+ estimates. Stochastics, 90(7):1053–1086, 2018.
4997
+ [30] Dongfang Li, Chengjian Zhang, and Maohua Ran. A linear finite difference scheme for generalized time
4998
+ fractional Burgers’ equation. Applied Mathematical Modelling, 40(11-12):6069–6081, 2016.
4999
+ [31] Richard Magin. Fractional calculus in bioengineering, part 1. Critical Reviews™ in Biomedical Engi-
5000
+ neering, 32(1), 2004.
5001
+ [32] Igor Podlubny. Fractional differential equations: an introduction to fractional derivatives, fractional
5002
+ differential equations, to Methods of their solution and some of their applications. ISSN. Elsevier Science,
5003
+ 1998.
5004
+ [33] Hun H Sun, Banu Onaral, and Yuan-Ying Tso. Application of the positive reality principle to metal
5005
+ electrode linear polarization phenomena. IEEE Transactions on Biomedical Engineering, (10):664–674,
5006
+ 1984.
5007
+ [34] Hans Triebel. Theory of Function Spaces. Modern Birkh¨auser Classics. Springer Basel, 2010.
5008
+ [35] John Walsh. An introduction to stochastic partial differential equations. In ´Ecole d’ ´Et´e de Probabilit´es
5009
+ de Saint Flour XIV-1984, pages 265–439. Springer, 1986.
5010
+ [36] Guang-an Zou and Bo Wang. Stochastic Burgers’ equation with fractional derivative driven by multi-
5011
+ plicative noise. Computers & Mathematics with Applications, 74(12):3195–3208, 2017.
5012
+ Department of Mathematics, Pohang University of Science and Technology, 77, Cheongam-
5013
+ ro, Nam-gu, Pohang, Gyeongbuk, 37673, Republic of Korea
5014
+ Email address: hanbeom@postech.ac.kr
5015
+
8dAyT4oBgHgl3EQfp_jS/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8tE2T4oBgHgl3EQfPwar/content/tmp_files/2301.03763v1.pdf.txt ADDED
@@ -0,0 +1,2171 @@
1
+ arXiv:2301.03763v1 [quant-ph] 10 Jan 2023
2
+ Quantum Speedups for Zero-Sum Games
3
+ via Improved Dynamic Gibbs Sampling
4
+ Adam Bouland
5
+ Yosheb Getachew
6
+ Yujia Jin
7
+ Aaron Sidford
8
+ Kevin Tian∗
9
+ {abouland,yoshebg,yujiajin,sidford}@stanford.edu, tiankevin@microsoft.com
10
+ Abstract
11
+ We give a quantum algorithm for computing an ǫ-approximate Nash equilibrium of a zero-
12
+ sum game in an m × n payoff matrix with bounded entries. Given a standard quantum oracle
+ for accessing the payoff matrix, our algorithm runs in time �O(√m + n · ǫ−2.5 + ǫ−3) and outputs
14
+ a classical representation of the ǫ-approximate Nash equilibrium. This improves upon the best
15
+ prior quantum runtime of �O(√m + n·ǫ−3) obtained by [vAG19] and the classic �O((m+n)·ǫ−2)
16
+ runtime due to [GK95] whenever ǫ = Ω((m + n)−1). We obtain this result by designing new
17
+ quantum data structures for efficiently sampling from a slowly-changing Gibbs distribution.
18
+ ∗Work partly completed while at Stanford.
19
+
20
+ 1 Introduction
22
+ There is now a broad family of quantum algorithms for machine learning and fast numerical linear al-
23
+ gebra [BWP+17], built on many quantum algorithmic primitives, e.g. [BHMT02, HHL09, GSLW19].
24
+ More specifically, for a wide range of problems it has been shown how quantum algorithms can (in
25
+ certain parameter regimes) yield faster runtimes.1 These quantum algorithms obtain runtimes which
26
+ improve upon the dimension dependence of classical algorithms, but often at the cost of a worse
27
+ dependence on the error tolerance and/or implicit access to the solution (e.g. query or sampling
28
+ access for solution entries). Consequently, this paper is motivated by the following question.
29
+ To what degree is there an inherent accuracy versus dimension-dependence tradeoff for
30
+ quantum optimization algorithms? What algorithmic techniques improve this tradeoff?
31
+ In this paper we consider this question for the fundamental optimization problem of computing
32
+ ǫ-approximate Nash equilibrium in zero-sum games. Our main result is an improved dependence
33
+ on ǫ for quantum algorithms solving zero-sum games, which is very close to that of its classical
34
+ counterpart. Further, we show that for our algorithms, a classical representation of the
+ solution is obtainable at no additional asymptotic cost. Our work builds upon [vAG19, LCW19],
36
+ which already took a large and important step towards answering the above question by designing
37
+ quantum data structures for efficiently implementing algorithms for solving zero-sum games.
38
+ Interestingly, to obtain our result we provide improved quantum algorithms for solving a dy-
39
+ namic data structure problem of sampling from a slowly-changing Gibbs distribution. Such dynamic
40
+ sampling problems arise as a natural component of stochastic gradient methods for solving zero-sum
41
+ games. We obtain our speedups by improving a Gibbs sampling subroutine developed in [vAG19].
42
+ We design a new dynamic quantum data structure which performs the necessary Gibbs sampling in
43
+ time �O(ǫ^{−1/2}), which is faster than the corresponding �O(ǫ−1) runtime achieved by [vAG19]. Beyond
45
+ the intrinsic utility of solving this problem, we hope our improved Gibbs sampler showcases poten-
46
+ tial algorithmic insights that can be gleaned by seeking improved error dependencies for quantum
47
+ optimization algorithms. Moreover, we hope this work encourages the study and design of quantum
48
+ data structures for efficient optimization.
49
+ 1.1 Zero-sum games
51
+ For matrix A ∈ Rm×n its associated zero-sum game is the pair of equivalent optimization problems
52
+ min_{u∈∆m} max_{v∈∆n} u⊤Av = max_{v∈∆n} min_{u∈∆m} u⊤Av,  where ∆^k := {x ∈ R^k_{≥0} : Σ_{i∈[k]} x_i = 1}.
59
+ In such a game, we refer to A as the payoff matrix and view the m and n-dimensional simplices, i.e.
60
+ ∆m and ∆n, as the space of distributions over [m] and [n] respectively. From this perspective u⊤Av,
61
+ known as payoff or utility of (u, v), is the expected value of Aij when sampling i ∈ [m] and j ∈ [n]
62
+ independently from the distributions corresponding to u and v. Thus, a zero-sum game models a
63
+ two-player game where a minimization player seeks to minimize the payoff while, simultaneously, a
64
+ maximization player seeks to maximize it.
65
+ In this paper, we consider the canonical problem of computing an approximate Nash equilibrium
66
+ of a zero-sum game. Given the payoff matrix A ∈ Rm×n we call a pair (u, v) ∈ ∆m × ∆n an ǫ-
67
+ approximate Nash equilibrium (NE) for ǫ ∈ R>0 if
68
+
69
+ max_{v′∈∆n} u⊤Av′ − min_{u′∈∆m} (u′)⊤Av ≤ ǫ.
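+ (Illustrative aside, not from the paper: since a linear objective over a simplex is extremized at a vertex, the left-hand side above can be evaluated exactly for any candidate pair (u, v); the helper below is a hypothetical classical check.)
+ import numpy as np
+
+ def nash_gap(A, u, v):
+     # max_{v' in Delta_n} u^T A v'  -  min_{u' in Delta_m} (u')^T A v
+     # = max_j (A^T u)_j - min_i (A v)_i, since the extrema sit at coordinates
+     A = np.asarray(A, dtype=float)
+     return float(np.max(A.T @ u) - np.min(A @ v))
+
+ # matching pennies: the uniform pair is an exact Nash equilibrium (gap 0)
+ A = np.array([[1.0, -1.0], [-1.0, 1.0]])
+ print(nash_gap(A, np.array([0.5, 0.5]), np.array([0.5, 0.5])))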
78
+ 1Note that quantifying the end-to-end speedups obtained by these methods can be subtle due to I/O overheads,
79
+ different access models [Aar15], and classical de-quantization algorithms [Tan19, CGL+20, GLG22].
80
81
+
82
+ We assume that the payoff matrix A and the error-tolerance are given as input to an algorithm, and
83
+ that, for simplicity, ∥A∥max ≤ 1, i.e. the largest entry of A has magnitude at most 1 (this is without
84
+ loss of generality by rescaling A ← ∥A∥^{−1}_{max} A and ǫ ← ∥A∥^{−1}_{max} ǫ). The main goal of this paper is
87
+ to design improved zero-sum game solvers, i.e. algorithms that compute ǫ-approximate NEs.
88
+ Zero-sum games are foundational to theoretical computer science, optimization, and economics.
89
+ The problem of approximately solving zero-sum games is a natural formulation of approximate linear
90
+ programming (LP) and correspondingly, this problem is a prominent testbed for new optimization
91
+ techniques. Over the past decades there have been numerous advances in the computational com-
92
+ plexity of solving zero-sum games under various assumptions on problem parameter (see Section 1.3
93
+ for a survey). Recent advancements in interior point methods (IPMs) for linear programming, e.g.
94
+ [vdBLL+21] and references therein (discussed in more detail in Section 1.3), solve zero-sum games
95
+ in time �O(mn + min(m, n)2.5).2 Further the linear programming algorithm of [vdB20], shows that
96
+ zero-sum games can be solved deterministically in �O((m+n)ω) time where ω < 2.373 is the current
97
+ matrix multiplication constant [AW21], or �O((m + n)3) without fast matrix multiplication. In this
98
+ paper, we primarily focus on sublinear-time algorithms for approximating NEs.
99
+ A well-known algorithm by [GK95] achieves a runtime of �O((m + n) · ǫ−2), which is the state-
100
+ of-the-art sublinear runtime amongst classical algorithms, without further problem assumptions.
101
+ Recently it has been shown that quantum algorithms can yield strikingly runtime improvements for
102
+ solving zero-sum games and their generalizations [LCW19, vAG19, LWCW21]. In particular, in 2019
103
+ Li, Chakrabarti and Wu [LCW19] gave a quantum algorithm for zero-sum games in time �O(√m + n·
104
+ ǫ−4), and simultaneously van Apeldoorn and Gilyen [vAG19] gave an algorithm running in time
105
+ �O(√m + n · ǫ−3). These algorithms yield a quadratic improvement in the dimension dependence of
106
+ the best classical algorithm, at the cost of a higher error dependence.
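+ (For intuition, a schematic and unoptimized classical loop in the spirit of the [GK95] algorithm mentioned above is sketched below: each player keeps exponential weights and is updated from a single sampled row or column per iteration, so one iteration touches O(m + n) entries. The step size and iteration count are indicative placeholders, not the tuned constants of [GK95].)
+ import numpy as np
+
+ def smd_zero_sum(A, eps, rng=np.random.default_rng(0)):
+     m, n = A.shape
+     eta = eps / 2                                            # placeholder step size
+     T = max(1, int(np.ceil(4 * np.log(m * n) / eps ** 2)))   # placeholder iteration count
+     su, sv = np.zeros(m), np.zeros(n)                        # accumulated sampled payoffs
+     u_avg, v_avg = np.zeros(m), np.zeros(n)
+     for _ in range(T):
+         u = np.exp(-(su - su.min())); u /= u.sum()   # min player: Gibbs weights on -su
+         v = np.exp(sv - sv.max()); v /= v.sum()      # max player: Gibbs weights on sv
+         i, j = rng.choice(m, p=u), rng.choice(n, p=v)
+         su += eta * A[:, j]                          # penalize rows that pay the max player
+         sv += eta * A[i, :]                          # reward columns that pay the max player
+         u_avg += u / T; v_avg += v / T
+     return u_avg, v_avg                              # averaged iterates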
107
+ The algorithms of [LCW19, vAG19, LWCW21] operate using a standard quantum oracle for A
108
+ (formally stated in Section 2), in which one can query the entries of A in superposition. We focus on
109
+ the algorithm of [vAG19] for the rest of this paper, as we focus on improving error dependence. The
110
+ [vAG19] algorithm generalizes the classical algorithm of Grigoriadis and Khachiyan [GK95], and
111
+ obtains a runtime improvement by speeding up a key dynamic Gibbs sampling subroutine required
112
+ by the [GK95] method. As we discuss in greater detail in Section 3, van Apeldoorn and Gilyen give
113
+ a quantum data structure to efficiently perform this sampling in time quadratically faster in the
114
+ dimension, which lies at the core of their algorithmic speedup.
115
+ Our result.
116
+ We give a new quantum algorithm for solving zero-sum games which improves upon
117
+ the runtime of the prior state-of-the-art quantum algorithm, due to [vAG19].
118
+ Theorem 1 (informal, see Theorem 4). Let A ∈ Rm×n with ∥A∥max ≤ 1, and ǫ ∈ (0, 1). Given
119
+ a quantum oracle for A (defined in Section 2), there is an �O(√m + n · ǫ−2.5 + ǫ−3) time algorithm
120
+ which yields a classical output (u, v) ∈ ∆m × ∆n that is an ǫ-approximate NE with high probability.
121
+ Our new algorithm simultaneously improves the best known quantum [vAG19] and classical
122
+ [GK95] algorithms in the parameter regime where IPMs do not dominate sublinear algorithms. In
123
+ particular, it is faster than the classical �O((m+n)·ǫ−2) runtime of [GK95] whenever ǫ−1 = �O(m+n),
124
+ which includes the regime where [GK95] offers advantages over the �O((m + n)ω) runtime of the
125
+ [vdB20] IPM, as ω < 3. This is in contrast to the prior quantum rate of [vAG19], which does
126
+ not achieve an improvement upon [GK95] in the full parameter range where sublinear algorithms
127
+ 2We use the �O notation to hide polylogarithmic dependences on problem parameters when convenient for exposi-
128
+ tion; see Section 2 for a more detailed statement of hidden parameters. In informal theorem statements, we use “with
129
+ high probability” to indicate a polylogarithmic dependence on the failure probability.
130
131
+
132
+ are currently preferable to IPMs.
133
+ For example, when m ≈ n and (up to logarithmic factors)
134
+ ǫ ∈ [n^{−c}, n^{−1/2}] where c = (ω − 1)/2, the rate of [GK95] is favorable to that of [vAG19] and state-of-
137
+ the-art IPMs [vdB20, CLS21].3
138
+ 1.2 Dynamic Gibbs sampling
140
+ We obtain the improved error dependence in our zero-sum game solver by producing a new, faster
141
+ quantum data structure to perform the Gibbs sampling as used in the algorithm of [vAG19], which
142
+ may be of independent interest. Gibbs sampling is a fundamental algorithmic primitive — the basic
143
+ task is, given vector v ∈ Rn, sample from the probability distribution proportional to exp(v). Gibbs
144
+ sampling is used as a subroutine in many quantum and classical optimization algorithms, e.g. [BS17]
145
+ and follow-up works. In general, quantum algorithms can perform this task more efficiently using
146
+ amplitude estimation, which can boost the acceptance probability of rejection sampling schemes.
147
+ This strategy was implemented in [vAG19], which approximates the maximum entry vmax of v
+ using quantum maximum finding [DH96], uniformly samples i ∈ [n], and accepts the sample with
+ probability exp(vi − vmax) ≤ 1 using quantum rejection sampling. We give a more detailed overview
150
+ of the [vAG19] Gibbs sampler and its complexity analysis in Section 3.2.
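+ To make this concrete, the following is a minimal classical sketch (not the quantum routine itself) of
+ the max-based rejection sampling rule just described; the quantum algorithm replaces the explicit
+ trial loop with maximum finding and amplitude amplification.
+ import math, random
+
+ def gibbs_sample_max_based(v):
+     # Propose a uniform index and accept with probability exp(v_i - v_max) <= 1;
+     # accepted indices are distributed proportionally to exp(v_i).
+     v_max = max(v)
+     n = len(v)
+     while True:
+         i = random.randrange(n)
+         if random.random() < math.exp(v[i] - v_max):
+             return i
+ The expected number of classical trials is n · exp(vmax)/Σj exp(vj); amplitude amplification brings
+ this down to roughly its square root.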
151
+ We give a data structure which quadratically improves the error dependence of the [vAG19]
+ Gibbs sampling subroutine runtime, from Õ(√(m + n) · ǫ−1) per sample to an amortized
+ Õ(√(m + n) · ǫ−1/2) per sample. A key fact which enables this improvement is that the Gibbs distributions one
155
+ samples from in the zero-sum game solver of [GK95] change slowly over time: the base vector v
156
+ receives bounded sparse updates in each iteration. By storing partial information about the Gibbs
157
+ distribution, namely an efficiently-computable overestimate to its entries which remains valid across
158
+ many consecutive iterations, we obtain an improved dynamic Gibbs sampler, which we also provide
159
+ a detailed overview of in Section 3.2.
160
+ We now define our notion of an approximate Gibbs sampler, and then state the dynamic sampling
161
+ problem we consider, which arises naturally in zero-sum game algorithms with sublinear runtimes.
162
+ Definition 1 (Approximate Gibbs oracle). For v ∈ Rn, its associated Gibbs distribution is pv ∈ ∆n
+ such that for all i ∈ [n], [pv]i ∝ exp(vi). We say Ogibbs_v is a δ-approximate Gibbs oracle if it samples
+ from ˜p ∈ ∆n with ∥˜p − pv∥1 ≤ δ.
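+ As a point of reference, an exact (δ = 0) Gibbs oracle for an explicitly given v is a one-line softmax
+ sampler; the following minimal numpy sketch shifts by max(v) for numerical stability. A δ-approximate
+ oracle may instead sample from any ˜p with ∥˜p − pv∥1 ≤ δ.
+ import numpy as np
+
+ def exact_gibbs_oracle(v, rng=None):
+     # Sample i in [n] with probability proportional to exp(v_i).
+     rng = rng or np.random.default_rng()
+     w = np.exp(np.asarray(v) - np.max(v))   # shift for numerical stability
+     return rng.choice(len(v), p=w / w.sum())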
167
+ Problem 1 (Sampling maintenance). Let η > 0, δ ∈ (0, 1), and suppose we have a quantum oracle
+ for A ∈ Rm×n. Consider a sequence of T Update operations to a dynamic vector x ∈ Rm≥0, each
+ of the form xi ← xi + η for some i ∈ [m]. In the sampling maintenance problem, in amortized
+ Tupdate time per Update we must maintain a δ-approximate Gibbs oracle, Osamp, for A⊤x which is
+ queryable in worst-case time Tsamp.
173
+ Our result.
174
+ We provide a quantum algorithm for solving Problem 1, which improves upon the
175
+ runtime implied by the corresponding component in the algorithm of [vAG19].
176
+ Theorem 2 (informal, see Theorem 3). There is a quantum algorithm which solves Problem 1 with
+ high probability with max(Tsamp, Tupdate) = Õ(√n · Tη^1.5) and an initialization cost of Õ(η^3 T^3).
183
+ Theorem 2 improves upon the solution to the sampling maintenance Problem 1 implied by
+ [vAG19] by an η−1/2 factor; in the setting of the [GK95] solver, where T = Õ(ǫ−2) and η = Θ(ǫ),
+ this is an ǫ−1/2-factor improvement.
188
+ At a high level, our improvement is obtained by storing a
189
+ hint consisting of a vector which overestimates the true Gibbs distribution, and an approximate
190
+ 3There is evidence that ω = 2 cannot be achieved with current techniques, e.g. [Alm21].
191
193
+ Table 1: Algorithms for computing ǫ-approximate Nash equilibria of zero-sum games.
+ Hides polylogarithmic factors and assumes A ∈ Rm×n with ∥A∥max ≤ 1.
+ Method                                   | Query model | Total runtime
+ interior point method [CLS21]            | classical   | max(m, n)^ω
+ interior point method [vdBLL+21]         | classical   | mn + min(m, n)^2.5
+ extragradient [Nem04, Nes07]             | classical   | mn · ǫ−1
+ stochastic mirror descent (SMD) [GK95]   | classical   | (m + n) · ǫ−2
+ variance-reduced SMD [CJST19]            | classical   | mn + √(mn(m + n)) · ǫ−1
+ [vAG19]                                  | quantum     | √(m + n) · ǫ−3
+ Theorem 1 (our work)                     | quantum     | √(m + n) · ǫ−2.5 + ǫ−3
223
+ Table 2: Solutions to Problem 1, T = ǫ−2, η = ǫ. Hides polylogarithmic factors.
+ Method                                 | Query model | Tsamp             | Tupdate
+ explicit updates [GK95]                | classical   | 1                 | m + n
+ max-based rejection sampling [vAG19]   | quantum     | √(m + n) · ǫ−1    | √(m + n) · ǫ−1
+ Theorem 2 (our work)                   | quantum     | √(m + n) · ǫ−1/2  | √(m + n) · ǫ−1/2
246
+ normalization factor, which are infrequently updated. Our maintained hint satisfies the desirable
247
+ properties that: (i) it remains valid for a batch of consecutive iterations, and (ii) the degree of
248
+ overestimation is bounded. The former property ensures a fast amortized update time, and the
249
+ latter ensures a fast sample time by lower bounding the acceptance probability of our quantum
250
+ rejection sampler. Our high-level strategy for maintaining improved hints is to repeatedly call our
251
+ sampling access to accurately estimate large entries of the Gibbs distribution, and to exploit stability
252
+ of the distribution under the setting of Problem 1. We discuss our dynamic Gibbs sampler in more
253
+ detail and compare it with previous methods for solving Problem 1 in Section 3.2.
254
+ The initialization cost of Theorem 2 is due to the current state-of-the-art in numerically stable
255
+ implementations of the quantum singular value transformation (SVT) framework of [GSLW19].
256
+ This cost is also the cause of the additive Õ(ǫ−3) term in Theorem 1. We discuss this cost in
258
+ Appendix D; improvements to numerically stable implementations of [GSLW19] would be reflected
259
+ in the runtimes of Theorems 1 and 2.
260
+ 1.3
261
+ Related work
262
+ Quantum optimization and machine learning.
263
+ There is a wide array of quantum algorithms
264
+ for optimization and machine learning which make use of fundamental algorithmic primitives such
265
+ as amplitude amplification [BHMT02], the HHL algorithm [HHL09], and the quantum singular
266
+ value transformation [GSLW19]. For example, a number of works gave HHL-based algorithms for
267
+ a variety of machine learning tasks such as PCA [LMR14], SVMs [RML14], and recommendation
268
+ systems [KP16]. For more details see the survey article of [BWP+17].
269
+ Most relevant to our current work are quantum algorithms for optimization problems.
270
+ For
271
+ example, Brandao and Svore [BS17] gave a quantum algorithm for SDP solving based on the Arora-
272
274
+ Kale algorithm [AK07], which was later improved by [VAGGdW20b]. There have also been quantum
275
+ IPM-based methods for LPs and SDPs [KP20].
276
+ Additionally a series of works have considered
277
+ quantum algorithms for general convex optimization [CCLW20, vAGGdW20a], which make use of
278
+ Jordan’s algorithm for fast gradient estimation [Jor05, GAW19].
279
+ In the area of zero-sum games, in addition to the works previously mentioned [vAG19, LCW19]
280
+ on ℓ1-ℓ1 games (where both players are ℓ1-constrained), there have been several works considering
281
+ different variants of zero-sum games. For example, Li, Chakrabarti and Wu [LCW19] gave quan-
+ tum algorithms for ℓ2-ℓ1 games with a quadratic improvement in the dimension. Later Li, Wang,
+ Chakrabarti and Wu [LWCW21] extended this algorithm to more general ℓq-ℓ1 games with q ∈ (1, 2].
284
+ Zero-sum games.
285
+ Zero-sum games are a canonical modeling tool in optimization, economics and
286
+ machine learning [Neu28]. The classic extragradient (mirror prox) method [Nem04, Nes07] computes
287
+ an ǫ-approximate NE in �O(mn · ǫ−1) time; as discussed previously, the stochastic mirror descent
288
+ method of [GK95] obtains the same accuracy in time �O((m + n) · ǫ−2). An intermediate runtime
289
+ was recently obtained by [CJST19] using variance reduction, described in Table 1.
290
+ Improved runtimes are available under more fine-grained characterizations of the matrix A, such
291
+ as sparsity (e.g. number of nonzero entries per row or column) or numerical sparsity (e.g. rows and
292
+ columns with bounded ℓ1-to-ℓ2 norm ratios) [CJST20]. Notably, the [GK95] algorithm also offers
293
+ runtime improvements under a sparsity assumption, as does the algorithm of [vAG19] in certain
294
+ sparsity-to-accuracy ratio regimes. In this paper, we focus on NE algorithms in the general setting
295
+ (without further sparsity or numerical sparsity assumptions).
296
+ In parallel, a long line of research improving IPMs for solving linear programming [Kar84,
297
+ Ren88, LS14, LS19, vdBLSS20, JSWZ21] has led to a number of different zero-sum game solvers
298
+ with polylogarithmic runtime dependencies on the problem accuracy ǫ. The current state-of-the-
299
+ art variants of IPMs are [CLS21] and [vdBLL+21], which achieve runtimes of �O(max(m, n)ω) and
300
+ �O(mn + min(m, n)2.5) respectively. We refer readers to Table 1 for detailed comparisons. Finally,
301
+ for strongly polynomial runtimes (i.e. with no dependence on ǫ), which are outside the scope of this
302
+ paper, we refer readers to [DNV20] and references therein.
303
+ 1.4
304
+ Future work
305
+ Theorem 1’s ǫ dependence is within an ǫ−1/2 factor of matching classical counterparts. To the best
+ of our knowledge, removing this ǫ−1/2 overhead would represent the first quantum algorithm for a
309
+ natural optimization problem which improves upon classical counterparts across all parameters.
310
+ Both our work and [vAG19] solve Problem 1 by leveraging a powerful polynomial approximation-
311
+ based technique developed in [GSLW19], known as the quantum singular value transform (QSVT). In
312
+ both cases, QSVT is used with a polynomial of degree �O(ǫ−1). We note that in closely-related classi-
313
+ cal settings (discussed in [SV14]), Chebyshev polynomial-based approximations yield a quadratically
314
+ smaller degree. However, a boundedness requirement (due to the spectra of quantum gates) pre-
315
+ vents straightforwardly applying these constructions within QSVT. Sidestepping this barrier is a
316
+ natural avenue towards improving our work, which we leave as an open problem.
317
+ More generally, establishing optimal oracle query complexities of dynamic Gibbs sampling (e.g.
318
+ Problem 1) and solving zero-sum games are key problems left open by our work. These questions
319
+ are potentially more approachable than establishing tight time complexity characterizations. For
320
+ example, could max(Tsamp, Tupdate) be improved to Õ(√n) in the context of Theorem 1, or can we
321
+ rule out such an improvement in the query model?
322
324
+ 1.5
325
+ Organization
326
+ In Section 2 we state the notation used throughout the paper, as well as the (classical and quantum)
327
+ computational models we assume.
328
+ In Section 3, we give a brief technical overview of the core
329
+ components of our algorithm used to prove Theorem 1: the stochastic gradient method our method
330
+ is built on, and an efficient quantum implementation of a key subroutine using a new dynamic Gibbs
331
+ sampler. Finally in Section 4 we give our new quantum sampler, and prove Theorem 2.
332
+ We aim to give a self-contained, but simplified, description of our algorithm in Section 3 to
333
+ improve the readability of the paper for readers with an optimization background unfamiliar with
334
+ quantum computing, and vice versa. In particular, we abstract away the core optimization machin-
335
+ ery (stochastic mirror descent) and quantum machinery (quantum SVT) developed in prior work
336
+ into the statements of Propositions 1 and 2, and focus on how we use these statements black-box
337
+ to build a faster algorithm. The proofs of these statements can be found in Appendices A and B.
338
+ 2
339
+ Preliminaries
340
+ General notation.
341
+ Õ hides logarithmic factors in problem dimensions (denoted m and n), target
+ accuracies (denoted ǫ), and failure probabilities (denoted α). When discussing runtimes for Prob-
+ lem 1, we additionally use Õ to hide logarithmic factors in the parameters η, T. For i ∈ [n], we let
+ ei ∈ Rn denote the ith standard basis vector when n is clear from context. ∥·∥p denotes the ℓp norm of
345
+ a vector. For A ∈ Rm×n, its ith row and jth column are respectively Ai:, A:j. For v ∈ Rn, diag (v)
346
+ is the diagonal n × n matrix with v as the diagonal. Conjugate transposes of A are denoted A∗;
347
+ when the matrix is real we use A⊤. The all-ones and all-zeros vectors of dimension n are 1n and
348
+ 0n. Finally, throughout a := ⌈log2 m⌉ and b := ⌈log2 n⌉, so [m] ⊆ [2a] and [n] ⊆ [2b].
349
+ Computation models.
350
+ We assume entries of A are w-bit reals for w = O(log(mn)), and work in
351
+ the word RAM model where w-bit arithmetic operations take O(1) time; for simplicity, we assume
352
+ mathematical operations such as trigonometric functions and radicals can also be implemented ex-
353
+ actly for w-bit words in O(1) time. Throughout, “quantum states” mean unit vectors, and “quantum
354
+ gates” or “oracles” O mean unitary matrices. We follow standard notation and identify a standard
355
+ basis vector ei for i ∈ [n] with |i⟩, an a-qubit state, in which i is represented in binary (i.e. more for-
356
+ mally, |i⟩ = |bin(i)⟩, and bin is omitted for brevity). We consider the standard model of quantum
357
+ access to oracles, in which the oracle O, which is defined by its operation on |s⟩ for all {0, 1}∗-
358
+ valued s (where length is clear from context), can be queried in superposition. If O is queried on
359
+ |v⟩ := �
360
+ s αs|s⟩, the result is O|v⟩ = �
361
+ s αi(O|s⟩). We use |g⟩, |g′⟩, etc. (when clear from context)
362
+ to denote arbitrary sub-unit vectors, which represent garbage states (unused in computations). The
363
+ tensor product of states |u⟩ and |v⟩ on a and b qubits is denoted |u⟩|v⟩, an (a + b)-qubit state. The
364
+ runtime of a quantum circuit is its maximum depth (in arithmetic gates on w-bit words).
365
+ Access model.
366
+ Throughout the paper, we assume a standard quantum oracle for accessing A
367
+ (recall ∥A∥max ≤ 1). In particular, by a quantum oracle for A we mean an oracle OA which, when
368
+ queried with |i⟩|j⟩|s⟩ for i ∈ [m], j ∈ [n], s ∈ {0, 1}w, reversibly writes Aij (in binary) to the third
369
+ register in O(1) time, i.e. OA|i⟩|j⟩|s⟩ = |i⟩|j⟩|s ⊕ Aij⟩ where ⊕ is bitwise mod-2 addition.
370
+ Given a quantum oracle for A, with two queries, by standard constructions one can construct
371
+ an oracle which places the value in the amplitude of the state rather than the register itself. More
372
374
+ formally, one can construct4 an O′A, which operates as:
+ O′A|0⟩|i⟩|j⟩ = √(Aij)|0⟩|i⟩|j⟩ + √(1 − |Aij|)|1⟩|g⟩, for (i, j) ∈ [m] × [n].
382
+ It is standard in the literature to (using ancilla qubits to store the output register where Aij is
+ written) construct such an O′A from OA under our classical model of computation, see e.g. [GR02].
+ For simplicity, we omit discussion of ancilla qubits in the remainder of the paper and assume direct
+ access to O′A. We also note that there is ambiguity in the implementation of O′A in that the square
+ root is not unique, and that we have control over the signing used in this implementation. We will
+ use this flexibility crucially later in the paper, specifically Corollary 6.
391
+ 3
392
+ Overview of approach
393
+ In this section, we give an overview of the approach we take to prove our main results: an improved
394
+ quantum runtime for solving zero-sum games (Theorem 4) and an improved quantum data structure
395
+ for dynamic Gibbs sampling (Theorem 3). We organize this section as follows.
396
+ In Section 3.1, we state Algorithm 1, the optimization method framework we use to solve zero-
397
+ sum games. This framework is a generalization of the classical algorithm of [GK95]. We state its
398
+ guarantees in Proposition 1 and defer the proof to Appendix A. Algorithm 1 assumes access to
399
+ an approximate Gibbs oracle (Definition 1) for sampling from dynamic distributions as stated in
400
+ Problem 1. The bulk of our work is devoted to obtaining an efficient quantum implementation of
401
+ such an oracle (Theorem 3) and using this result we prove Theorem 4 at the end of Section 3.1.
402
+ In Section 3.2, we overview the main technical innovation of this paper, an improved solution to
403
+ Problem 1. Whereas prior work by [vAG19] solves Problem 1 at an amortized ≈ √m + n · ǫ−1 cost
404
+ per iteration, we show how to solve the problem at an amortized ≈ √m + n · ǫ− 1
405
+ 2 cost. We remark
406
+ that the only quantum components of our algorithm (quantum SVT and amplitude amplification)
407
+ are abstracted away by Proposition 2, which is proven in Appendix B.
408
+ 3.1
409
+ Solving matrix games with a Gibbs sampling oracle
410
+ Our proof of Theorem 4 uses an efficient implementation of the algorithmic framework stated in
411
+ Algorithm 1, based on stochastic mirror descent. In specifying Algorithm 1, we recall our earlier
412
+ Definition 1, which captures the approximate sampling access we require for Algorithm 1’s execution.
413
+ Algorithm 1: MatrixGameSolver(δ, η, T)
+ 1 Input: A ∈ Rm×n, desired accuracy ǫ ∈ (0, 1), δ-approximate Gibbs oracles for the
+   (dynamic) vectors −A⊤xt and Ayt
+ 2 Parameters: Gibbs sampler parameter δ ∈ (0, 1), step size η > 0, iteration count T
+ 3 Initialize ˆu ← 0m, ˆv ← 0n, x0 ← 0m, and y0 ← 0n
+ 4 for t = 0 to T − 1 do
+ 5   Independently sample jt, j′t ∈ [n] using Ogibbs_{−A⊤xt} and it, i′t ∈ [m] using Ogibbs_{Ayt}
+ 6   Update yt+1 ← yt + η e_{jt} and xt+1 ← xt + η e_{it}        // Update iterates.
+ 7   Update ˆu ← ˆu + (1/T) e_{i′t} and ˆv ← ˆv + (1/T) e_{j′t}  // Update output.
+ 8 return (ˆu, ˆv)
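+ The following Python sketch mirrors Algorithm 1; the δ-approximate Gibbs oracles are assumed to
+ be supplied as black-box callables (they are what the rest of the paper constructs), and all names
+ are otherwise illustrative.
+ import numpy as np
+
+ def matrix_game_solver(m, n, eta, T, gibbs_neg_ATx, gibbs_Ay):
+     # gibbs_neg_ATx(x) returns j ~ Gibbs(-A^T x); gibbs_Ay(y) returns i ~ Gibbs(A y).
+     u_hat, v_hat = np.zeros(m), np.zeros(n)
+     x, y = np.zeros(m), np.zeros(n)
+     for _ in range(T):
+         jt, jt_prime = gibbs_neg_ATx(x), gibbs_neg_ATx(x)   # two independent samples
+         it, it_prime = gibbs_Ay(y), gibbs_Ay(y)
+         y[jt] += eta                  # update iterates
+         x[it] += eta
+         u_hat[it_prime] += 1.0 / T    # update (subsampled) output
+         v_hat[jt_prime] += 1.0 / T
+     return u_hat, v_hat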
436
+ 4This follows e.g. by calling the oracle to obtain the value of Aij in binary (interpreted as a signed number
437
+ between 0 and 1), adding an ancilla qubit, performing arithmetic to compute the rotation angle needed on that
+ ancilla, applying a tower of controlled rotation gates to an ancilla qubit using that rotation angle expressed in binary,
439
+ then calling the standard oracle a second time to uncompute the binary value of Aij. See e.g. [GR02] for details.
440
442
+ The main skeleton of Algorithm 1 (Lines 5-6) using exact oracles is identical to the method of
443
+ [GK95]. However, our framework builds upon [GK95] in the following three ways.
444
+ 1. We tolerate total variation error in the sampling procedure via δ-approximate Gibbs oracles.
445
+ 2. We provide a high-probability guarantee on the duality gap using martingale arguments.
446
+ 3. We subsample the output to obtain a sparse solution yielding a comparable duality gap.
447
+ We remark that several of these improvements have appeared previously, either explicitly or
448
+ implicitly, in the stochastic gradient method literature. For example, an approximation-tolerant
449
+ stochastic gradient method was given in [CJST20], and our proofs of the high-probability guarantees
450
+ are based on arguments in [AL17, CDST19]. For completeness we give a self-contained proof of the
451
+ following guarantee on Algorithm 1 in Appendix A.
452
+ Proposition 1. Let A ∈ Rm×n satisfy ∥A∥max ≤ 1 and ǫ, α ∈ (0, 1). Let δ ≤ ǫ/20, η = ǫ/60, and
+ T = Θ(ǫ−2 log(mn/α)) for an appropriate constant. With probability ≥ 1 − α, Algorithm 1 outputs an
+ ǫ-approximate NE for A.
460
+ Given Proposition 1, to obtain our faster zero-sum game solvers we simply need to efficiently im-
+ plement the Gibbs sampling in Line 5. As introduced in Section 1, Problem 1 describes a dynamic
462
+ approximate Gibbs oracle sampling problem sufficient for this task. Indeed, solving two appropriate
463
+ parameterizations of Problem 1 provides the oracles needed by Algorithm 1. By combining Propo-
464
+ sition 1 with the following Theorem 3 (our solution to Problem 1, discussed in greater detail in
465
+ Section 3.2), we prove our main result Theorem 4.
466
+ Theorem 3. Let α ∈ (0, 1) and δ ≤ η. Given a quantum oracle for A ∈ Rm×n (defined in Section 2)
+ with ∥A∥max ≤ 1, we can solve Problem 1 with probability ≥ 1 − α with
+ max(Tsamp, Tupdate) = O((1 + √n · Tη · log^4(mn/δ)) · (√(η log(nηT/α)) + η log(nηT/α))),
+ and an additive initialization cost of
+ O(η^3 T^3 log^4(nηT/δ) + log^7(nηT/δ)).
496
+ Theorem 4. Let A ∈ Rm×n satisfy ∥A∥max ≤ 1, and let ǫ, α ∈ (0, 1). Given a quantum oracle for A
+ (defined in Section 2), there is a quantum algorithm which yields a classical output (u, v) ∈ ∆m × ∆n
+ that is an ǫ-approximate NE for A with probability ≥ 1 − α in time
+ O((√(m + n)/ǫ^2.5) · log^4(mn/ǫ) · log^2.5(mn/(αǫ)) + (√(m + n)/ǫ^2) · log^4(mn/ǫ) · log^3(mn/(αǫ)) + (1/ǫ^3) · log^7(mn/ǫ)).
522
+ Proof. We apply two instances of Theorem 3 to implement the δ-approximate Gibbs oracle for
+ the dynamic vectors −A⊤xt and Ayt, to implement each iteration of Algorithm 1 in amortized
+ O(1 + Tsamp + Tupdate) time. Using the settings of parameters T, η in Proposition 1 and setting
+ δ = Θ(ǫ), which suffices for Algorithm 1 and Theorem 3, we have
+ max(Tsamp, Tupdate) = O((√(m + n)/ǫ) · log^4(mn/ǫ) · log(mn/(αǫ)) · (ǫ log(mn/(αǫ)) + √(ǫ log(mn/(αǫ))))).
547
+ The conclusion follows since, by observation, Algorithm 1 costs O(T · (1 + Tsamp + Tupdate)). As
548
+ remarked in the introduction, the additive term in the runtime comes from the cost of stably
549
+ implementing a quantum circuit required in the use of Theorem 3 representing a polynomial trans-
550
+ formation in finite precision, which we discuss in greater detail in Appendix D.
551
553
+ 3.2
554
+ Dynamic sampling maintenance via dynamic hint maintenance
555
+ In this section, we overview our proof of Theorem 3, which proceeds in two steps.
556
+ 1. We reduce sampling maintenance (Problem 1) to a problem which we call hint maintenance.
557
+ This latter problem is a specialization of the sampling maintenance problem where suitable
558
+ advice, which we call the hint throughout, is provided.
559
+ 2. We show how to solve the hint maintenance problem required by Proposition 2 in Theorem 3,
560
+ by recursively calling Proposition 2 in phases, allowing us to maintain hints of suitable quality.
561
+ Reducing sampling maintenance to hint maintenance.
562
+ First, we introduce the following
563
+ data structure for maintaining the x variable in Problem 1, which was used crucially in [vAG19] for
564
+ dynamic Gibbs sampling. This data structure allows efficient queries to subsets of the coordinates
565
+ of x and we use it in our Gibbs sampler as well.
566
+ Lemma 1 (Sampler tree). Let η ∈ R≥0 and m ∈ N. There is a classical data structure, SamplerTree,
567
+ supporting a tree on O(m) nodes such that [m] corresponds to leaves, with the following operations.
568
+ • Init(m, ηfixed): initialize x ← 0m and η ← ηfixed
569
+ • Update(i): xi ← xi + η
570
+ • SubtreeSum(v): return the sum of all xi, where i is in the subtree of v
571
+ The total runtime of T calls to Update is O(T log m), and calls to SubtreeSum cost O(1).
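+ To make Lemma 1 concrete, the following is a minimal array-based sketch of such a structure (a
+ complete binary tree over the leaves [m] with subtree sums propagated on each update); the names
+ are illustrative.
+ class SamplerTree:
+     # Binary tree stored in an array; leaves hold x_i, internal nodes hold subtree sums.
+     def __init__(self, m, eta_fixed):
+         self.m, self.eta = m, eta_fixed
+         size = 1
+         while size < m:
+             size *= 2
+         self.size = size
+         self.tree = [0.0] * (2 * size)   # node v at index v, leaf i at index size + i
+
+     def update(self, i):
+         # x_i <- x_i + eta; propagate the change up to the root in O(log m) time.
+         node = self.size + i
+         while node >= 1:
+             self.tree[node] += self.eta
+             node //= 2
+
+     def subtree_sum(self, node):
+         # Sum of x_i over leaves below `node`, returned in O(1) time.
+         return self.tree[node]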
572
+ An implementation of SamplerTree based on propagating subtree sums upon updates is a standard
+ classical data structure (cf. the sketch above), and we omit further description for brevity. Next, we state our first building
574
+ block towards solving Problem 1, a result which can be thought of as quantum sampling with a hint.
575
+ We defer its proof to Appendix B, as it is primarily based on generalizing dynamic block-encoding
576
+ strategies with bounded-degree polynomial approximations, as pioneered by [GSLW19, vAG19].
577
+ Proposition 2. Let x ∈ Rm≥0 correspond to an instance of SamplerTree, and β ≥ ∥x∥1. Let p be
+ the Gibbs distribution associated with A⊤x, let Z := Σj∈[n] exp([A⊤x]j) and ˜Z ∈ [Z, CZ] for some
+ C ≥ 1. Finally, let q ∈ Rn have entries classically queriable in O(1) time, satisfy q ≥ p entrywise,
+ qj ∈ [δ/n, 1] for all j ∈ [n], and ∥q∥1 = ρ. Suppose ˜Z, C, ρ, and β are explicitly known. Given
+ a quantum oracle for A ∈ Rm×n (defined in Section 2) with ∥A∥max ≤ 1, we can implement a
+ δ-approximate Gibbs oracle which has query cost O(√(ρC) · β log^4(Cmn/δ)). The total additional cost
+ incurred if x undergoes T Update calls which preserve the invariants on ˜Z, C, ρ, β is O(T log m).
590
+ Proposition 2 makes use of an overestimating hint vector q and approximate normalization
+ constant ˜Z, which we collectively call the hint. The acceptance probability of our rejection sampling
+ is governed by two primary parameters: ρ = ∥q∥1, which reflects the degree of overestimation
+ (and can be thought of as a hint quality), and C ≥ 1, which reflects our inability to accept with
+ probability pj/qj when p is implicit (which can be thought of as a normalization quality). In particular,
+ the rejection sampling scheme used in Proposition 2 will instead accept with probability pj/(Cqj).5
+ Here we elaborate briefly on the implementation of Proposition 2 (for more details, see Section 4).
+ We follow notation of Proposition 2, and also let w := A⊤x such that the unnormalized
601
+ 5Exactly computing Z may require time Ω(n) in standard implementations, an obstacle to runtimes ∝ √n.
602
604
+ Gibbs distribution is exp(w), and p = exp(w)/Z. Proposition 2 is a rejection sampler which first loads
+ the hint q into superposition, and then applies a filter. Overall, our scheme has the form
+ sample j ∼ q/ρ, then accept with probability exp(wj)/(CZ · qj) = pj/(Cqj),      (1)
+ which results in an accepted sample with probability ≈ 1/(ρC), and hence requires ≈ √(ρC) trials to
+ succeed after applying quantum amplitude amplification, a generalization of Grover search [BHMT02].6
619
+ The latter filtering step is implemented using appropriate block-encoding technology.
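+ A classical sketch of scheme (1) follows, with w = A⊤x, the hint q, and an overestimate Z_hat ≥ Z
+ assumed given; the quantum implementation replaces the explicit trial loop with amplitude
+ amplification, reducing the expected ≈ ρC trials to ≈ √(ρC).
+ import bisect, math, random
+
+ def gibbs_sample_with_hint(w, q, Z_hat, rng=random):
+     # Scheme (1): propose j ~ q/rho, accept with probability exp(w_j)/(Z_hat * q_j) <= 1.
+     # Valid whenever q >= exp(w)/Z entrywise and Z_hat >= Z := sum_j exp(w_j);
+     # accepted indices then follow the Gibbs distribution proportional to exp(w).
+     prefix, total = [], 0.0
+     for qj in q:
+         total += qj
+         prefix.append(total)
+     while True:
+         j = bisect.bisect_left(prefix, rng.random() * total)
+         if rng.random() < math.exp(w[j]) / (Z_hat * q[j]):
+             return j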
620
+ The above discussion suggests that the hint and normalization qualities, parameterized by ρ
621
+ and C, are crucial in controlling the acceptance probability of our scheme. More concretely, in
622
+ our applications of Proposition 2, β = ηT = Õ(1/ǫ), which is the bound on the ℓ1 norm of the
+ xt and yt iterates in Algorithm 1 under the parameter settings of Proposition 1. Overall, the
+ cost of implementing an approximate Gibbs oracle is then (up to logarithmic factors) √(ρC) · (1/ǫ).
628
+ Proposition 2 hence reduces Problem 1 to the problem of maintaining the hint consisting of a vector
629
+ q and a normalization estimate �Z. We mention that Proposition 2 is a strict generalization of a
630
+ corresponding building block in [vAG19], which only used q set to the all-ones vector.
631
+ Approaches for Problem 1.
632
+ We now overview our improved solution to Problem 1 via efficient
633
+ use of Proposition 2. To motivate our solution, we outline three solutions to Problem 1 offering
634
+ different tradeoffs in the overall quality ρC. The first only uses classical information and does not
635
+ use Proposition 2 at all, the second uses Proposition 2 but maintains no history across iterates, and
636
+ the third (building upon the first two) is our approach.
637
+ Solution 1: [GK95]. A standard way to solve Problem 1 is to explicitly update w = A⊤x and
+ exp(w), and exactly maintain the normalizing constant Z. This allows us to sample from p in Õ(1)
+ time. Since w changes by one row of A under a 1-sparse Update operation to x, this is implementable
+ in O(n) time per iteration. We can view this as an instance of the scheme (1) with q = p, C = 1,
+ and ρ = 1. It yields the (unbalanced) tradeoff for Problem 1 of Tsamp = Õ(1) and Tupdate = O(n).
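+ A sketch of this classical baseline, maintaining w = A⊤x explicitly under sparse updates to x, is the
+ following (sampling is written in O(n) time for clarity; Õ(1)-time sampling is possible with standard
+ tree or alias-table structures).
+ import numpy as np
+
+ class ExplicitGibbsMaintainer:
+     # Solution 1: maintain w = A^T x explicitly and sample from exp(w)/Z.
+     def __init__(self, A):
+         self.A = np.asarray(A, dtype=float)
+         self.w = np.zeros(self.A.shape[1])
+         self.rng = np.random.default_rng()
+
+     def update(self, i, eta):
+         # x_i <- x_i + eta changes w by eta * A_{i,:}, which is O(n) work.
+         self.w += eta * self.A[i]
+
+     def sample(self):
+         p = np.exp(self.w - self.w.max())
+         return self.rng.choice(len(self.w), p=p / p.sum())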
642
+ Solution 2: [vAG19]. A recent work [vAG19] introduced a quantum implementation of the scheme
+ (1) with an improved tradeoff. The [vAG19] scheme first uniformly samples, which in the language
+ of (1) means q = 1n and ρ = n. It then applies quantum maximum finding [DH96] to obtain an
+ approximate maximum entry of w, which they show takes time Õ(β · √n); for the sake of simplicity
+ here, we assume this exactly yields wmax := maxj∈[n] wj. Finally, the acceptance probability pj/(Cqj)
+ is set to exp(wj − wmax). For q = 1n, this translates to
+ pj · exp(wmax − wj) = exp(wmax)/Z ≤ 1,
+ implying C = 1 suffices. We note this bound on C can be tight when w is very non-uniform.
+ Overall, the [vAG19] scheme’s update time requires maximum finding, and its sampling time (via
+ Proposition 2) requires time Õ(β · √(ρC)) = Õ(β · √n). For β = Õ(1/ǫ) as in Algorithm 1, this
+ yields the balanced tradeoff max(Tsamp, Tupdate) = Õ(√n · ǫ−1). As discussed earlier, our key in-
+ sight is to improve upon this specific choice of hint in [vAG19], for their implicit use of Proposition 2.
663
+ Solution 3: this work. We design better hints for Proposition 2 by executing our algorithm in phases
+ corresponding to batches of ≈ 1/η iterations. At the start of each phase, we use the Gibbs access
+ afforded by Proposition 2 to produce a suitable hint for efficiently implementing the next phase.
+ 6The β in Proposition 2 comes from loading exp(wj) into a quantum oracle via polynomials of degree ≈ β.
+ Our execution of this strategy, parameterized by an integer k ∈ [n], relies on the following observations.
+ 1. During the ⌈1/η⌉ iterations t ∈ {τ + s}, s ∈ [⌈1/η⌉] (where τ starts the phase), the dynamic Gibbs
+ distribution pt (where t is the iteration index) changes by O(1) multiplicatively, since w
+ entrywise changes by O(1) additively. Thus, the quality of a hint vector deteriorates by at
+ most a constant in the phase, so it suffices to give a good hint qτ ≥ pτ at the phase start.
+ 2. By using access to Proposition 2 at the end of the previous phase, we can efficiently estimate
+ large entries of pτ. More precisely, we sample Õ(k) times from pτ, and let the empirical
+ distribution of these samples be ˜q. Chernoff bounds show that any large entry [pτ]j = Ω(1/k)
+ will be accurately reflected in the empirical sample. Hence, we set the hint to
+ qj = ˜qj · O(1) if ˜qj = Ω(1/k), and qj = (1/k) · O(1) if ˜qj = O(1/k),
+ for appropriate constants. This yields an improved hint quality of ρ ≈ n/k, since large entries
+ of the hint sum to at most O(1) (as ˜qj ≈ pj), and small entries sum to O(n/k).
+ 3. We show a similar strategy of using empirical concentration, combined with a testing variant
+ of Proposition 2, accurately estimates the normalizing factor Z, yielding C = O(1).
+ This strategy yields Tsamp = Õ(β · √(n/k)) and Tupdate = Õ(Tsamp · kη) (since we amortize Tupdate
+ over ≈ 1/η iterations). For the parameter settings of Algorithm 1, optimizing k yields
+ max(Tsamp, Tupdate) = Õ(√n · ǫ−1/2).
708
+ We prove Theorem 3, our improved solution to Problem 1, in Section 4. Ignoring logarithmic fac-
+ tors and assuming η ≪ 1 (as in our setting), Theorem 3 shows we can maintain max(Tsamp, Tupdate) =
+ Õ(√n · Tη^1.5). For the parameter settings T = Õ(ǫ−2), η = Θ(ǫ), as stated in Proposition 1, this
+ indeed equates to max(Tsamp, Tupdate) = Õ(√n · ǫ−1/2).
713
+ 4
714
+ Gibbs sampling oracle implementation
715
+ In this section, we prove Theorem 3, which gives our solution to Problem 1. To do so, we follow the
+ outline given in Section 3.2, wherein we solve Problem 1 in batches of ⌈1/η⌉ iterations, each of which
+ we call a “phase.” In Sections 4.1 and 4.2, we only discuss a single phase of Problem 1, consisting
+ of the iterations τ + s for s ∈ [⌈1/η⌉] and some initial iteration τ, assuming certain invariants (stated
+ below) hold at the start of the phase. We give a complete solution to Problem 1 in Section 4.3.
722
+ Invariant 1 (Approximate normalization access). We explicitly have ˜Zprev with ˜Zprev ∈ [Zτ, CZτ]
+ for some C = O(1).
724
+ Invariant 2 (Initial sampling maintenance). We have Oτ solving Problem 1 in iteration τ.
725
+ The remainder of this section is then organized as follows.
+ • Section 4.1: We show that assuming Invariants 1 and 2 hold at the start of a phase, we can
+ perform preprocessing used to construct our hint, consisting of the estimated normalization
+ ˜Z and vector q, in an application of Proposition 2. This gives the cost of Tsamp in Problem 1.
+ • Section 4.2: We show that at the conclusion of each phase we can maintain Invariants 1 and 2
+ for use in the next phase. This gives the cost of Tupdate in Problem 1.
+ • Section 4.3: We recursively call the subroutine of Sections 4.1 and 4.2 (which solves Problem 1
+ for all the iterations τ + s where s ∈ [⌈1/η⌉] for some τ) ≈ ηT times to prove Theorem 3.
736
+ 4.1
737
+ Preprocessing and approximate Gibbs oracle implementation
738
+ In this section, we show how to construct the “hint” q which will be used throughout a phase
739
+ (starting in iteration τ) given access to Oτ, and bound ρ = ∥q∥1 which quantifies the quality of our
740
+ hint, under the assumption that Invariants 1 and 2 hold in the phase. We first show a multiplicative
741
+ stability property of the relevant Gibbs distributions in a phase.
742
+ Lemma 2. For all s ∈ [⌈1/η⌉], we have
+ Zτ+s ∈ [(1/3)Zτ, 3Zτ], and pτ+s ∈ [(1/9)pτ, 9pτ] entrywise.
753
+ Proof. Let νt := exp(A⊤xt) for all t, such that pt = νt/Zt. We have that for any j ∈ [n],
+ [ντ+s]j / [ντ]j = exp([A⊤(xτ+s − xτ)]j)
+ ∈ [exp(−∥A∥max ∥xτ+s − xτ∥1), exp(∥A∥max ∥xτ+s − xτ∥1)]
+ ∈ [exp(−ηs), exp(ηs)] ⊆ [1/3, 3].
+ Similarly, Zτ+s ∈ [(1/3)Zτ, 3Zτ], and combining yields the conclusion.
771
+ Next, our computation of the overestimating vector q is parameterized by an integer k ∈ [n]
772
+ which will be fixed throughout this section and Section 4.2. We will simply set q to be an upscaled
773
+ variant of an empirical distribution of roughly k draws from Oτ.
774
+ Lemma 3. Let k ∈ [n], α ∈ (0, 1), and suppose δ ≤ 1/(16k). Draw N = Θ(k log(nηT/α)) samples from
+ Oτ for an appropriately large constant, and let ˜q ∈ ∆n be the empirical distribution over these N
+ samples. Define B := {i ∈ [n] | ˜qi ≥ 1/(2k)}. Then for
+ qj = 18˜qj if j ∈ B, and qj = 18/k if j ̸∈ B,
+ with probability ≥ 1 − α/(2⌈ηT⌉), ∥q∥1 = O(n/k) and q ≥ pτ+s entrywise, for all s ≤ 1/η.
794
+ Proof. The first conclusion ∥q∥1 = O(n/k) is immediate from the definition of q, since ∥q∥1 ≤ 18∥˜q∥1 +
+ 18n/k. In light of Lemma 2 (which holds deterministically), to show the second conclusion, it suffices
+ to show that with the desired success probability, we have both
+ 2˜qj ≥ [pτ]j for all j ∈ B      (2)
+ and
+ 2/k ≥ [pτ]j for all j ̸∈ B.      (3)
+ Denote α′ := α/(2⌈ηT⌉) for notational convenience, and let ˜p denote the distribution of samples from Oτ,
+ and recall that ∥˜p − pτ∥1 ≤ 1/(16k). Because we are taking Θ(k log(n/α′)) samples from ˜p, we have by a
+ standard Chernoff bound that with probability at least 1 − α′ (union bounding over all coordinates
+ j ∈ [n]), both of the following hold.
+ 1. For all j ∈ [n] such that ˜pj ≥ 1/(4k), we have ˜qj ≥ (2/3)˜pj.
+ 2. For all j ∈ [n] such that ˜pj ≤ 1/(4k), we have ˜qj ≤ 1/(2k).
+ We condition on these events for the remainder of the proof; we now show (2), (3) in turn.
+ Proof of (2). To see (2), the second event above implies that if ˜pj ≤ 1/(4k), then j ̸∈ B. Hence, for
+ all j ∈ B, we have ˜qj ≥ (2/3)˜pj ≥ [pτ]j/2 since ∥˜p − pτ∥∞ ≤ 1/(16k) ≤ (1/4)˜pj for all j ∈ B.
+ Proof of (3). To see (3), suppose for contradiction that j ̸∈ B and [pτ]j > 2/k. This implies that
+ ˜pj > 1/k, and hence by the first event above, ˜qj ≥ 1/(2k), contradicting j ̸∈ B.
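+ A classical sketch of the hint construction in Lemma 3, with the approximate Gibbs oracle Oτ passed
+ in as a callable `sample_oracle` (a stand-in for the quantum sampler), is the following.
+ from collections import Counter
+
+ def build_hint(sample_oracle, n, k, N):
+     # Draw N = Theta(k log(n*eta*T/alpha)) samples and form the empirical distribution
+     # q_tilde; entries at least 1/(2k) are upscaled by 18, the rest are capped at 18/k.
+     counts = Counter(sample_oracle() for _ in range(N))
+     q = []
+     for j in range(n):
+         q_tilde_j = counts[j] / N
+         q.append(18.0 * q_tilde_j if q_tilde_j >= 1.0 / (2 * k) else 18.0 / k)
+     return q   # w.h.p. ||q||_1 = O(n/k) and q >= p entrywise throughout the phase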
842
+ Corollary 1. Assume that Invariants 1, 2 hold for the phase consisting of iterations τ + s, s ∈ [⌈1/η⌉].
+ We can solve Problem 1 for the phase with probability ≥ 1 − α/(2⌈ηT⌉), and
+ Tsamp := O(√(n/k) · Tη log^4(mn/δ)).
+ Proof. We will run the algorithm described in the proof of Lemma 3, and condition on it succeeding,
+ giving the failure probability. It then suffices to apply Proposition 2 with q defined in Lemma 3. For
+ this q, we parameterize Proposition 2 with C = O(1) (see Invariant 1), ρ = O(n/k) (see Lemma 3),
+ and β = Tη. It is clear the lower bound on entries of q in Proposition 2 holds.
858
+ 4.2
859
+ Maintaining invariants
860
+ We now show how to maintain Invariant 1 at iteration τ′ := τ + ⌈1/η⌉, for use in the next phase, and
+ bound the cost of doing so. We note that Invariant 2 follows immediately from our construction in
+ Corollary 1. First, by combining Lemma 2 with Invariant 1,
+ Zτ′ ∈ [˜Zprev/(3C), 3˜Zprev].      (4)
+ This suggests that we may use 3˜Zprev = ˜Z for the next phase; however, this would lead to an
+ exponential blowup in the multiplicative range C. To sidestep this, we develop a tester for a hidden
+ parameter governing a success probability, which will be used to give a refined estimate ˜Z. We
+ require the following corollary of Proposition 2, whose proof we defer to Appendix B.
874
+ Corollary 2. Following notation of Proposition 2, let R := ˜Z/Z. There is a quantum oracle Otest
+ which can be implemented under T Update calls to x in O(T log m) time, and has query cost
+ O(√(ρC) · β log^4(Cmn/(Cℓδ))).
+ Furthermore, for explicitly known constants Cℓ and Cu, Otest returns “success” with probability p for
+ Cℓ/√(Rρ) ≤ p ≤ Cu/√(Rρ).
889
+ √Rρ.
890
+ Corollary 2 differs from Proposition 2 in that it returns a Boolean-valued answer (as opposed to
891
+ a sample from an approximate Gibbs distribution), and has a success probability parameterized by
892
+ explicit constants. We now show how to use Corollary 2 to maintain Invariant 1.
893
+ 13
894
+
895
+ Lemma 4. Assume Invariants 1, 2 hold for the phase consisting of iterations τ + s, s ∈ [⌈ 1
896
+ η⌉], and
897
+ suppose C ≥ 4C2
898
+ u
899
+ C2
900
+
901
+ for C = O(1), where Cu and Cℓ are the constants from Corollary 2. Further,
902
+ suppose we have obtained q satisfying the conclusion of Lemma 3 (i.e. that the algorithm in Lemma 3
903
+ succeeded). We can determine �Z such that �Z ∈ [Zτ ′, CZτ ′] with probability ≥ 1 −
904
+ α
905
+ 2⌈ηT⌉, in time
906
+ O
907
+ ��n
908
+ k · Tη log4 �mn
909
+ δ
910
+
911
+ log
912
+ �ηT
913
+ α
914
+ ��
915
+ .
916
+ Proof. Define �Z0 := 3 �Zprev, R0 :=
917
+ �Z0
918
+ Zτ′ , and note that �Z0 ∈ [Zτ ′, 9CZτ ′] by Invariant 1 and Lemma 2.
919
+ Next, assuming the success of Lemma 3, we have that the success probability p of Otest from
920
+ Corollary 2 using the estimate �Z0 satisfies (for the unknown R0 ∈ [1, 9C], and known Cℓ, Cu, ρ)
921
+ Cℓ
922
+ √R0ρ ≤ p ≤
923
+ Cu
924
+ √R0ρ.
925
+ For N := 27 log 4⌈ηT⌉
926
+ α
927
+ · 3√Cρ
928
+ Cℓ , we first run Otest N times and check the number of successes, denoted
929
+ by S, which fits within the runtime budget by Corollary 2. By a Chernoff bound, we have that with
930
+ probability ≥ 1 −
931
+ α
932
+ 2⌈ηT⌉, we have
933
+ 54 log 4⌈ηT⌉
934
+ α
935
+ ·
936
+
937
+ C
938
+ R0
939
+ ≤ 2
940
+ 3pN ≤ S ≤ 4
941
+ 3pN ≤ 108 log 4⌈ηT⌉
942
+ α
943
+ · Cu
944
+ Cℓ
945
+ ·
946
+
947
+ C
948
+ R0
949
+ .
950
+ Hence, we can determine the quantity R0 up to a multiplicative factor of 4C2
951
+ u
952
+ C2
953
+
954
+ ≤ C, which also
955
+ implies the same multiplicative approximation factor for Zτ ′, as desired.
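+ A classical sketch of this estimation step follows: run the Boolean tester N times, form the empirical
+ success rate, and invert the two-sided bound of Corollary 2 to bracket R0 (and hence Z). The
+ constants mirror the proof, and the bracket is valid up to the Chernoff-level approximation of p.
+ def refine_normalization(run_test, N, rho, C_l, C_u, Z0_hat):
+     # run_test() returns True with probability p, where
+     #   C_l / sqrt(R0 * rho) <= p <= C_u / sqrt(R0 * rho) and R0 = Z0_hat / Z.
+     p_hat = sum(1 for _ in range(N) if run_test()) / N
+     # Treating p_hat ~ p, invert the bound to bracket R0, then translate to Z.
+     R0_low = (C_l / p_hat) ** 2 / rho
+     R0_high = (C_u / p_hat) ** 2 / rho
+     return Z0_hat / R0_high, Z0_hat / R0_low   # interval containing Z, width O(C_u^2 / C_l^2)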
956
+ 4.3
957
+ Proof of Theorem 3
958
+ Theorem 3. Let α ∈ (0, 1) and δ ≤ η. Given a quantum oracle for A ∈ Rm×n (defined in Section 2)
+ with ∥A∥max ≤ 1, we can solve Problem 1 with probability ≥ 1 − α with
+ max(Tsamp, Tupdate) = O((1 + √n · Tη · log^4(mn/δ)) · (√(η log(nηT/α)) + η log(nηT/α))),
+ and an additive initialization cost of
+ O(η^3 T^3 log^4(nηT/δ) + log^7(nηT/δ)).
988
+ Proof. We first claim that for any k ∈ [n], we can solve Problem 1 with probability ≥ 1 − α and
+ Tsamp = O(√(n/k) · Tη log^4(mn/δ)),
+ Tupdate = O((√(n/k) · Tη log^4(mn/δ)) · kη log(nηT/α)).
+ This follows from combining Lemma 3 (amortized over ⌈1/η⌉ iterations), Corollary 1, and Lemma 4,
+ and taking a union bound over at most ⌈ηT⌉ phases. Here we note that the cost of log m per
+ iteration to support Update costs to x in Lemma 1, Proposition 2, and Corollary 2 is not dominant.
+ By choosing k = Θ(max(1, (η log(mn/(αǫ)))−1)), we balance the costs of Tsamp and Tupdate, yielding the
+ conclusion. We finally note that by picking an appropriate constant in the definition of k, we have
+ δ ≤ η =⇒ δ ≤ 1/(16k) as required by Lemma 3, the only component specifying a bound on δ.
1015
1017
+ Acknowledgments
1018
+ We thank András Gilyén for communication regarding the prior work [vAG19]. AB was supported
1019
+ in part by the DOE QuantISED grant DE-SC0020360, by the AFOSR under grant FA9550-21-
1020
+ 1-0392, and by the U.S. DOE Office of Science under Award Number DE-SC0020266.
1021
+ YG was
1022
+ supported in part by the Stanford MS&E DE&I Research program. YJ was supported in part by a
1023
+ Stanford Graduate Fellowship and a Danzig-Lieberman Graduate Fellowship. AS was supported in
1024
+ part by a Microsoft Research Faculty Fellowship, NSF CAREER Award CCF1844855, NSF Grant
1025
+ CCF-1955039, a PayPal research award, and a Sloan Research Fellowship. KT thanks Ewin Tang
1026
+ for her expertise on quantum linear algebra and for fielding many of our questions.
1027
+ References
1028
+ [Aar15]
1029
+ Scott Aaronson. Read the fine print. Nature Physics, 11(4):291–293, 2015.
1030
+ [AK07]
1031
+ Sanjeev Arora and Satyen Kale. A combinatorial, primal-dual approach to semidefi-
1032
+ nite programs. In Proceedings of the thirty-ninth annual ACM symposium on Theory
1033
+ of computing, pages 227–236, 2007.
1034
+ [AL17]
1035
+ Zeyuan Allen-Zhu and Yuanzhi Li. Follow the compressed leader: Faster online
1036
+ learning of eigenvectors and faster MMWU. In Doina Precup and Yee Whye Teh,
1037
+ editors, Proceedings of the 34th International Conference on Machine Learning,
1038
+ ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings
1039
+ of Machine Learning Research, pages 116–125. PMLR, 2017.
1040
+ [Alm21]
1041
+ Josh Alman. Limits on the universal method for matrix multiplication. Theory
1042
+ Comput., 17:1–30, 2021.
1043
+ [AW21]
1044
+ Josh Alman and Virginia Vassilevska Williams. A refined laser method and faster
1045
+ matrix multiplication. In Dániel Marx, editor, Proceedings of the 2021 ACM-SIAM
1046
+ Symposium on Discrete Algorithms, SODA 2021, Virtual Conference, January 10
1047
+ - 13, 2021, pages 522–539. SIAM, 2021.
1048
+ [BHMT02]
1049
+ Gilles Brassard, Peter Høyer, Michele Mosca, and Alain Tapp. Quantum amplitude
1050
+ amplification and estimation. Quantum Computation and Quantum Information,
1051
+ 305:53–74, 2002.
1052
+ [BS17]
1053
+ Fernando GSL Brandao and Krysta M Svore.
1054
+ Quantum speed-ups for solving
1055
+ semidefinite programs.
1056
+ In 2017 IEEE 58th Annual Symposium on Foundations
1057
+ of Computer Science (FOCS), pages 415–426. IEEE, 2017.
1058
+ [Bub15]
1059
+ Sébastien Bubeck. Convex optimization: Algorithms and complexity. Foundations
1060
+ and Trends in Machine Learning, 8(3-4):231–357, 2015.
1061
+ [BWP+17]
1062
+ Jacob Biamonte, Peter Wittek, Nicola Pancotti, Patrick Rebentrost, Nathan Wiebe,
1063
+ and Seth Lloyd. Quantum machine learning. Nature, 549(7671):195–202, 2017.
1064
+ [CCLW20]
1065
+ Shouvanik Chakrabarti, Andrew M Childs, Tongyang Li, and Xiaodi Wu. Quantum
1066
+ algorithms and lower bounds for convex optimization. Quantum, 4:221, 2020.
1067
1069
+ [CDST19]
1070
+ Yair Carmon, John C. Duchi, Aaron Sidford, and Kevin Tian. A rank-1 sketch
1071
+ for matrix multiplicative weights. In Alina Beygelzimer and Daniel Hsu, editors,
1072
+ Conference on Learning Theory, COLT 2019, 25-28 June 2019, Phoenix, AZ, USA,
1073
+ volume 99 of Proceedings of Machine Learning Research, pages 589–623. PMLR,
1074
+ 2019.
1075
+ [CGL+20]
1076
+ Nai-Hui Chia, András Gilyén, Tongyang Li, Han-Hsuan Lin, Ewin Tang, and Chun-
1077
+ hao Wang. Sampling-based sublinear low-rank matrix arithmetic framework for
1078
+ dequantizing quantum machine learning. In Proceedings of the 52nd Annual ACM
1079
+ SIGACT symposium on theory of computing, pages 387–400, 2020.
1080
+ [CJST19]
1081
+ Yair Carmon, Yujia Jin, Aaron Sidford, and Kevin Tian. Variance reduction for
1082
+ matrix games. In Hanna M. Wallach, Hugo Larochelle, Alina Beygelzimer, Florence
1083
+ d’Alché-Buc, Emily B. Fox, and Roman Garnett, editors, Advances in Neural Infor-
1084
+ mation Processing Systems 32: Annual Conference on Neural Information Process-
1085
+ ing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada,
1086
+ pages 11377–11388, 2019.
1087
+ [CJST20]
1088
+ Yair Carmon, Yujia Jin, Aaron Sidford, and Kevin Tian. Coordinate methods for
1089
+ matrix games. In Sandy Irani, editor, 61st IEEE Annual Symposium on Foundations
1090
+ of Computer Science, FOCS 2020, Durham, NC, USA, November 16-19, 2020,
1091
+ pages 283–293. IEEE, 2020.
1092
+ [CLS21]
1093
+ Michael B Cohen, Yin Tat Lee, and Zhao Song. Solving linear programs in the
1094
+ current matrix multiplication time. Journal of the ACM (JACM), 68(1):1–39, 2021.
1095
+ [DH96]
1096
+ Christoph Dürr and Peter Høyer. A quantum algorithm for finding the minimum.
1097
+ CoRR, quant-ph/9607014, 1996.
1098
+ [DNV20]
1099
+ Daniel Dadush, Bento Natura, and Làszlò A Vègh. Revisiting tardos’s framework
1100
+ for linear programming: faster exact solutions using approximate solvers. In Sandy
1101
+ Irani, editor, 61st IEEE Annual Symposium on Foundations of Computer Science,
1102
+ FOCS 2020, Durham, NC, USA, November 16-19, 2020, pages 931–942. IEEE,
1103
+ 2020.
1104
+ [GAW19]
1105
+ András Gilyén, Srinivasan Arunachalam, and Nathan Wiebe. Optimizing quantum
1106
+ optimization algorithms via faster quantum gradient computation. In Proceedings of
1107
+ the Thirtieth Annual ACM-SIAM Symposium on Discrete Algorithms, pages 1425–
1108
+ 1444. SIAM, 2019.
1109
+ [GK95]
1110
+ Michael D. Grigoriadis and Leonid G. Khachiyan. A sublinear-time randomized
1111
+ approximation algorithm for matrix games. Operation Research Letters, 18(2):53–
1112
+ 58, 1995.
1113
+ [GLG22]
1114
+ Sevag Gharibian and François Le Gall. Dequantizing the quantum singular value
1115
+ transformation: Hardness and applications to quantum chemistry and the quantum
1116
+ pcp conjecture. In Proceedings of the 54th Annual ACM SIGACT Symposium on
1117
+ Theory of Computing, pages 19–32, 2022.
1118
+ [GR02]
1119
+ Lov Grover and Terry Rudolph. Creating superpositions that correspond to effi-
1120
+ ciently integrable probability distributions. CoRR, abs/quant-ph/0208112, 2002.
1121
1123
+ [GSLW19]
1124
+ András Gilyén, Yuan Su, Guang Hao Low, and Nathan Wiebe. Quantum singular
1125
+ value transformation and beyond: exponential improvements for quantum matrix
1126
+ arithmetics. In Moses Charikar and Edith Cohen, editors, Proceedings of the 51st
1127
+ Annual ACM SIGACT Symposium on Theory of Computing, STOC 2019, Phoenix,
1128
+ AZ, USA, June 23-26, 2019, pages 193–204. ACM, 2019.
1129
+ [Haa19]
1130
+ Jeongwan Haah. Product decomposition of periodic functions in quantum signal
1131
+ processing. Quantum, 3:190, 2019.
1132
+ [HHL09]
1133
+ Aram W Harrow, Avinatan Hassidim, and Seth Lloyd.
1134
+ Quantum algorithm for
1135
+ linear systems of equations. Physical review letters, 103(15):150502, 2009.
1136
+ [Jor05]
1137
+ Stephen P Jordan.
1138
+ Fast quantum algorithm for numerical gradient estimation.
1139
+ Physical review letters, 95(5):050501, 2005.
1140
+ [JSWZ21]
1141
+ Shunhua Jiang, Zhao Song, Omri Weinstein, and Hengjie Zhang. A faster algorithm
1142
+ for solving general lps. In Proceedings of the 53rd Annual ACM SIGACT Symposium
1143
+ on Theory of Computing, STOC 2021, 2021, pages 823–832, 2021.
1144
+ [Kar84]
1145
+ Narendra Karmarkar. A new polynomial-time algorithm for linear programming.
1146
+ In Proceedings of the sixteenth annual ACM symposium on Theory of computing,
1147
+ pages 302–311, 1984.
1148
+ [KP16]
1149
+ Iordanis Kerenidis and Anupam Prakash.
1150
+ Quantum recommendation systems.
1151
+ arXiv preprint arXiv:1603.08675, 2016.
1152
+ [KP20]
1153
+ Iordanis Kerenidis and Anupam Prakash. A quantum interior point method for lps
1154
+ and sdps. ACM Transactions on Quantum Computing, 1(1):1–32, 2020.
1155
+ [LCW19]
1156
+ Tongyang Li, Shouvanik Chakrabarti, and Xiaodi Wu. Sublinear quantum algo-
1157
+ rithms for training linear and kernel-based classifiers. In International Conference
1158
+ on Machine Learning, pages 3815–3824. PMLR, 2019.
1159
+ [LMR14]
1160
+ Seth Lloyd, Masoud Mohseni, and Patrick Rebentrost. Quantum principal compo-
1161
+ nent analysis. Nature Physics, 10(9):631–633, 2014.
1162
+ [LS14]
1163
+ Yin Tat Lee and Aaron Sidford. Path finding methods for linear programming:
1164
+ Solving linear programs in o (vrank) iterations and faster algorithms for maximum
1165
+ flow. In 2014 IEEE 55th Annual Symposium on Foundations of Computer Science,
1166
+ pages 424–433. IEEE, 2014.
1167
+ [LS19]
1168
+ Yin Tat Lee and Aaron Sidford. Solving linear programs with sqrt (rank) linear
1169
+ system solves. arXiv preprint arXiv:1910.08033, 2019.
1170
+ [LWCW21]
1171
+ Tongyang Li, Chunhao Wang, Shouvanik Chakrabarti, and Xiaodi Wu. Sublinear
1172
+ classical and quantum algorithms for general matrix games. In Proceedings of the
1173
+ AAAI Conference on Artificial Intelligence, volume 35, pages 8465–8473, 2021.
1174
+ [Nem04]
1175
+ Arkadi Nemirovski. Prox-method with rate of convergence O(1/t) for variational in-
1176
+ equalities with lipschitz continuous monotone operators and smooth convex-concave
1177
+ saddle point problems. SIAM Journal on Optimization, 15(1):229–251, 2004.
1178
1180
+ [Nes07]
1181
+ Yurii Nesterov. Dual extrapolation and its applications to solving variational in-
1182
+ equalities and related problems. Mathematical Programing, 109(2-3):319–344, 2007.
1183
+ [Neu28]
1184
+ John Von Neumann. Zur theorie der gesellschaftsspiele. Mathematische Annalen,
1185
+ 100:295–320, 1928.
1186
+ [NJLS09]
1187
+ Arkadi Nemirovski, Anatoli B. Juditsky, Guanghui Lan, and Alexander Shapiro.
1188
+ Robust stochastic approximation approach to stochastic programming. SIAM J.
1189
+ Optim., 19(4):1574–1609, 2009.
1190
+ [Ren88]
1191
+ James Renegar. A polynomial-time algorithm, based on newton’s method, for linear
1192
+ programming. Mathematical programming, 40(1):59–93, 1988.
1193
+ [RML14]
1194
+ Patrick Rebentrost, Masoud Mohseni, and Seth Lloyd. Quantum support vector
1195
+ machine for big data classification. Physical review letters, 113(13):130503, 2014.
1196
+ [SV14]
1197
+ Sushant Sachdeva and Nisheeth K. Vishnoi. Faster algorithms via approximation
1198
+ theory. Found. Trends Theor. Comput. Sci., 9(2):125–210, 2014.
1199
+ [Tan19]
1200
+ Ewin Tang. A quantum-inspired classical algorithm for recommendation systems. In
1201
+ Proceedings of the 51st Annual ACM SIGACT Symposium on Theory of Computing,
1202
+ pages 217–228, 2019.
1203
+ [vAG19]
1204
+ Joran van Apeldoorn and András Gilyén. Quantum algorithms for zero-sum games.
1205
+ CoRR, abs/1904.03180, 2019.
1206
+ [vAGGdW20a] Joran van Apeldoorn, András Gilyén, Sander Gribling, and Ronald de Wolf. Convex
1207
+ optimization using quantum oracles. Quantum, 4:220, 2020.
1208
+ [VAGGdW20b] Joran Van Apeldoorn, András Gilyén, Sander Gribling, and Ronald de Wolf. Quan-
1209
+ tum sdp-solvers: Better upper and lower bounds. Quantum, 4:230, 2020.
1210
+ [vdB20]
1211
+ Jan van den Brand. A deterministic linear program solver in current matrix mul-
1212
+ tiplication time. In Proceedings of the Thirty-first Annual ACM-SIAM Symposium
1213
+ on Discrete Algorithms, SODA 2020, 2020, pages 259–278, 2020.
1214
+ [vdBLL+21]
1215
+ Jan van den Brand, Yin Tat Lee, Yang P. Liu, Thatchaphol Saranurak, Aaron
1216
+ Sidford, Zhao Song, and Di Wang. Minimum cost flows, mdps, and ℓ1-regression
1217
+ in nearly linear time for dense instances. In Proceedings of the 53rd Annual ACM
1218
+ SIGACT Symposium on Theory of Computing, STOC 2021, 2021, pages 859–869,
1219
+ 2021.
1220
+ [vdBLSS20]
1221
+ Jan van den Brand, Yin Tat Lee, Aaron Sidford, and Zhao Song. Solving tall dense
1222
+ linear programs in nearly linear time. In Proceedings of the 52nd Annual ACM
1223
+ SIGACT Symposium on Theory of Computing, pages 775–788, 2020.
1224
1226
+ A
1227
+ Solving matrix games with a Gibbs sampling oracle
1228
+ In this section, we prove Proposition 1, which shows how to solve a zero-sum matrix game using
1229
+ an approximate Gibbs sampling oracle (via Algorithm 1). To briefly motivate the algorithm we use
1230
+ and our proof of its guarantees, we recall the problem we consider is of the form
1231
+ minv∈∆n maxu∈∆m f(u, v) := u⊤Av, where ∥A∥max ≤ 1,      (5)
+ and we define the associated gradient operator as
+ g(u, v) = (−Av, A⊤u).      (6)
1240
+ Taking (stochastic) mirror descent steps on the gradient operator in (5) is well-known to yield an
1241
+ approximate NE to the matrix game [Bub15]. We show that an approximate implementation of this
1242
+ strategy, combined with appropriate subsampling, efficiently yields an approximate NE. We begin
1243
+ by making the following observation.
1244
+ Lemma 5. Let u, ˜u ∈ ∆m have ∥u − ˜u∥1 ≤ δ. Let ˜g := Ai: where i ∼ ˜u, and g := A⊤u. Then,
1245
+ ∥g − E˜g∥∞ ≤ δ.
1246
+ Proof. Note that E˜g = A⊤˜u, and ∥A⊤(u − ˜u)∥∞ ≤ ∥u − ˜u∥1 ≤ δ since ∥A∥max ≤ 1.
1250
+ We next present a variant of the classical mirror descent analysis, which bounds the expected
1251
+ approximation quality of iterates of Algorithm 1 prior to subsampling.
1252
+ Proposition 3. Let δ ≤ ǫ/20, η = ǫ/15 and T ≥ 6 log(mn)/(ηǫ) in Algorithm 1. Let the iterates of
+ Algorithm 1 be {xt, yt}_{t=0}^{T−1}, and denote ut := exp(Ayt)/∥exp(Ayt)∥1, vt := exp(−A⊤xt)/∥exp(−A⊤xt)∥1
+ for all 0 ≤ t < T. For (¯u, ¯v) := (1/T) Σ_{t=0}^{T−1} (ut, vt), we have
+ E[maxu∈∆m u⊤A¯v − minv∈∆n ¯u⊤Av] ≤ ǫ.      (7)
1278
+ Proof. By definition of the updates, at every iteration 0 ≤ t ≤ T − 1, we have
+ ut+1 = argmin_{u∈∆m} { η⟨−A:jt, u⟩ + Σ_{i∈[m]} [u]i log([u]i/[ut]i) },
+ vt+1 = argmin_{v∈∆n} { η⟨Ait:, v⟩ + Σ_{j∈[n]} [v]j log([v]j/[vt]j) }.
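+ These KL-regularized argmin steps have the familiar closed form of multiplicative-weights updates;
+ the following minimal sketch of one entropic mirror descent step over the simplex is illustrative.
+ import numpy as np
+
+ def entropic_step(p, grad, eta):
+     # argmin over the simplex of eta*<grad, q> + KL(q || p) has the closed form
+     # q_i proportional to p_i * exp(-eta * grad_i).
+     q = p * np.exp(-eta * np.asarray(grad))
+     return q / q.sum()
+
+ # e.g. the u-update above: u_{t+1} = entropic_step(u_t, -A[:, j_t], eta)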
1301
+ Consequently, by the optimality conditions of ut+1 and vt+1 respectively, we have for any u ∈ ∆m,
+ v ∈ ∆n, and letting Vx(x′) := Σ_k [x′]k log([x′]k/[x]k) be the KL divergence between simplex variables of
+ appropriate dimension,
+ ⟨−A:j, ut − u⟩ + ⟨Ai:, vt − v⟩ ≤ (1/η)(Vut(u) − Vut+1(u) + Vvt(v) − Vvt+1(v))
+ + (⟨−A:j, ut − ut+1⟩ − (1/η)Vut(ut+1)) + (⟨Ai:, vt − vt+1⟩ − (1/η)Vvt(vt+1))
+ ≤ (1/η)(Vut(u) − Vut+1(u) + Vvt(v) − Vvt+1(v)) + (η/2)∥A:j∥∞² + (η/2)∥Ai:∥∞²,      (8)
1333
+
1334
+ where for the last inequality we use Hölder’s inequality and the fact that V is 1-strongly convex in
1335
+ the ℓ1 norm (by Pinsker’s inequality). Averaging the above for 0 ≤ t < T, and denoting wt := (ut, vt)
1336
+ and ˜gt := (−A:jt, Ait:), we obtain for any w = (u, v) ∈ ∆m × ∆n,
1337
+ 1
1338
+ T
1339
+ T−1
1340
+
1341
+ t=0
1342
+ ⟨˜gt, wt − w⟩ ≤ 1
1343
+ ηT (Vu0(u) + Vv0(v)) + η.
1344
+ (9)
1345
+ In the above, we further recalled the bound ∥A∥max ≤ 1 by assumption. In order to bound the
1346
+ deviation of the left-hand side from its expectation, we use a “ghost iterate” argument following
1347
+ [NJLS09, CJST19]. In particular, we define iterates ˜ut, ˜vt as follows: let ˜u0 ← u0, ˜v0 ← v0, and
1348
+ then for each 0 ≤ t < T, define
1349
+ ˜ut+1 := argminu∈∆m
1350
+
1351
+
1352
+ η⟨−Avt + A:jt, ¯u⟩ +
1353
+
1354
+ i∈[m]
1355
+ [u]i log [u]i
1356
+ [˜ut]i
1357
+
1358
+
1359
+  ,
1360
+ ˜vt+1 := argminv∈∆n
1361
+
1362
+
1363
+ η⟨A⊤ut − A:it, ¯v⟩ +
1364
+
1365
+ j∈[n]
1366
+ [v]j log [v]j
1367
+ [˜vt]j
1368
+
1369
+
1370
+  ,
1371
+ where i, j above are the same coordinates as were used in defining the updates to ut+1 and vt+1.
1372
+ By an analogous bound to (8), where we note
1373
+ ��A:jt − A⊤vt
1374
+ ��
1375
+ ∞ , ∥Aut − Ait:∥∞ ≤ 2,
1376
+
1377
+ −A⊤vt + A:jt, ˜ut − u
1378
+
1379
+ + ⟨Aut − Ait:, ˜vt − v⟩ ≤ 1
1380
+ η
1381
+
1382
+ V˜ut(u) − V˜ut+1(u) + V˜vt(v) − V˜vt+1(v)
1383
+
1384
+ + 4η.
1385
+ Averaging the above for 0 ≤ t < T, and denoting ˜wt := (˜ut, ˜vt) and gt := g(wt) (see (5)), we obtain
1386
+ for any w = (u, v) ∈ ∆m × ∆n,
1387
+ 1
1388
+ T
1389
+
1390
+ t∈[T]−1
1391
+ ⟨gt − ˜gt, ˜wt − w⟩ ≤ 1
1392
+ ηT (Vu0(u) + Vv0(v)) + 4η.
1393
+ (10)
1394
+ Summing inequalities (9) and (10), and maximizing over w = (u, v) ∈ ∆m × ∆n, we have
1395
+ max
1396
+ w∈∆m×∆n
1397
+ 1
1398
+ T
1399
+ T−1
1400
+
1401
+ t=0
1402
+ ⟨gt, wt − w⟩ ≤
1403
+ max
1404
+ u∈∆n,v∈∆m
1405
+ 2
1406
+ ηT (Vu0(u) + Vv0(v))
1407
+ + 5η + 1
1408
+ T
1409
+ T−1
1410
+
1411
+ t=0
1412
+ ⟨gt − ˜gt, wt − ˜wt⟩.
1413
+ (11)
1414
+ Taking expectations over the above, we have
1415
+ E
1416
+
1417
+ max
1418
+ w∈∆m×∆n
1419
+ 1
1420
+ T
1421
+ T−1
1422
+
1423
+ t=0
1424
+ ⟨gt, wt − w⟩
1425
+
1426
+
1427
+ max
1428
+ u∈∆n,v∈∆m
1429
+ 2
1430
+ ηT [Vu0(u) + Vv0(v)]
1431
+ + 5η + E
1432
+
1433
+ 1
1434
+ T
1435
+ T−1
1436
+
1437
+ t=0
1438
+ ⟨gt − ˜gt, wt − ˜wt⟩
1439
+
1440
+ (i)
1441
+ ≤ 2 log(mn)
1442
+ ηT
1443
+ + 5η + 1
1444
+ T
1445
+
1446
+ t∈[T]−1
1447
+ ⟨gt − E˜gt, wt − ¯wt⟩,
1448
+ (ii)
1449
+ ≤ 2 log(mn)
1450
+ ηT
1451
+ + 5η + 4δ
1452
+ (iii)
1453
+ ≤ ǫ.
1454
+ 20
1455
+
1456
+ In the above, (i) used the diameter bound of the KL divergence from the uniform distribution, i.e.
1457
+ maxu∈∆m Vu0(u) = log m (and a similar bound for Vv0(v)). Further, (ii) uses that ˜gt is conditionally
1458
+ independent of wt and ˜wt, and by the assumption on the Gibbs sampler ∥gt − E˜gt∥∞ ≤ δ (via
1459
+ Lemma 5), and Hölder, and (iii) uses our choices of T, η and δ.
1460
+ Finally, we note that the desired claim follows by linearity: for any w = (u, v),
1461
+ 1
1462
+ T
1463
+ T−1
1464
+
1465
+ t=0
1466
+ ⟨gt, wt − w⟩ =
1467
+
1468
+ g
1469
+
1470
+ 1
1471
+ T
1472
+ T−1
1473
+
1474
+ t=0
1475
+ wt
1476
+
1477
+ , 1
1478
+ T
1479
+ T−1
1480
+
1481
+ t=0
1482
+ wt − w
1483
+
1484
+ = u⊤A¯v − ¯u⊤Av.
1485
+ By using a simple martingale argument (inspired by those in [AL17, CDST19]) to bound the
+ error term in (11), we show that the guarantee of Proposition 3 holds with high probability.
+ Corollary 3. Let α ∈ (0, 1), and let δ ≤ ε/20, η = ε/20 and T ≥ 8 log(mn)/(ηε) + 2048 log(1/α)/ε^2
+ in Algorithm 1. Then with probability at least 1 − α, following notation of Proposition 3, (ū, v̄) are
+ an ε-approximate NE for A.
+ Proof. Consider the filtration given by F_t = σ(u_0, v_0, g̃_0, . . . , g̃_t, u_{t+1}, v_{t+1}). We will bound the
+ term Σ_{t=0}^{T−1} ⟨g_t − g̃_t, w_t − w̃_t⟩ in (11). To do so, we define a martingale difference sequence of the
+ form D_t := ⟨g_t − g̃_t, w_t − w̃_t⟩ − ⟨g_t − E[g̃_t | F_{t−1}], w_t − w̃_t⟩, which is adapted to the filtration F_t. We
+ first note that D_t ≤ ∥g_t − g̃_t∥_∞ ∥w_t − w̃_t∥_1 ≤ 8 with probability 1. Consequently, applying
+ the Azuma-Hoeffding inequality yields
+ Σ_{t=0}^{T−1} D_t ≤ sqrt( 128 T log(1/α) ) with probability ≥ 1 − α.
+ Plugging this back into (11) and using the KL divergence range bound, Lemma 5 with our definition
+ of O_gibbs, and our choices of parameters, we thus have with probability 1 − α,
+ max_{w ∈ ∆^m × ∆^n} (1/T) Σ_{t=0}^{T−1} ⟨g_t, w_t − w⟩ ≤ 2 log(mn)/(ηT) + 5η + 4δ + sqrt( 128 log(1/α)/T ) ≤ ε.        (12)
+ The remainder of the proof follows analogously to Proposition 3.
+ The Gibbs sampling oracles implicitly maintain access to u_t ∝ exp(Ay_t) and v_t ∝ exp(−A⊤x_t),
+ which by averaging gives (ū, v̄) = (1/T) Σ_{t=0}^{T−1} (u_t, v_t) as one approximate equilibrium, as guaranteed
+ in Corollary 3. To turn the implicitly maintained iterates into an actual classical output, we subsample
+ the iterates. Below we formally show one can take the empirical average of independent samples
+ from distributions close to ū and v̄ to also obtain an approximate equilibrium (with the same
+ approximation factor up to constant factors) with high probability.
+ Lemma 6. Suppose ū = (1/T) Σ_{t=0}^{T−1} u_t for {u_t}_{t=0}^{T−1} ⊂ ∆^m and v̄ = (1/T) Σ_{t=0}^{T−1} v_t for
+ {v_t}_{t=0}^{T−1} ⊂ ∆^n are an ε-approximate NE for A. Further suppose that for some δ ∈ (0, 1),
+ {ũ_t}_{t=0}^{T−1} ⊂ ∆^m, {ṽ_t}_{t=0}^{T−1} ⊂ ∆^n, and for all 0 ≤ t < T, we have ∥ũ_t − u_t∥_1 ≤ δ and
+ ∥ṽ_t − v_t∥_1 ≤ δ. Let û = (1/T) Σ_{t=0}^{T−1} e_{i_t}, where each e_{i_t} ∈ R^m is sampled independently
+ according to ũ_t; similarly, let v̂ = (1/T) Σ_{t=0}^{T−1} e_{j_t}, where each e_{j_t} ∈ R^n is sampled independently
+ according to ṽ_t. Suppose T ≥ 16 log(mn/α)/ε^2. Then with probability at least 1 − α, (û, v̂) are a
+ (2ε + 2δ)-approximate NE for A.
+ Proof. First, let ũ_avg = (1/T) Σ_{t=0}^{T−1} ũ_t and ṽ_avg = (1/T) Σ_{t=0}^{T−1} ṽ_t. By convexity of norms, we have
+ ∥ũ_avg − ū∥_1 ≤ δ and ∥ṽ_avg − v̄∥_1 ≤ δ, and hence under the NE approximation guarantee of (ū, v̄)
+ and Hölder’s inequality,
+ max_{u ∈ ∆^m} u⊤Aṽ_avg − min_{v ∈ ∆^n} ũ_avg⊤Av ≤ ε + 2δ.
+ Let z be a fixed vector in [−1, 1]^n. By Hoeffding’s inequality, since each random variable ⟨z, e_{j_t}⟩ lies
+ in the range [−1, 1] and Ev̂ = ṽ_avg, we have that
+ Pr[ |⟨z, v̂ − ṽ_avg⟩| ≥ ε/2 ] ≤ 2 exp( −Tε^2/8 ) ≤ α/(m + n).        (13)
+ Next, note that max_{u ∈ ∆^m} u⊤Aṽ_avg is achieved by a basis vector u = e_i. Hence, applying a union
+ bound over (13) for all z = A_{i:} shows that with probability at least 1 − αm/(m + n),
+ max_{u ∈ ∆^m} u⊤Av̂ ≤ max_{u ∈ ∆^m} u⊤Aṽ_avg + ε/2.
+ By symmetry, with probability at least 1 − αn/(m + n),
+ min_{v ∈ ∆^n} û⊤Av ≥ min_{v ∈ ∆^n} ũ_avg⊤Av − ε/2.
+ The conclusion follows from a union bound, and combining the above three displays.
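+ For concreteness, the following is a minimal sketch (our own illustration, with hypothetical function
+ names) of the subsampling step analyzed in Lemma 6: it replaces the implicitly maintained averages
+ ū, v̄ by empirical averages of one sample drawn from each (approximate) iterate.
+
+ import numpy as np
+
+ def subsample_equilibrium(iterate_samplers_u, iterate_samplers_v, m, n):
+     """Each element of iterate_samplers_u / _v is a callable returning one index drawn
+     from a distribution delta-close to u_t / v_t (e.g. an approximate Gibbs oracle)."""
+     T = len(iterate_samplers_u)
+     u_hat = np.zeros(m)
+     v_hat = np.zeros(n)
+     for sample_u, sample_v in zip(iterate_samplers_u, iterate_samplers_v):
+         u_hat[sample_u()] += 1.0 / T   # empirical average of e_{i_t}, i_t ~ u_t (approximately)
+         v_hat[sample_v()] += 1.0 / T   # empirical average of e_{j_t}, j_t ~ v_t (approximately)
+     return u_hat, v_hat
+
+ The returned vectors have at most T nonzero entries each, which is what makes the classical output
+ cheap to report.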
+ Finally, we put these pieces together to give a complete guarantee.
+ Proposition 1. Let A ∈ R^{m×n} satisfy ∥A∥_max ≤ 1 and ε, α ∈ (0, 1). Let δ ≤ ε/20, η = ε/60, and
+ T = Θ(ε^{−2} log(mn/α)) for an appropriate constant. With probability ≥ 1 − α, Algorithm 1 outputs an
+ ε-approximate NE for A.
+ Proof. We follow notation of Proposition 3. By applying Corollary 3 (up to constant factors), we
+ have that with probability at least 1 − α/2, ū := (1/T) Σ_{t=0}^{T−1} u_t and v̄ := (1/T) Σ_{t=0}^{T−1} v_t satisfy
+ max_{u ∈ ∆^m} u⊤Av̄ − min_{v ∈ ∆^n} ū⊤Av ≤ ε/3.
+ Finally, Lemma 6 (with failure probability α/2) and a union bound yields the desired conclusion.
+ B Quantum rejection sampling with a hint
+ In this section, we prove Proposition 2, which gives a dynamic quantum rejection sampling subroutine
+ and bounds its cost of implementation. Our result is an extension of analogous developments in
+ [vAG19], but is stated more generally to allow for the use of an appropriate “hint” vector in the
+ rejection sampling procedure. We build up to our main result in several pieces.
+ Amplitude amplification. First, for a quantum decision algorithm which applies a unitary U and
+ then measures, yielding an accepting state with probability α, quantum amplitude amplification
+ [BHMT02] shows we can apply U ≈ α^{−1/2} times to obtain an accepting state with high probability.
+ Proposition 4 (Theorem 3, [BHMT02]). Let S ⊆ {0, 1}^s, let U be an s-qubit quantum oracle, and
+ let α be the probability that measuring the result of applying U yields an accepting state. There is a
+ (quantum) algorithm using O(α^{−1/2} log(1/δ)) queries to U and O(log s · log(1/δ)) additional time that
+ returns some s ∈ S with probability ≥ 1 − δ.
+ Loading from trees. Given a dynamic vector x ∈ R^m_{≥0} which is supported in an appropriate
+ efficient data structure SamplerTree (see Lemma 1), and a known bound β ≥ ∥x∥_1, we recall a result
+ of [GR02] which allows us to form a superposition of the entries in x (suitably rescaled).
+ Lemma 7. Let x ∈ R^m_{≥0} correspond to an instance of SamplerTree, and β ≥ ∥x∥_1. We can maintain
+ a quantum oracle O_SamplerTree which takes O(log m) time to apply, such that the total cost of building
+ O_SamplerTree after T calls to Update is O(T log m), and
+ O_SamplerTree |0⟩^{⊗(a+1)} = Σ_{i ∈ [m]} sqrt(x_i/β) |0⟩|i⟩ + sqrt(1 − ∥x∥_1/β) |1⟩|g⟩.
+ Proof. This is implicit in [GR02]. We first apply a 1-qubit gate to condition on selecting from the
+ tree (with probability ∥x∥_1/β), and then apply the [GR02] procedure conditioned on the first qubit
+ being |0⟩, which controls for one qubit at a time while propagating subtree sums (provided by
+ SamplerTree via SubtreeSum). The cost to build the circuit follows because on an Update we need
+ to change the gates corresponding to the relevant leaf-to-root path.
+ Corollary 4. Let x ∈ R^m_{≥0} correspond to an instance of SamplerTree, let β ≥ ∥x∥_1, and suppose
+ A ∈ R^{m×n} has ∥A∥_max ≤ 1. We can maintain a quantum oracle O_{A⊤x} which takes O(log m) time
+ to apply, with total building cost O(T log m) after T calls to Update, such that for any j ∈ [n],
+ O_{A⊤x} |0⟩^{⊗(a+2)}|j⟩ = |0⟩ ( Σ_{i ∈ [m]} sqrt(A_{ij} x_i/β) |0⟩|i⟩ + |1⟩|g⟩ ) |j⟩.
+ Proof. We apply O′_A (see Section 2) to the output of O_SamplerTree, ignoring the additional qubit.
+ We remark here that the additional qubit in Corollary 4 will shortly become useful in constructing
+ an appropriate block-encoding of a scaling of diag(A⊤x).
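+ As an aside, a classical analogue of the SamplerTree interface assumed above (Update, SubtreeSum,
+ and sampling proportional to the stored weights) can be sketched as follows; the class and method
+ names are our own illustration, not the paper's data structure.
+
+ import random
+
+ class SamplerTree:
+     """Binary tree over m slots storing nonnegative weights, with O(log m) updates,
+     subtree-sum queries, and sampling of index i with probability x_i / ||x||_1."""
+     def __init__(self, m):
+         self.m = m
+         size = 1
+         while size < m:
+             size *= 2
+         self.size = size
+         self.tree = [0.0] * (2 * size)       # tree[1] is the root; leaves at size .. size+m-1
+
+     def update(self, i, value):              # set x_i <- value, repairing the leaf-to-root path
+         pos = self.size + i
+         self.tree[pos] = value
+         pos //= 2
+         while pos >= 1:
+             self.tree[pos] = self.tree[2 * pos] + self.tree[2 * pos + 1]
+             pos //= 2
+
+     def subtree_sum(self, node):             # sum of the weights under an internal node
+         return self.tree[node]
+
+     def sample(self):                        # return i with probability x_i / ||x||_1
+         r = random.random() * self.tree[1]
+         node = 1
+         while node < self.size:
+             left = self.tree[2 * node]
+             if r < left:
+                 node = 2 * node
+             else:
+                 r -= left
+                 node = 2 * node + 1
+         return node - self.size
+
+ The quantum loader in Lemma 7 uses exactly these subtree sums to set its rotation angles, which is
+ why an Update only forces O(log m) gate changes.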
+ Polynomial approximation. In order to give approximate Gibbs samplers for the types of dynamic
+ vectors Algorithm 1 encounters, we further require some tools from polynomial approximation
+ theory. We first state a helper result on boundedly approximating the exponential, a variant of
+ which was also used in [vAG19]. We provide a proof in Appendix C.
+ Lemma 8 (Lemma 7, [vAG19]). Let β ≥ 1, ξ ≤ 1/10. There is a polynomial P_{β,ξ} of degree O(β log(1/ξ))
+ such that max_{x ∈ [−1,1]} |P_{β,ξ}(x)| ≤ 3 and max_{x ∈ [−1,0]} |P_{β,ξ}(x) − exp(βx)| ≤ ξ.
+ Next, we state a further corollary of Lemma 8 to be used in our rejection sampler.
+ Corollary 5. Let B, δ ≥ 0 and suppose v ∈ R^n has ∥v∥_∞ ≤ B. Further, suppose for some c ≥ 0,
+ −c ≤ max_{j ∈ [n]} v_j ≤ 0. Let q ∈ R^n_{≥0} satisfy q_j ∈ [ℓ, 1] entrywise. Finally, define u_j := v_j/(2B)
+ entrywise. There is a degree-∆ polynomial P, for ∆ = O(B · (c + log(n/(ℓδ)))), such that for
+ w_j := P(u_j)^2 q_j and z_j := exp(2Bu_j) q_j entrywise,
+ ∥ w/∥w∥_1 − z/∥z∥_1 ∥_1 ≤ δ.        (14)
+ Moreover, max_{x ∈ [−1,1]} |P(x)| ≤ 1/2, and ∥w∥_1 ≥ ((1 − δ)/36) ∥z∥_1.
+ Proof. Assume δ ≤ 2, else the statement is clearly true. First, u_j ∈ [−1/2, 0] entrywise by the stated
+ assumptions (since v_j ∈ [−B, 0] entrywise). Let P_{β,ξ}(·) be the polynomial given by Lemma 8 which
+ ξ-approximates exp(β·) on [−1/2, 0]. We define
+ P(u) := (1/6) P_{B,ξ}(u), for ξ := δℓ/(6n exp(c)).
+ The degree bound and absolute value bound of this polynomial follow immediately from Lemma 8,
+ so it remains to show the distance bound. The guarantees of Lemma 8 imply for all j ∈ [n],
+ |6P(u_j) − exp(Bu_j)| ≤ ξ.        (15)
+ We further have that u_j ≤ 0, so exp(Bu_j) ≤ 1. Hence, we also have
+ |6P(u_j) + exp(Bu_j)| ≤ 2 + ξ ≤ 3.
+ Combining yields for all j ∈ [n],
+ |36P(u_j)^2 − exp(2Bu_j)| ≤ 3ξ.        (16)
+ Next, let y_j := 36w_j for all j ∈ [n], and note that y/∥y∥_1 = w/∥w∥_1. We bound
+ ∥ w/∥w∥_1 − z/∥z∥_1 ∥_1 = Σ_{j ∈ [n]} | y_j/∥y∥_1 − z_j/∥z∥_1 |
+   ≤ Σ_{j ∈ [n]} | y_j/∥y∥_1 − y_j/∥z∥_1 | + Σ_{j ∈ [n]} | y_j/∥z∥_1 − z_j/∥z∥_1 |
+   ≤ | 1 − ∥y∥_1/∥z∥_1 | + ∥y − z∥_1/∥z∥_1 ≤ 2 ∥y − z∥_1/∥z∥_1.        (17)
+ By using the definitions of y, z and (16), as well as the assumed ranges on q,
+ ∥y − z∥_1 ≤ 3nξ, and ∥z∥_1 ≥ ℓ exp(−c).
+ The second inequality used that some v_j = 2Bu_j is at least −c by assumption. Combining the above
+ display with (17) and the definition of ξ concludes the proof of (14). Finally, using the bounds on
+ ∥y − z∥_1 and ∥z∥_1 above shows that
+ ∥w∥_1 = (1/36) ∥y∥_1 ≥ ((1 − δ)/36) ∥z∥_1.
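+ The normalized-distance bound (17) is easy to sanity-check numerically; the snippet below is our own
+ illustration, using randomly generated vectors rather than the actual polynomial P, and verifies that
+ two nonnegative vectors close in ℓ_1 have normalized versions within twice the relative ℓ_1 error.
+
+ import numpy as np
+
+ rng = np.random.default_rng(1)
+ z = rng.uniform(0.1, 1.0, size=1000)           # stand-in for z_j = exp(2B u_j) q_j
+ y = np.clip(z + rng.uniform(-1e-3, 1e-3, size=1000), 0.0, None)   # stand-in for y_j = 36 w_j
+
+ lhs = np.abs(y / y.sum() - z / z.sum()).sum()  # || y/||y||_1 - z/||z||_1 ||_1
+ rhs = 2 * np.abs(y - z).sum() / z.sum()        # the bound from (17)
+ assert lhs <= rhs + 1e-12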
+ Block-encoding. Our approximate Gibbs oracle follows an implementation strategy pioneered by
+ [GSLW19] termed “block-encoding.” Specifically, we follow [GSLW19] and say that U, an (a + ℓ)-
+ qubit quantum gate, is an ℓ-bit block-encoding of M if the top-left 2^a × 2^a submatrix of U is M.
+ Block-encoded matrices admit efficient composable operations, such as the application of linear
+ combinations and bounded polynomials. We summarize these properties in the following.
+ Proposition 5 (Lemma 52, [GSLW19]). Let U_1 and U_2 be ℓ-bit block-encodings of M_1, M_2 of the
+ same size. There is an O(ℓ)-bit block-encoding of (1/2)M_1 + (1/2)M_2 which takes the same asymptotic
+ time to apply as applying U_1 and U_2.
+ Proposition 6 (Theorem 56, [GSLW19]). Let U be an ℓ-bit block-encoding of M, and P : [−1, 1] →
+ [−1/2, 1/2] be a degree-∆ polynomial. There is an O(ℓ)-bit block-encoding of P(M) which can be applied
+ in O(∆) applications of U and U† and O(ℓ∆) additional time.
+ We also demonstrate that an application of Corollary 4 yields a simple block-encoding of
+ diag(A⊤x/β). A similar construction previously appeared in [vAG19].
+ Corollary 6. Let x ∈ R^m_{≥0} correspond to an instance of SamplerTree, and β ≥ ∥x∥_1. Let M :=
+ diag(A⊤x/β) and U := O*_{A⊤x}(SWAP_12 ⊗ I)O_{A⊤x}, where SWAP_12 swaps the first two qubits and
+ O_{A⊤x} is from Corollary 4. Then U is a block-encoding of M, and can be applied in time O(log m),
+ with total building cost O(T log m) after T calls to Update.
+ Proof. Define w_{ij} := A_{ij} x_i/β for convenience. By the definition of O_{A⊤x}, we have that
+ (SWAP_12 ⊗ I) O_{A⊤x} ( |0⟩^{⊗(a+2)}|j⟩ ) = ( |00⟩ Σ_{i ∈ [m]} sqrt(w_{ij}) |i⟩ + |10⟩|g⟩ ) |j⟩.
+ Hence, for j, j′ ∈ [n], we compute ⟨j′|⟨0|^{⊗(a+2)} U |0⟩^{⊗(a+2)}|j⟩ as:
+ ( |00⟩ Σ_{i ∈ [m]} sqrt(w_{ij′}) |i⟩ + |01⟩|g⟩ )* ( |00⟩ Σ_{i ∈ [m]} sqrt(w_{ij}) |i⟩ + |10⟩|g⟩ )
+   = Σ_{i ∈ [m]} w_{ij} = [A⊤x]_j/β if j = j′, and 0 if j ≠ j′.
+ In particular the |01⟩ and |10⟩ terms disappear, and |j⟩, |j′⟩ are orthogonal unless j = j′. In the
+ above, we required that sqrt(w_{ij})* · sqrt(w_{ij}) = w_{ij}, which is only true if w_{ij} is nonnegative. To
+ bypass this issue, we will implement the two copies of O_{A⊤x} in slightly different ways, to obtain the
+ correct signing. For notational clarity, we let O_L be the oracle which is conjugated on the left and
+ O_R be the oracle on the right, such that U = (O_L)*(SWAP_12 ⊗ I)(O_R). Note that x is entrywise
+ nonnegative and β > 0, and hence the only factor determining the sign of w_{ij} is A_{ij}. When
+ A_{ij} ≥ 0, we will define the oracles O′_A used to load sqrt(A_{ij}) for O_L and O_R in a consistent way
+ (i.e. use the same-signed square root), so that sqrt(w_{ij})^2 = w_{ij}. When A_{ij} < 0 we will define them
+ in an inconsistent way, so that after the conjugation operation, −sqrt(|w_{ij}|) · sqrt(|w_{ij}|) = w_{ij}. We
+ have thus shown that ⟨0|^{⊗(a+2)} U |0⟩^{⊗(a+2)} = M, which implies the first conclusion. To see the
+ second, all our gates are reversible (arithmetic circuits are reversible, and O_A is its own inverse), and
+ hence the complexity of applying O*_{A⊤x} is the same as that of O_{A⊤x}.
+ Finally, we put together the pieces and prove Proposition 2, which we use repeatedly throughout
+ the paper to implement our Gibbs sampling oracles.
+ Proposition 2. Let x ∈ R^m_{≥0} correspond to an instance of SamplerTree, and β ≥ ∥x∥_1. Let p be
+ the Gibbs distribution associated with A⊤x, let Z := Σ_{j ∈ [n]} exp([A⊤x]_j) and Ẑ ∈ [Z, CZ] for some
+ C ≥ 1. Finally, let q ∈ R^n have entries classically queriable in O(1) time, satisfy q ≥ p entrywise,
+ q_j ∈ [δ/n, 1] for all j ∈ [n], and ∥q∥_1 = ρ. Suppose Ẑ, C, ρ, and β are explicitly known. Given
+ a quantum oracle for A ∈ R^{m×n} (defined in Section 2) with ∥A∥_max ≤ 1, we can implement a
+ δ-approximate Gibbs oracle which has query cost O( sqrt(ρC) · β log^4(Cmn/δ) ). The total additional
+ cost incurred if x undergoes T Update calls which preserve the invariants on Ẑ, C, ρ, β is O(T log m).
+ Proof. Throughout the proof, let δ ← min(1/2, δ) and B := 4(β + log(Cn/δ)). Also define ℓ := δ/n
+ (following notation of Corollary 5). We first observe that since max_{j ∈ [n]} [A⊤x]_j ≤ log Z ≤
+ max_{j ∈ [n]} [A⊤x]_j + log n,
+ −log(Cn) ≤ max_{j ∈ [n]} ( [A⊤x]_j − log(Ẑ q_j) ) ≤ 0.
+ Here, the upper bound used that for all j ∈ [n], exp([A⊤x]_j)/(Ẑ q_j) = (p_j/q_j) · (Z/Ẑ) ≤ 1 by assumption.
+ Hence, for v := A⊤x − log(Ẑ q) entrywise,
+ −c ≤ max_{j ∈ [n]} v_j ≤ 0 for c := log(Cn).
+ Next, we note log(Ẑ q) is entrywise bounded in magnitude by B/2:
+ log(Ẑ q_j) ≤ log(CZ) ≤ log( n · max_{j ∈ [n]} exp([A⊤x]_j) ) + log C ≤ B/2,
+ log(Ẑ q_j) ≥ log Z + log(δ/n) ≥ min_{j ∈ [n]} [A⊤x]_j − log(n/δ) ≥ −B/2.
+ Define M_1 := diag(A⊤x/(2B)) and M_2 := diag(−(1/(2B)) log(Ẑ q)). By the calculations above, we have
+ ∥M_2∥_op ≤ 1/2, and similarly it is clear that ∥M_1∥_op ≤ 1/2 because ∥A⊤x∥_∞ ≤ β. Moreover, by
+ using Corollary 6 with β ← B, we obtain U_1, a block-encoding of M_1 applicable in O(log m) time.
+ Using a similar construction as Corollary 6, since q, B, and Ẑ are all efficiently classically queriable,
+ we obtain U_2, a block-encoding of M_2 applicable in O(1) time. Hence, Proposition 5 yields U, a
+ block-encoding of
+ M_1 + M_2 = diag( v/(2B) ),
+ which can be applied in O(log mn) time. Next, let P be the degree-∆ = O(B log(Cn/δ)) polynomial
+ from Corollary 5, parameterized by B, v, c, q, ℓ as defined earlier. Corollary 5 shows that P :
+ [−1, 1] → [−1/2, 1/2]. Thus, Proposition 6 then yields U′, a block-encoding of diag( P(v/(2B)) ), which
+ can be applied in O(∆ · log mn) time. Furthermore, since q and ρ are efficiently classically queriable,
+ we can define a gate O_q which is applicable in O(1) time and acts as
+ O_q |0⟩^{⊗(b+1)} = |0⟩ Σ_{j ∈ [n]} sqrt(q_j/ρ) |j⟩ + |1⟩|g⟩.
+ Applying U′ to the output of O_q with appropriate ancilla qubits then yields
+ |0⟩^{⊗O(1)} Σ_{j ∈ [n]} sqrt( q_j P(u_j)^2/ρ ) |j⟩|g_j⟩ + |g′⟩, where u_j := v_j/(2B) for all j ∈ [n].
+ Post-selecting on the first register being the all-zeroes state and measuring the register corresponding
+ to j, we see that we obtain a sample j ∈ [n] with probability proportional to q_j P(u_j)^2. By
+ Corollary 5, conditioned on the sample succeeding, the resulting distribution is δ-close in ℓ_1 to the
+ distribution proportional to q ∘ exp(v) ∝ exp(A⊤x), and hence the result is a δ-approximate Gibbs
+ oracle. Finally, we bound the query cost of the oracle. Define w_j := P(u_j)^2 q_j and z_j := exp(v_j) q_j
+ as in Corollary 5. By the definitions of v and Ẑ,
+ ∥z∥_1 = Σ_{j ∈ [n]} exp([A⊤x]_j)/Ẑ ∈ [ C^{−1}, 1 ].
+ Moreover, the last conclusion in Corollary 5 shows ∥w∥_1 ≥ (1/72) ∥z∥_1 ≥ (72C)^{−1}. Hence,
+ Σ_{j ∈ [n]} q_j P(u_j)^2/ρ = ∥w∥_1/ρ ≥ 1/(72Cρ).
+ In other words, we have an oracle which we can apply in time O(∆ · log mn) which correctly returns
+ a sample with probability α ≥ 1/(72Cρ). By applying Proposition 4 to improve the success probability,
+ we obtain the desired conclusion at a O( sqrt(Cρ) log(1/δ) ) overhead.
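+ The quantum routine above is an amplified rejection sampler run against the hint q. Its classical
+ analogue (our own illustrative sketch, not the paper's procedure) makes the roles of q ≥ p and of the
+ acceptance probability ∥w∥_1/ρ concrete.
+
+ import numpy as np
+
+ def gibbs_rejection_sample(scores, q, logZhat, rng=np.random.default_rng(0)):
+     """Classical sketch: sample j with probability proportional to exp(scores[j]) by rejection
+     against a hint q, assuming q_j >= exp(scores[j]) / Zhat entrywise and Zhat >= sum_j exp(scores[j])."""
+     accept = np.exp(scores - logZhat) / q      # acceptance ratios, <= 1 for a valid hint
+     proposal = q / q.sum()                     # propose j ~ q / rho, with rho = ||q||_1
+     trials = 0
+     while True:
+         trials += 1
+         j = rng.choice(len(q), p=proposal)
+         if rng.random() < accept[j]:
+             return j, trials                   # E[trials] = rho * Zhat / Z
+
+ Per trial, the success probability is Z/(Ẑρ) = 1/(Rρ) in the notation of Corollary 2 below; the quantum
+ version evaluates the exponential through the bounded polynomial of Corollary 5 and uses amplitude
+ amplification (Proposition 4) to improve the dependence on this success probability to its square root.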
+ Corollary 2. Following notation of Proposition 2, let R := Ẑ/Z. There is a quantum oracle O_test
+ which can be implemented under T Update calls to x in O(T log m) time, and has query cost
+ O( sqrt(ρC) · β log^4( Cmn/(ℓδ) ) ).
+ Furthermore, for explicitly known constants C_ℓ and C_u, O_test returns “success” with probability p
+ satisfying C_ℓ/sqrt(Rρ) ≤ p ≤ C_u/sqrt(Rρ).
+ Proof. Our oracle O_test is the oracle from Proposition 2, except we will choose a sufficiently small
+ constant value of δ. It returns “success” when the sample is accepted by the rejection sampler after
+ boosting by amplitude amplification. Before boosting, the success probability from Proposition 2
+ is Θ(1/(Rρ)), where the constants in the upper and lower bounds are explicit. Further, the constants
+ from Proposition 4 are explicit, and hence boosting by amplitude amplification improves the success
+ probability to Θ(1/sqrt(Rρ)) with known constant bounds as required by the corollary statement.
+ C Bounded approximation to exp on [−1, 1]
+ Here, we give a proof of a lemma (with slightly different constants) used in the prior work [vAG19].
+ This section builds entirely off prior results on polynomial approximation in [GSLW19]; we include
+ it for completeness because a proof was not given in [vAG19]. As a reminder, we stated and used
+ the following result earlier when constructing our rejection sampler in Appendix B.
+ Lemma 8 (Lemma 7, [vAG19]). Let β ≥ 1, ξ ≤ 1/10. There is a polynomial P_{β,ξ} of degree O(β log(1/ξ))
+ such that max_{x ∈ [−1,1]} |P_{β,ξ}(x)| ≤ 3 and max_{x ∈ [−1,0]} |P_{β,ξ}(x) − exp(βx)| ≤ ξ.
+ To obtain the lemma, we will utilize the following result from [GSLW19].
+ Proposition 7 (Corollary 66, [GSLW19]). Let x_0 ∈ [−1, 1], r ∈ (0, 2], δ ∈ (0, r]. Let f : [x_0 − r − δ,
+ x_0 + r + δ] → C be such that f(x_0 + x) = Σ_{ℓ ≥ 0} a_ℓ x^ℓ for all x ∈ [−r − δ, r + δ]. Suppose B > 0 is
+ such that Σ_{ℓ ≥ 0} (r + δ)^ℓ |a_ℓ| ≤ B and let ε ∈ (0, 1/(2B)]. There is a polynomial P (see Appendix D
+ for its numerically stable implementation) of degree O( (1/δ) log(B/ε) ) such that
+ max_{x ∈ [x_0 − r, x_0 + r]} |f(x) − P(x)| ≤ ε and max_{x ∈ [−1,1]} |P(x)| ≤ ε + B.
+ Proof of Lemma 8. We apply Proposition 7 with f(x) := exp(βx), which has a convergent Taylor
+ series everywhere, and the parameter settings x_0 = −1, r = 1, δ = 1/β, B = e. We have that
+ f(x_0 + x) = Σ_{ℓ ≥ 0} exp(−β) β^ℓ x^ℓ/ℓ! = Σ_{ℓ ≥ 0} a_ℓ x^ℓ with a_ℓ = exp(−β) β^ℓ/ℓ! for any integer ℓ ≥ 0.
+ We also check that our choice of B is valid, via
+ Σ_{ℓ ≥ 0} (r + δ)^ℓ |a_ℓ| = exp(−β) Σ_{ℓ ≥ 0} (1 + 1/β)^ℓ β^ℓ/ℓ! = exp(−β) Σ_{ℓ ≥ 0} (β + 1)^ℓ/ℓ! = exp(β + 1 − β) = e.
+ Hence by Proposition 7, for any ξ ≤ 1/(2e) there is a polynomial P of degree O(β log(1/ξ)) such that
+ max_{x ∈ [−2,0]} |exp(βx) − P(x)| ≤ ξ and max_{x ∈ [−1,1]} |P(x)| ≤ e + ξ ≤ 3.
+ D Numerically stable implementation of polynomial approximation
+ Throughout this section, let ∆ = O( (1/ε) log^2(mn/ε) ) be the degree of the polynomial used in the
+ proof of Proposition 2 in Appendix B (specifically, constructed in the proof of Proposition 2, where
+ we have C = O(1) and δ = O(ε) in our applications). The polynomial we use is constructed via a
+ decomposition in the Fourier basis (see Lemmas 57 and 65, [GSLW19]). It is not immediate that this
+ polynomial transform can be implemented stably in finite-precision arithmetic, within the quantum
+ singular value transformation framework of [GSLW19], which is used in the proof of Proposition 2.
+ However, [Haa19] shows that, given such a decomposition in the Fourier basis, we can obtain a
+ numerically stable implementation of the required polynomial transformation as a quantum circuit,
+ up to additive error ξ, in time
+ O( ∆^3 log(∆/ξ) ).
+ In our setting (in the proof of Proposition 2), it is straightforward to check that it suffices to take
+ ξ = 1/poly(m, n, ε^{−1}). This construction results in the additive term in Theorem 4.
8tE2T4oBgHgl3EQfPwar/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
9tFJT4oBgHgl3EQfoyxM/content/2301.11597v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79e625ab7976c129b39eeb312becb30ffc16cfba061f383270ca932118dd4de5
3
+ size 2380508
9tFJT4oBgHgl3EQfoyxM/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b61ca1da7b6dec3b13aa0182ce54f1b0c4e26de338201553d91bb175319cc977
3
+ size 2883629
9tFJT4oBgHgl3EQfoyxM/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53ddd3b99cd93382787d6b21f9b26b7d12aa05a675b89a31ed741f248b809bab
3
+ size 103487
AtAzT4oBgHgl3EQfhv1C/content/tmp_files/2301.01488v1.pdf.txt ADDED
@@ -0,0 +1,1850 @@
1
+ Informed Down-Sampled Lexicase Selection:
2
+ Identifying productive training cases for
3
+ efficient problem solving
4
+ Ryan Boldi∗
5
+ rbahlousbold@umass.edu
6
+ University of Massachusetts, Amherst, MA 01003, USA
7
+ Martin Briesch∗
8
+ briesch@uni-mainz.de
9
+ Johannes Gutenberg University, Mainz, 55128, Germany
10
+ Dominik Sobania
11
+ dsobania@uni-mainz.de
12
+ Johannes Gutenberg University, Mainz, 55128, Germany
13
+ Alexander Lalejini
14
+ lalejina@gvsu.edu
15
+ Grand Valley State University, Allendale, MI 49401, USA
16
+ Thomas Helmuth
17
+ thelmuth@hamilton.edu
18
+ Hamilton College, Clinton, NY, 13323, USA
19
+ Franz Rothlauf
20
+ rothlauf@uni-mainz.de
21
+ Johannes Gutenberg University, Mainz, 55128, Germany
22
+ Charles Ofria
23
+ ofria@msu.edu
24
+ Michigan State University, East Lansing, MI 48824, USA
25
+ Lee Spector
26
+ lspector@amherst.edu
27
+ Amherst College, Amherst, MA 01002, USA
28
+ Abstract
29
+ Genetic Programming (GP) often uses large training sets and requires all individuals
30
+ to be evaluated on all training cases during selection. Random down-sampled lexicase
31
+ selection evaluates individuals on only a random subset of the training cases allow-
32
+ ing for more individuals to be explored with the same amount of program executions.
33
+ However, creating a down-sample randomly might exclude important cases from the
34
+ current down-sample for a number of generations, while cases that measure the same
35
+ behavior (synonymous cases) may be overused despite their redundancy. In this work,
36
+ we introduce Informed Down-Sampled Lexicase Selection.
37
+ This method leverages
38
+ population statistics to build down-samples that contain more distinct and therefore
39
+ informative training cases. Through an empirical investigation across two different GP
40
+ systems (PushGP and Grammar-Guided GP), we find that informed down-sampling
41
+ significantly outperforms random down-sampling on a set of contemporary program
42
+ synthesis benchmark problems. Through an analysis of the created down-samples, we
43
+ find that important training cases are included in the down-sample consistently across
44
+ independent evolutionary runs and systems. We hypothesize that this improvement
45
+ can be attributed to the ability of Informed Down-Sampled Lexicase Selection to main-
46
+ tain more specialist individuals over the course of evolution, while also benefiting from
47
+ reduced per-evaluation costs.
48
+ Keywords
49
+ Genetic programming, parent selection algorithms, selection schemes, lexicase selec-
50
+ tion, down-sampling, informed down-sampling
51
+ ∗Both authors contributed equally.
52
+ ©2022 by the Massachusetts Institute of Technology
53
+ Preprint
54
+ arXiv:2301.01488v1 [cs.NE] 4 Jan 2023
55
+
56
+ 1 Introduction
59
+ In Evolutionary Computation, we often use large sets of training data to evaluate the
60
+ quality of candidate solutions. For instance, most Genetic Programming (GP) systems
61
+ evaluate programs using input/output examples (training cases) that specify the ex-
62
+ pected behavior of a correct program. Many GP selection strategies aggregate each
63
+ program’s performance across all training cases to produce one fitness score that can be
64
+ used for selection. In contrast, lexicase selection (Spector, 2012; Helmuth et al., 2015)
65
+ avoids aggregation and considers each training case separately, which has been shown
66
+ to improve diversity maintenance (Helmuth et al., 2016; Dolson and Ofria, 2018) and
67
+ problem-solving success across a wide range of domains (Moore and Stanton, 2017;
68
+ Metevier et al., 2019; Aenugu and Spector, 2019; Ding and Spector, 2021; Lalejini et al.,
69
+ 2022).
70
+ However, standard lexicase selection has the drawback that we have to evaluate all
71
+ individuals on all training cases, which can be computationally expensive when eval-
72
+ uation is non-trivial. To reduce lexicase selection’s computational cost, recent work in-
73
+ troduced down-sampled lexicase selection (Moore and Stanton, 2017; Hernandez et al.,
74
+ 2019; Ferguson et al., 2020). In down-sampled lexicase selection, the training set is ran-
75
+ domly down-sampled, reducing the number of test case evaluations required to assess
76
+ the quality of each candidate solution. This in turn reduces the cost of evaluating an
77
+ entire set of individuals, allowing us to reallocate computational resources to other as-
78
+ pects of an evolutionary search (e.g., increasing search time or population size), which
79
+ can substantially improve problem-solving success (Helmuth and Spector, 2020, 2021;
80
+ Hernandez et al., 2019). However, a naive random down-sample can leave out poten-
81
+ tially important test cases, resulting in a loss of diversity (Ferguson et al., 2020; Helmuth
82
+ et al., 2020; Hernandez et al., 2022).
83
+ In order to put more computational effort towards evaluating individuals on im-
84
+ portant training cases, we propose informed down-sampling (IDS), which uses runtime
85
+ population statistics to build a down-sample that contains more distinct cases.
86
+ Given a set of solutions, two training cases are distinct from each other if the sub-
87
+ sets of solutions that solve each of the two test cases have little-to-no overlap. Two
88
+ training cases are synonymous if the opposite is true: there is substantial overlap be-
89
+ tween the subsets of solutions that solve each case*. Consequently, Informed down-
90
+ sampling favors the distinct training cases over synonymous cases when building a
91
+ down-sample to use for selection. We expect these informed down-samples to better
92
+ maintain unique individuals, increasing overall population diversity while also putting
93
+ more selection pressure on individuals whose descendants are more likely to solve the
94
+ problem. These unique individuals are often viewed as the stepping-stones for evolu-
95
+ tion to use in finding a perfect solution program (Helmuth et al., 2020).
96
+ To assess the performance of Informed Down-Sampled Lexicase Selection, we
97
+ compare lexicase selection without down-sampling (standard lexicase), with random
98
+ down-sampling, and with informed down-sampling across eight problems from the
99
+ first and second program synthesis benchmark suites (Helmuth and Spector, 2015; Hel-
100
+ muth and Kelly, 2021). We conduct our experiments in two independent GP frame-
101
+ works, Grammar-Guided Genetic Programming (G3P) (Whigham et al., 1995; Forsten-
102
+ lechner et al., 2016, 2017) and PushGP (Spector and Robinson, 2002; Spector et al., 2004).
103
+ We find that building a down-sample based on information we collect from the
104
+ *Synonymous cases can also be thought of as cases that have different inputs and outputs yet measure
105
+ a very similar functionality such that there is a high correlation between individuals’ performance on these
106
+ cases.
107
+ population is a valuable way to improve the success rates of evolutionary runs at a
112
+ fixed computational cost. Furthermore, simply tracking which cases are distinct, and
113
+ ensuring they are placed in a down-sample, can significantly improve problem solving
114
+ performance. Our results provide evidence that informed down-sampling improves
115
+ the success rate of search in the two GP systems used. By analyzing the composition
116
+ of down-samples, we also verify that informed down-sampling builds down-samples
117
+ that contain more informative test cases (i.e. edge cases) than random down-sampling.
118
+ 2
119
+ Related Work
120
+ In most GP applications, parent selection uses the performance of candidate solutions
121
+ on a set of training cases to pick individuals that contribute genetic material to the next
122
+ generation. Most selection algorithms aggregate the scores on these training cases to get
123
+ a single score per candidate and then select the most fit candidates using tournament
124
+ selection (Brindle, 1980), implicit fitness sharing (Smith et al., 1993), fitness proportion-
125
+ ate selection (Holland, 1992), or another selection strategy. The fitness aggregation pro-
126
+ cedure for these methods often results in a loss of semantic information about which
127
+ training cases the individual performs well on (Krawiec et al., 2016), motivating the
128
+ development of selection strategies that consider each individual’s performance on all
129
+ training cases encountered (Vanneschi et al., 2014; Goings et al., 2012; Deb et al., 2002;
130
+ Horn et al., 1994).
131
+ In contrast, lexicase selection does not aggregate fitness or performance measures
132
+ (Spector, 2012). For each parent selection event, the lexicase selection procedure first
133
+ places all individuals in the population into a “parent pool” (i.e., the pool of individ-
134
+ uals eligible to be selected). To select a parent, lexicase selection shuffles the training
135
+ cases into a random ordering, and each training case is considered in sequence. For
136
+ each training case, the parent pool is filtered down to just the individuals that have the
137
+ best (or tie for the best) performance, removing all but the best candidates from further
138
+ consideration. If there is only one individual that remains in the pool during this filter-
139
+ ing process, this individual is selected. If the training cases are exhausted and there are
140
+ still individuals in the pool, one of these individuals is selected at random.
141
+ Meanwhile, many variants of lexicase selection have been proposed for use in dif-
142
+ ferent problems or domains. Examples include epsilon lexicase selection (La Cava et al.,
143
+ 2016; Moore and Stanton, 2017), batch lexicase selection (Aenugu and Spector, 2019;
144
+ Sobania and Rothlauf, 2022), gradient lexicase selection (Ding and Spector, 2021), lexi-
145
+ case selection for GAs (Metevier et al., 2019), weighted shuffle lexicase selection (Troise
146
+ and Helmuth, 2017), and fast lexicase selection (Ding et al., 2022).
147
+ One of the most promising variants of lexicase selection is down-sampled lexicase
148
+ selection, which was first proposed for expensive evolutionary robotics runs by Moore
149
+ and Stanton (2017) and later formalized by Hernandez et al. (2019) for GP runs. So far,
150
+ down-sampled lexicase selection increased the success and generalization rates for a
151
+ variety of problems (Ferguson et al., 2020). Down-sampled lexicase selection works by
152
+ randomly sampling once in each generation the training set to create a smaller set of
153
+ cases. These cases are then used to perform all selection events in the population for
154
+ that one generation. This limitation on the number of test cases reduces the computa-
155
+ tional costs of evaluating the individuals, which is usually one of the most expensive
156
+ operations in evolutionary runs. These savings could be used to perform computation-
157
+ ally cheaper GP runs, increase the population size, or run evolution for more genera-
158
+ tions.
159
+ Down-sampled lexicase selection has also been found to significantly outperform
160
+ regular lexicase selection in a variety of program synthesis benchmarks (Hernandez
165
+ et al., 2019; Ferguson et al., 2020; Helmuth and Spector, 2020, 2021; Helmuth and Ab-
166
+ delhady, 2020). However, creating a down-sample randomly can exclude important
167
+ training cases from the current down-sample for a number of generations (Hernandez
168
+ et al., 2022), while synonymous cases may be overused. As a first attempt at chang-
169
+ ing the composition of cases in the down-sample, Boldi et al. (2022) explored using
170
+ a rolling down-sample and a disjoint down-sample for lexicase selection runs. While
171
+ the results were neutral-if-not-negative, they highlighted the presence of synonymous
172
+ cases in practice and suggest that an attempt at mediating the time put into evaluating
173
+ individuals on these synonymous cases might improve search performance.
174
+ Work in the EC literature that is related to informed down-sampling primarily
175
+ includes the co-evolution of fitness predictors and maximizers (Schmidt and Lipson,
176
+ 2005, 2008; ˇSikulov´a and Sekanina, 2012). That work attempts to evolve a smaller set
177
+ of training cases, or fitness predictors, to evaluate the fitness of individuals instead of
178
+ using the entire training set. While our studied methods do not involve co-evolution,
179
+ they both result in a compressed training set that is roughly as informative as the set
180
+ of all available data. Another example is the use of random down-sampling to im-
181
+ prove performance of AutoML runs that use Genetic Programming (Zogaj et al., 2021).
182
+ In the broader machine learning community, random down-sampling is used to gen-
183
+ erate mini-batches for stochastic gradient descent (Ruder, 2017), and forms of non-
184
+ random down-sampling are used to detect hard or informative parts of the training
185
+ data (Loshchilov and Hutter, 2015; Bachem et al., 2017; Paul et al., 2021; Chrysakis and
186
+ Moens, 2020).
187
+ 3 Informed Down-Sampling
189
+ Informed down-sampling addresses randomly down-sampled lexicase’s drawback of
190
+ sometimes including many synonymous training cases in a down-sample, which is
191
+ computationally inefficient and can result in a failure to accurately assess candidate so-
192
+ lution quality. For example, down-sampled lexicase selection might fail to select candi-
193
+ date solutions that specialize on training cases absent from a particular random down-
194
+ sample, resulting in the loss of potentially important genetic material from the popu-
195
+ lation. Instead of down-sampling randomly, informed down-sampling creates down-
196
+ samples composed of more distinct training cases than a random sample would contain
197
+ using runtime population statistics. As a result, we expect informed down-sampling
198
+ lexicase selection to maintain more diverse populations, while reducing computation
199
+ spent on evaluating individuals on synonymous training cases.
200
+ We suggest two methods of building an informed down-sample. First, we explore
201
+ the idealized effectiveness of informed down-sampling by presenting it with full infor-
202
+ mation. This method requires evaluating the entire population on all training cases,
203
+ performing the same number of program executions per generation as normal lexicase
204
+ selection. Therefore, informed down-sampling with full information cannot capital-
205
+ ize on the computational savings afforded by random down-sampling. However, the
206
+ full information approach provides useful intuition for building an informed down-
207
+ sample, allowing us to measure the problem-solving success of our sampling approach
208
+ under idealized conditions.
209
+ Next, we present an approach for creating an informed down-sample that reduces
210
+ the number of per-generation evaluations required for selection (relative to standard
211
+ lexicase selection). This second approach, referred to as the “sparse information” ap-
212
+ proach, estimates the distinctness of training cases based on a sample of individuals
213
+ 4
214
+ Preprint
215
+
216
+ Informed Down-Sampled Lexicase Selection
217
+ I1
218
+ I2
219
+ I3
220
+ I4
221
+ I5
222
+ I6
223
+
224
+ �����
225
+
226
+ �����
227
+ S1
228
+ 0
229
+ 1
230
+ 0
231
+ 1
232
+ 1
233
+ 0
234
+ S2
235
+ 1
236
+ 1
237
+ 0
238
+ 0
239
+ 1
240
+ 1
241
+ S3
242
+ 1
243
+ 0
244
+ 1
245
+ 1
246
+ 0
247
+ 1
248
+ S4
249
+ 0
250
+ 1
251
+ 0
252
+ 0
253
+ 1
254
+ 1
255
+ S5
256
+ 0
257
+ 1
258
+ 0
259
+ 1
260
+ 1
261
+ 0
262
+ Figure 1: Example of the data structure that is used to determine distances between
263
+ cases. c1,...,5 are cases, with their respective solve vectors S1,...,5, and I1,...,6 are indi-
264
+ viduals. The entry at Sj and Ii represents whether the ith individual solved the jth test
265
+ case or not. The binary solve vectors Sj can be read off as the respective row for the
266
+ jth case. The distance between two cases, D(cx, cy), is the Hamming distance between
267
+ their respective solve vectors. For example, D(c1, c2) = 3 and D(c2, c3) = 4.
268
+ from the parent population. Indeed, building an informed down-sample using sparse
269
+ information results in nearly the same per-generation evaluation savings as when using
270
+ random down-sampling.
271
+ 3.1
272
+ Building an Informed Down-Sample with Full Information
273
+ In our informed down-sampling approach with full information, we create one down-
274
+ sample of training cases per generation, and we use candidate solution performances
275
+ on only the sampled training cases to choose parents with lexicase selection. To con-
276
+ struct an informed down-sample with full information, we evaluate all members of the
277
+ population on all training cases. In this work, each of these evaluations is on a pass/fail
278
+ basis. Next, we construct the “solve vector” Sj for each training case cj, which is a vec-
279
+ tor of binary values that specifies which individuals in the population have solved the
280
+ training case. We then calculate the Hamming distance between solve vectors for all
281
+ pairs of training cases, allowing us to measure how distinct training cases are relative
282
+ to one another.
283
+ We begin constructing the down-sample by randomly selecting an initial training
284
+ case to include. Then we find the training case whose solve vector is maximally distant
285
+ from the closest training case already included in the down-sample, and add it to the
286
+ down-sample. We repeatedly add training cases to the down-sample in this way until
287
+ reaching a parameterized sample size.
288
+ Figure 1 provides an example set of binary solve vectors for a set of five training
289
+ cases and a population of six individuals.
290
+ The columns in this matrix Ii describe the performance of the ith individual on
291
+ all cases. A value of 1 at (Ii, cj) implies that the ith individual solved the jth test case
292
+ (error = 0), or Si
293
+ j = 1. Since all members of a population of size p are evaluated on all
294
+ test cases (at least initially), we can say that ∥Sj∥ = p for all cases, cj. Thus, the number
295
+ of columns corresponds to the population size.
296
+ We define the distance between two training cases D(cx, cy) := Hamming(Sx, Sy)
297
+ where Hamming(·, ·) is the Hamming distance between two vectors. For binary vec-
298
+ tors, the distance function is defined as: D(cx, cy) = Σ_{i=1}^{p} |Si_x − Si_y|. Thus, two training
+ cases that are solved by the same set of individuals are deemed to have D(c1, c2) = 0
303
+ and are called “synonymous cases”. For example, for the cases in Figure 1, c1 and c5
308
+ have identical solve vectors, and therefore are synonymous (D(c1, c5) = 0).
309
+ We think of this distance function as indicating the joint information contained in
310
+ a pair of cases. Two cases that have exactly the same individuals solving them (i.e. are
311
+ synonymous) have little to no joint information because having both of the cases in
312
+ the sample would be about as informative as just having one of them. Two cases that
313
+ have a high distance from each other, due to being solved by different subsets of the
314
+ population, have high joint information as each case is responsible for informing the
315
+ system about the performance of one set of individuals. Having both of these cases, as
316
+ opposed to one alone, would be a more faithful approximation of using the full training
317
+ set.
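+ As an illustration of the solve-vector bookkeeping described above (a sketch with our own function
+ names, assuming a pass/fail evaluation matrix), the pairwise case distances can be computed as:
+
+ import numpy as np
+
+ def case_distance_matrix(solves):
+     """solves: boolean array of shape (num_cases, population_size); solves[j, i] is True if
+     individual i solves case j. Returns the matrix of pairwise Hamming distances D(cx, cy)."""
+     S = solves.astype(int)
+     # D[x, y] = number of individuals on which cases x and y disagree
+     return np.abs(S[:, None, :] - S[None, :, :]).sum(axis=2)
+
+ # Example: the five solve vectors from Figure 1 (individuals I1..I6)
+ S = np.array([
+     [0, 1, 0, 1, 1, 0],   # S1
+     [1, 1, 0, 0, 1, 1],   # S2
+     [1, 0, 1, 1, 0, 1],   # S3
+     [0, 1, 0, 0, 1, 1],   # S4
+     [0, 1, 0, 1, 1, 0],   # S5
+ ], dtype=bool)
+ D = case_distance_matrix(S)
+ print(D[0, 1], D[1, 2], D[0, 4])   # 3, 4, 0  (c1 and c5 are synonymous)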
318
+ Once we have a method to evaluate the pairwise distance between two cases, we
319
+ can use it to select a down-sample of the training set for use in the current generation.
320
+ In this work, we apply a variant of Farthest First Traversal to select the down-sample
321
+ (Hochbaum and Shmoys, 1985). The creation of the down-sample starts with the selec-
322
+ tion of one random case to include. Then, at each step, we scan each unselected test
323
+ case and measure its minimum distance to any test in the current down-sample. We
324
+ select the case that has the largest minimum distance. In other words, we successively
325
+ add the test case that is furthest from the current down-sample at its nearest point.
326
+ Our Farthest First Traversal algorithm is shown in algorithm 1. Starting with an
327
+ empty down-sample, we first add a random case to the down-sample (line 4), and
328
+ then iteratively add the cases that are maximally far from the closest case to it (5-9). If
329
+ there are multiple cases with the same maximum minimum distance, ties are broken
330
+ randomly. The MinDisti value stores the distance from a given case, ci to the closest
331
+ case to it in the down-sample. The cases.popMaxMinDistCase() function removes
332
+ and returns the case in cases that has the maximum value for MinDisti. Note here
333
+ that it is often the case that the minimum distances all go to zero at a point during the
334
+ down-sample formation. At this point, every case left over in the training set has a
335
+ synonymous case in the down-sample already. When this happens, the farthest first
336
+ procedure will automatically select cases at random from the training set to fill up the
337
+ required down-sample size. Figure 2 shows an example of performing informed down-
338
+ sampling with full information using the case solve vectors from Figure 1.
339
+ Algorithm 1 Farthest First Traversal Down-Sample Selection
+ Data: D(·, ·) : D(ci, cj) = D(cj, ci) = distance from case i to case j, r = down-sample rate
+ 1: cases ← set of all cases in training set
+ 2: ds ← empty set                        ▷ the down-sample
+ 3: size ← r × |cases|                    ▷ desired size of down-sample
+ 4: ds.add(cases.popRandomCase())
+ 5: while ∥ds∥ < size do
+ 6:     for every case c in cases do
+ 7:         MinDisti ← minimum distance from ci to any case in ds
+ 8:     end for
+ 9:     ds.add(cases.popMaxMinDistCase())
+ 10: end while
+ 11: return ds
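+ A compact Python sketch of Algorithm 1 (our own illustration; function and variable names are ours)
+ could look as follows, taking a precomputed symmetric distance matrix D and a down-sample rate r:
+
+ import random
+
+ def farthest_first_down_sample(D, r, rng=random.Random(0)):
+     """D: 2D list or array with D[i][j] = distance between cases i and j.
+     Returns a list of case indices forming the informed down-sample."""
+     n_cases = len(D)
+     size = max(1, round(r * n_cases))
+     remaining = list(range(n_cases))
+     start = rng.choice(remaining)                 # line 4: one random case seeds the sample
+     ds = [start]
+     remaining.remove(start)
+     while len(ds) < size:
+         # line 7: distance from each remaining case to its closest selected case
+         min_dists = {c: min(D[c][s] for s in ds) for c in remaining}
+         best = max(min_dists, key=min_dists.get)  # line 9: farthest-first choice (ties arbitrary)
+         ds.append(best)
+         remaining.remove(best)
+     return ds
+
+ When all remaining minimum distances reach zero, this choice degenerates to an effectively random
+ fill, matching the behavior described above.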
359
+ D =
+        c1  c2  c3  c4  c5
+ c1      0   3   4   2   0
+ c2      3   0   4   1   3
+ c3      4   4   0   5   5
+ c4      2   1   5   0   2
+ c5      0   3   5   2   0
+ Random  →  ds = {c1}  →  c3 had max. distance to c1  →  ds = {c1, c3}
+         →  c2 had max. min. distance to {c1, c3}  →  ds = {c1, c3, c2}
+ Figure 2: Example running procedure of informed down-sampling with full information to pick a
+ down-sample of size 3 (or r = 3/5). We have a tabular representation of the distance function D
+ generated by computing the Hamming distance between each pair of cases’ solve vectors. Beginning
+ with a randomly selected case c1, we sequentially add the cases that are at the maximum distance
+ to their closest case in the down-sample. The first step is simply finding the case (c3) in the training
+ set with the maximum distance to c1. To select the next case, we need to find, for c2, c4 and c5,
+ which of c1 and c3 is closest to them, respectively, and then which of those cases is farthest away.
+ In this example, c2 was added as it had a higher distance (3) to its closest case than did c4 or c5
+ (2 and 0, respectively). Notice that the cases that were left out, c4 and c5, are synonymous or
+ nearly synonymous with cases already in the down-sample: c2 and c1, respectively.
+ Down-sampled lexicase selection’s problem-solving benefits stem from the computa-
437
+ tional savings gained by not evaluating the entire population on the whole training set
438
+ for every generation. For a fixed computational budget, down-sampling allows more
439
+ computational resources to be allocated to other aspects of evolutionary search, such
440
+ as running for more generations or increasing population size. As a result, a larger
441
+ portion of the search space can be explored (Helmuth and Spector, 2021). Informed
442
+ down-sampling with full information requires the evaluation of all individuals on all
443
+ training cases in order to construct the down-sample to use in selection. This entire pro-
444
+ cess is counterproductive, as we could have just used the initial population evaluation
445
+ to select individuals and circumvent the entire down-sampling process. The benefit of
446
+ down-sampling comes from its ability to use sparse information in the individual selec-
447
+ tion process. Since our aim is to improve on random down-sampling, we must reduce
448
+ the number of necessary program executions in order to calculate distances between
449
+ training cases, so that we can benefit from sparse evaluations in both our individual
450
+ selections and our down-sample creation.
451
+ We present two methods to decrease the number of evaluations required for the
452
+ pairwise distance calculation procedure. The first method, parent sampling, samples a
453
+ proportion ρ of the parents to evaluate the distances for every generation. These parent-
454
+ samples are evaluated on the entire training set. In our runs with a population size of
455
+ 1000, if we were to randomly sample 0.01 (or ρ = 0.01) of these parents to become
456
+ the parent sample, these 10 parents would be evaluated on all training cases. This
457
+ results in case solve vectors of length 10 that are used to calculate the distances between
458
462
+ cases. Distances between cases are determined purely based on these parent-sample
463
+ evaluations. We use the distance matrix generated from these parents to estimate the
464
+ joint informativeness.
465
+ The second method, scheduled case distance computation, involves recomputing the
466
+ distance matrix from the current population every k generations, as opposed to every
467
+ generation. This schedule reduces the amount of computation required for the evalua-
468
+ tion of case distances even further by not performing it every generation. While such
469
+ a schedule does not update the distances between cases as often, we still re-sample the
470
+ down-sample based on these distances every generation. Due to the stochastic nature of
471
+ the down-sample selection process (specifically the random selection of the first case),
472
+ it is likely that the same down-sample will not be used to evaluate the population in
473
+ consecutive generations.
474
+ In combination, parent sampling and scheduled case distance computation allow
475
+ us to select a down-sample using far less information about individuals while losing
476
+ only a small amount of information about cases and their similarity. This technique
477
+ enables informed down-sampling to explore nearly as many individuals as random
478
+ down-sampling does. Putting it all together, the informed down-sampling with sparse
479
+ information algorithm is detailed in Algorithm 2. This algorithm walks through a sin-
480
+ gle generation’s selection events, returning the parents for the next generation.
481
+ Algorithm 2 Informed Down-Sampling with Sparse Information
+ Data: P : population,
+       cases : set of all training cases,
+       k : scheduled case distance computation parameter,
+       ρ : parent sampling rate,
+       G : current generation counter,
+       D : case distance matrix.            ▷ all distances initialized to be maximally far
+ Result: A list of selected parents
+ 1: if G % k == 0 then
+ 2:     P̂ ← sample ρ × |P| parents from P
+ 3:     evaluate P̂ on cases
+ 4:     calculate D from case solve vectors from solutions in P̂ on cases
+ 5: end if
+ 6: D(·, ·) ← distance function derived from indexing into D
+ 7: ds ← create down-sample using farthest first traversal down-sampling (see Algorithm 1)
+ 8: P ← select |P| new parents using lexicase selection from P using ds as cases
+ 9: return P
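+ As a companion to the pseudocode, the following is a minimal Python sketch of one
+ generation of informed down-sampling with sparse information. It is written under our
+ own assumptions rather than taken from the propeller or PonyGE2 code bases: evaluate
+ is assumed to return a boolean pass/fail matrix (sampled parents by training cases),
+ lexicase_select is assumed to exist, and farthest_first_downsample is the helper
+ sketched after Figure 2.
+
+ import random
+
+ def case_distance_matrix(pass_matrix):
+     """Hamming distances between case solve vectors.
+     pass_matrix[i][j] is True iff sampled parent i solves training case j."""
+     n_cases = len(pass_matrix[0])
+     D = [[0] * n_cases for _ in range(n_cases)]
+     for a in range(n_cases):
+         for b in range(a + 1, n_cases):
+             d = sum(row[a] != row[b] for row in pass_matrix)
+             D[a][b] = D[b][a] = d
+     return D
+
+ def ids_generation(population, cases, D, G, k=10, rho=0.01, r=0.05,
+                    evaluate=None, lexicase_select=None, rng=random):
+     """Select |population| parents for one generation using an informed down-sample."""
+     if G % k == 0:                                    # scheduled distance computation
+         parent_sample = rng.sample(population, max(1, int(rho * len(population))))
+         pass_matrix = evaluate(parent_sample, cases)  # sampled parents on all cases
+         D = case_distance_matrix(pass_matrix)
+     size = max(1, int(r * len(cases)))
+     ds = [cases[i] for i in farthest_first_downsample(D, size)]
+     return lexicase_select(population, ds, n=len(population)), D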
503
+ 4  Experimental Methods
505
+ We conducted a series of experiments to study the performance of informed down-
506
+ sampled lexicase selection. We compared the performance of informed down-sampled,
507
+ random down-sampled, and standard lexicase selection on a series of program synthe-
508
+ sis benchmark problems. We performed all experiments in two independent genetic
509
+ programming systems to show that the findings are robust across different program
510
+ representations: PushGP and Grammar Guided Genetic Programming (G3P).
511
+ This section introduces the benchmark problems and genetic programming sys-
512
+ tems used in our experiments and describes our experimental design.
513
517
+ Table 1: Program synthesis benchmark problems selected from the first and second gen-
+ eral program synthesis benchmark suites, along with their respective input and output
+ types and multiplicities.
+
+ Problem         Suite  Input Type         Output Type
+ Count Odds      PSB1   Vector of Integer  Integer
+ Find Pair       PSB2   Vector of Integer  Two Integers
+ Fizz Buzz       PSB2   Integer            String
+ Fuel Cost       PSB2   Vector of Integer  Integer
+ GCD             PSB2   Two Integers       Integer
+ Grade           PSB1   Five Integers      String
+ Scrabble Score  PSB1   String             Integer
+ Small or Large  PSB1   Integer            String
556
+ 4.1  Program Synthesis Benchmark Problems
558
+ We evaluate each system using eight program synthesis benchmark problems from the
559
+ first and second general program synthesis benchmark suites (Helmuth and Spector,
560
+ 2015; Helmuth and Kelly, 2021). These problems are well-studied and are commonly
561
+ used to compare parent selection algorithms in a GP context (Sobania et al., 2022b,a).
562
+ These two benchmark suites include a variety of introductory program synthesis prob-
563
+ lems that require the manipulation of multiple data types with complex looping or
564
+ conditional structures.
565
+ Each benchmark problem is defined by a set of input/output examples (referred
566
+ to as cases) that specify the desired behavior of a correct program. For each problem,
567
+ we split the input/output examples into a training set and a testing set. During evolu-
568
+ tion, we assessed program quality using only the training set. We used the testing set
569
+ to measure how well a program generalized on examples unseen during evolution. We
570
+ consider each input/output example on a pass/fail basis; that is, a program passes a
571
+ test case if it produces the correct output when run with the associated input. A pro-
572
+ gram is a solution if it passes all of the training cases; it generalizes if it passes all training
573
+ and all testing cases. We refer to runs as “success” if they result in the production of
574
+ a generalizing solution. We used the same training and testing data sets across both
575
+ PushGP and G3P for each problem to ensure the data available is not biasing perfor-
576
+ mance.
577
+ Table 1 shows the eight program synthesis benchmark problems that we have cho-
578
+ sen, along with their input and output types. We selected these particular problems to
579
+ allow us to test informed down-sampling on a set of easy, medium, and hard problems
580
+ as established by published success rates using PushGP and random down-sampled
581
+ lexicase selection (Helmuth and Spector, 2021; Helmuth and Kelly, 2022). We also en-
582
+ sured that these problems require qualitatively different programmatic paradigms to
583
+ solve, such as looping and conditional execution (Helmuth and Kelly, 2022).
584
+ 4.2  Genetic Programming Systems
586
+ PushGP is a system that evolves computer programs in the Push programming lan-
587
+ guage, a stack-based language specifically invented for use in genetic programming
588
+ (Spector and Robinson, 2002; Spector et al., 2004). Push literals are pushed onto one
589
+ of a set of datatype specific stacks while instructions are also stored on a stack dur-
590
+ ing interpretation. These instructions usually act on data from the stacks and leave
591
595
+ Table 2: General and System-Specific Evolution Parameters
+
+ General Parameter                      Value
+ runs per problem                       100
+ population size                        1,000
+ size of training set                   200
+ size of test set                       1,000
+ program execution limit                60 million
+ maximum number (base) of generations   300
+
+ PushGP Parameter                       Value
+ variation operator                     UMAD
+ UMAD rate                              0.1
+
+ G3P Parameter                          Value
+ crossover operator                     subtree crossover
+ crossover probability                  0.95
+ mutation operator                      subtree mutation
+ mutation steps                         1
+ maximum tree depth                     17
+ elite size                             5
+ initialisation                         position-independent grow
+ maximum initial tree depth             10
634
+ their return value on the stacks. Instructions take values from and return results to
635
+ the appropriately typed stack, including from and to the instruction stack, allowing for
636
+ programs to use multiple data types and complex conditional execution paradigms. In
637
+ this work, we used the propeller implementation of PushGP†.
638
+ G3P uses a context-free grammar in Backus-Naur form to evolve individuals in a
639
+ desired programming language and supports the use of different data types and con-
640
+ trol structures (Whigham et al., 1995; Forstenlechner et al., 2016, 2017). To prevent the
641
+ generation of many invalid solutions during search, we use a tree-based representation
642
+ instead of the common genotype-phenotype mapping known from classical grammat-
643
+ ical evolution (Ryan et al., 1998; Sobania and Rothlauf, 2020). For the implementation
644
+ of G3P, our code‡ is based on the PonyGE2 framework (Fenton et al., 2017).
645
+ Table 2 shows the system-specific parameters for PushGP and G3P, and the general
646
+ parameters that are used in both systems. The “runs per problem” parameter refers to
647
+ the number of independent evolutionary runs that were conducted for each problem
648
+ and experimental configuration. The PushGP system uses the uniform mutation by ad-
649
+ dition and deletion (UMAD) mutation operator (Helmuth et al., 2018). This UMAD op-
650
+ erator works with a 0.1 mutation rate. For G3P, we use subtree mutation and crossover,
651
+ with a crossover probability of 0.95. The initialization for G3P is position-independent
652
+ grow (Fagan et al., 2016). We use grammars based on those provided by the PonyGE2
653
+ framework with small adjustments to make them more comparable to the PushGP
654
+ instructions.
655
+ †https://github.com/ryanboldi/propeller/releases/tag/Informed-Downsampling
656
+ ‡https://gitlab.rlp.net/mbriesc/informed-down-sampled-lexicase-selection
657
661
+ 4.3  Evaluation and Generation Limits
663
+ In order to make a fair comparison between methods that perform different numbers of
664
+ program executions per generation, we use the recommendation from the PSB2 bench-
665
+ mark suite to limit each GP run to 60 million program executions (Helmuth and Kelly,
666
+ 2021). Since program executions typically take up the majority of the computational
667
+ requirements of a GP run, this ensures runs receive similar amounts of computation re-
668
+ gardless of whether they use down-sampling. In standard runs using all training cases,
669
+ the 60 million executions are used by at most 300 generations of a population size of
670
+ 1000 individuals evaluated on 200 cases. With random down-sampling, we increase
671
+ the maximum number of generations by the same factor as the down-sampling. For
672
+ example, if one tenth of the training data is used in each sample, we can run evolu-
673
+ tion for ten times the number of generations while keeping the number of individual
674
+ program executions constant.
675
+ More generally, if we let G be the maximum number of generations for a run using
676
+ all training cases, we allow our random down-sampling runs a limit of Ĝ generations,
+ where Ĝ is given by
+
+     Ĝ = G / r,
+
+ where r is the down-sample rate. For informed down-sampled lexicase selection the
+ generational limit is calculated by
+
+     Ĝ = G / (r + ρ(1 − r) / k),
+
687
+ where ρ is the parent sampling rate and k is the parameter for the scheduled case dis-
688
+ tance computation. The exact generational limits for each experimental configuration
689
+ are shown in Table 3.§
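+ As a quick check, the short Python snippet below (illustrative only, not part of the
+ experimental code) evaluates this formula; with G = 300 it reproduces the generational
+ limits listed in Table 3, and setting ρ = 0 recovers the random down-sampling limit G/r.
+
+ def generation_limit(G=300, r=0.05, rho=0.0, k=1):
+     """Maximum generations under a fixed program-execution budget."""
+     return int(G / (r + rho * (1 - r) / k))
+
+ for r in (0.05, 0.1):
+     print(r, generation_limit(r=r),                      # random down-sampling
+           [generation_limit(r=r, rho=0.01, k=k) for k in (1, 10, 100)])
+ # 0.05 6000 [5042, 5888, 5988]
+ # 0.1  3000 [2752, 2973, 2997]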
690
+ 4.4  Experimental Configurations
692
+ We explore 11 different configurations of lexicase selection for each problem: standard
693
+ lexicase selection (Lex), random down-sampled lexicase selection (Rnd), IDS lexicase
694
+ selection with full information, as well as three sparse information configurations. To
695
+ better match previous literature, all down-sampling methods were performed both
696
+ with r ∈ {0.05; 0.1}.
697
+ Table 3 shows the configurations of the different runs performed in this work.
698
+ These runs, due to different generational computational costs, have different genera-
699
+ tional limits as explained in section 4.3.
700
+ Full information down-sampling is simply using a parent-sample rate of 1, which
701
+ means that the distances between training cases are determined by all parents’ perfor-
702
+ mance on every test case. With this, the quality of the distance metric between two
703
+ cases is not limited by the parent-sampling or generational gaps we are using to reduce
704
+ computational load. Full information down-sampling is included as a control exper-
705
+ iment to compare with using all cases for selection in standard lexicase selection. It
706
+ is important to note that we run for the same number of generations as with regular
707
+ lexicase selection because we need to evaluate all parents on all test-cases in order to
708
+ determine the distances between the cases.
709
+ §As our implementations evaluate the fitness of individuals in the parent sample twice, we run the IDS
710
+ with sparse information runs for slightly (< 40) fewer generations to compensate for the additional computa-
711
+ tional effort.
712
716
+ Table 3: Different settings conducted in our experiments for standard lexicase selection
+ (Lex), random down-sampled lexicase selection (Rnd) and informed down-sampled
+ lexicase selection (IDS). The variable r denotes the down-sampling rate, ρ is the parent
+ sampling rate, k is the generational interval at which we update the distance matrix and
+ Ĝ specifies the maximum number of generations.
+
+ Method  Lex  | Rnd    IDS    IDS    IDS    IDS   | Rnd    IDS    IDS    IDS    IDS
+ r       -    | 0.05   0.05   0.05   0.05   0.05  | 0.1    0.1    0.1    0.1    0.1
+ ρ       -    | -      1      0.01   0.01   0.01  | -      1      0.01   0.01   0.01
+ k       -    | -      1      1      10     100   | -      1      1      10     100
+ Ĝ       300  | 6000   300    5042   5888   5988  | 3000   300    2752   2973   2997
769
+ Finally, the six informed down-sampling methods we have chosen for this work
770
+ include, for both the 0.05 and 0.1 down-sample rates (r), a 0.01 parent sampling rate (ρ)
771
+ with a few different distance calculation scheduling (k) parameters. Through a set of
772
+ preliminary experiments, the value of ρ = 0.01 for the parent sampling rate was de-
773
+ termined to be effective while not resulting in too many extra program executions¶.
774
+ In conjunction, these hyper-parameters mean that every k generations, 10 parents are
775
+ used to determine the distances between all training cases, where k ∈ {1, 10, 100}.
776
+ 5  Results and Discussion
778
+ We discuss the success rates achieved by both GP systems using standard lexicase se-
779
+ lection, random down-sampling, and different configurations of IDS. Further, we study
780
+ how the composition of the down-samples found by IDS changes over the number of
781
+ generations.
782
+ 5.1  Informed Down-Sampling Improves Problem-solving Success
784
+ Tables 4 and 5 show the success rates for PushGP and G3P respectively on the chosen
785
+ program synthesis benchmark problems for different parameter configurations. The
786
+ success rate is defined as the number of runs that result in a program that passes the
787
+ complete training set as well as the entire unseen test set.
788
+ For random down-sampling and IDS, we measured solutions only on the down-
789
+ samples during the actual run. As such, we execute these runs to the maximum gener-
790
+ ational limit, and then conduct a post-hoc analysis to see if any solutions passed all of
791
+ the training cases. If so, this is the solution that we then evaluate on the unseen test set
792
+ to determine whether it generalizes or not.
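+ A minimal sketch of this post-hoc check (the function names are ours, not taken from
+ either code base) could look as follows:
+
+ def classify_run(program, run_program, training_set, test_set):
+     """A run is a 'success' only if its candidate passes every training case
+     and then also every unseen test case."""
+     passes = lambda cases: all(run_program(program, inp) == out for inp, out in cases)
+     solution = passes(training_set)          # passes the complete training set
+     success = solution and passes(test_set)  # additionally generalizes to the test set
+     return solution, success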
793
+ For all studied configurations, we report success rates based on 100 runs. For each
794
+ benchmark problem, we highlight in bold the best success rate at each of the down-
795
+ sample sizes. Problem names in bold are those where an informed down-sampling
796
+ run outperformed random at both down-sample rates on that problem. Problem names
797
+ that are underlined are those where a random down-sampling run outperformed an
798
+ informed down-sampling run at both down-sample rates. Asterisks signify results
799
+ that are significantly better than random down-sampling at the same down-sample size.
800
+ ¶As we are trying to approach the computational savings of random down-sampled lexicase selection,
801
+ the smaller the value of ρ, the better. We found that the relatively small value of ρ = 0.01 resulted in sampling
802
+ that was good enough to determine the joint case information.
803
807
+ Standard lexicase selection was not included in our statistical analyses, as IDS is pre-
808
+ sented to improve upon random down-sampling at a fixed down-sample size. We per-
809
+ formed significance analysis with a two proportion z-test and Bonferroni-Holm correc-
810
+ tion. Shown with * are those significant at the α = 0.1 level, ** the α = 0.05 level, and
811
+ *** the α = 0.01 level.
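+ For reference, a comparison of this kind can be reproduced with standard statistical
+ tooling; the sketch below is illustrative only (it is not the analysis script used here),
+ assumes statsmodels is available, and plugs in two success counts from Table 4.
+
+ from statsmodels.stats.proportion import proportions_ztest
+ from statsmodels.stats.multitest import multipletests
+
+ # successes out of 100 runs: (IDS configuration, random down-sampling at the same rate)
+ results = {"Fizz Buzz": (95, 64), "Count Odds": (98, 25)}
+
+ pvals = []
+ for problem, (ids_succ, rnd_succ) in results.items():
+     # one-sided two-proportion z-test: is the IDS success proportion larger?
+     _, p = proportions_ztest([ids_succ, rnd_succ], [100, 100], alternative="larger")
+     pvals.append(p)
+
+ reject, corrected, _, _ = multipletests(pvals, alpha=0.05, method="holm")
+ print(list(zip(results, corrected, reject)))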
812
+ For the PushGP results, let us consider the Fizz Buzz problem. Standard lexicase
813
+ selection had 13 successful runs. Using random down-sampling at the 0.05 down-
814
+ sampling rate improved this result to 64, in line with the findings of Helmuth and
815
+ Spector (2021). Using the same down-sampling rate with IDS, a 0.01 parent rate, and
816
+ k = 100 yielded 95 successful runs. This is significantly better than random down-
817
+ sampling at the 0.01 level. This is an important result as IDS is significantly improving
818
+ on random down-sampling, which in turn improves on lexicase selection. Another set
819
+ of PushGP IDS runs where we observed significant improvements was that of the
820
+ Count Odds problem. While standard lexicase selection achieves 24 successes, random
821
+ down-sampling at either down-sample rate (r = 0.05 or r = 0.1) does not produce
822
+ more than 26 successful runs. The failure to meaningfully improve success rates by
823
+ random down-sampling seemed to be addressed by informed down-sampling. This
824
+ is clear as informed down-sampling at all configurations ensures that close to, if not
+ all, 100 runs successfully generalize to the held-out test set. This and similar results
826
+ hint that while randomly down-sampled lexicase selection usually works well, there
+ are some problems where important cases might be dropped, resulting in a
828
+ similar performance to standard lexicase selection despite the increased search gener-
829
+ ations. Informed down-sampling has the ability to improve success rates both when
830
+ random down-sampling improves upon standard lexicase selection, and when it does
831
+ not.
832
+ Only one configuration of G3P resulted in a significant improvement on random
833
+ down-sampling at the same down-sample rate. For the Grade problem at the 0.05
834
+ down-sample rate, we see significantly more successes when using IDS with ρ = 0.01
835
+ and k = 10. For this problem, using this informed down-sample configuration re-
836
+ sulted in 57% of the runs yielding a generalizing solution, whereas using random down-
837
+ sampling resulted in only 39% of the runs yielding a success. The fact that only a single
838
+ configuration of IDS resulted in a significant improvement suggests that the problem-
839
+ solving benefits of using IDS are representation- and problem-dependent, motivating
840
+ future work to continue improving IDS to achieve more universal improvements to
841
+ problem-solving success.
842
+ We have a number of hypotheses explaining this improved performance. The first
843
+ of these is that the informed down-sampling procedure increases the number of spe-
844
+ cialists (individuals that are exceptional on a few cases but have a high total error) that survive
845
+ over the course of evolutionary time. These individuals could be better maintained
846
+ with IDS as the cases they are exceptional on are still placed in the down-samples
847
+ throughout evolution, preventing them from being lost as could happen when ran-
848
+ domly down-sampling.
849
+ Another hypothesis for IDS’s improved performance is that it reduces the compu-
850
+ tation used to evaluate individuals on synonymous cases. When two cases are fully
851
+ synonymous, all individuals that solve one case solve the other as well. When using
852
+ lexicase selection, having both of these cases in the down-sample would result in little
853
+ difference in the probability of selecting each individual compared to having only one
854
+ case in the down-sample. After one of the two cases has been used to filter the pool
855
+ of candidate solutions, the other will have no filtering pressure because all remaining
856
860
+ Table 4: Number of generalizing solutions (successes) out of 100 runs achieved by PushGP on the test set.
+
+ Method          Lex | Rnd  IDS    IDS    IDS     IDS    | Rnd  IDS    IDS    IDS    IDS
+ r               -   | 0.05 0.05   0.05   0.05    0.05   | 0.1  0.1    0.1    0.1    0.1
+ ρ               -   | -    1      0.01   0.01    0.01   | -    1      0.01   0.01   0.01
+ k               -   | -    1      1      10      100    | -    1      1      10     100
+ Count Odds      24  | 25   43***  99***  100***  98***  | 26   55***  95***  99***  97***
+ Find Pair       5   | 27   9      32     32      36     | 15   7      19     19     21
+ Fizz Buzz       13  | 64   2      85***  94***   95***  | 45   3      75     78*    81**
+ Fuel Cost       41  | 72   1      83     85      83     | 76   7      69     72     70
+ GCD             20  | 74   4      76     67      69     | 54   6      56     63     62
+ Grade           0   | 0    0      0      1       0      | 1    0      0      1      1
+ Scrabble Score  8   | 8    6      69***  64***   75***  | 16   9      55***  74***  64***
+ Small or Large  34  | 93   37     69     69      69     | 69   39     60     66     54
991
995
+ Table 5: Number of generalizing solutions (successes) out of 100 runs achieved by G3P on the test set.
+
+ Method          Lex | Rnd  IDS  IDS   IDS   IDS  | Rnd  IDS  IDS   IDS   IDS
+ r               -   | 0.05 0.05 0.05  0.05  0.05 | 0.1  0.1  0.1   0.1   0.1
+ ρ               -   | -    1    0.01  0.01  0.01 | -    1    0.01  0.01  0.01
+ k               -   | -    1    1     10    100  | -    1    1     10    100
+ Count Odds      65  | 66   45   53    62    63   | 67   58   60    58    72
+ Find Pair       0   | 0    0    1     0     0    | 1    0    0     1     0
+ Fizz Buzz       62  | 83   50   84    78    85   | 78   53   81    89    72
+ Fuel Cost       33  | 34   17   28    27    29   | 29   21   21    25    33
+ GCD             0   | 1    0    0     0     1    | 0    0    0     0     0
+ Grade           36  | 39   29   51    57*   44   | 44   37   46    51    48
+ Scrabble Score  6   | 10   1    11    10    10   | 14   0    6     3     3
+ Small or Large  41  | 52   49   54    63    63   | 59   52   57    55    63
1126
1130
+ individuals perform identically on the synonymous cases. Having a synonymous case
1131
+ does increase the chance that one of the two cases appears earlier in the shuffled case
1132
+ ordering, producing a minor (though perhaps undesired) change in selection proba-
1133
+ bility. Synonymous (or near synonymous) cases additionally take spots in the down-
1134
+ sample that cannot be allocated to other, more-informative cases. When using IDS, we
1135
+ ensure that the first few cases added to the down-sample measure relatively different
1136
+ behaviors. This may allow IDS to select a larger variety of individuals than random
1137
+ down-sampling, instead approximating the variety that could be selected by full lexi-
1138
+ case selection.
1139
+ These results, in general, make it clear that informed down-sampling by farthest
1140
+ first traversal is significantly outperforming randomly down-sampled lexicase selec-
1141
+ tion on a portion of these program synthesis benchmark problems for the PushGP
1142
+ evolutionary framework. The G3P results are less clearly in favor of informed down-
1143
+ sampling, but still point to minor improvements in success rates. It is important to
1144
+ note that all of our down-sampled runs (besides full-information) consistently and sig-
1145
+ nificantly outperform standard lexicase selection, which has in turn been shown to
1146
+ significantly outperform other selection strategies. This result agrees with that of Hel-
1147
+ muth and Abdelhady (2020), which showed down-sampled lexicase selection to be, before
1148
+ this work, the state of the art in program synthesis with genetic programming. Our in-
1149
+ formed down-sampling runs outperform random down-sampling (higher success rate
1150
+ for both down-sample rates) on 6/8 of the problems we studied for PushGP, with 3/8
1151
+ of them being statistically significant. For G3P, informed down-sampling improves on
1152
+ 3/8 problems, with 1/8 being significant.
1153
+ Random down-sampling outperformed informed down-sampling (across both
1154
+ down-sampling levels) on only one problem (Small or Large) for PushGP, and none for
1155
+ G3P. For Small or Large with PushGP, we see that the worse performance with informed
1156
+ down-sampling can be attributed to a lower generalization rate (and not worse perfor-
1157
+ mance on the training sets). The generalization rates can be found in Appendix Table 6
+ for PushGP and Appendix Table 7 for G3P. Future work should explore the effect that
1159
+ informed down-sampling has on generalization in more depth.
1160
+ 5.2  Using Smaller Informed Down-Samples Tends to Improve Success Rates
1162
+ In general, our IDS runs at a 0.05 down-sample rate have a higher success rate than
1163
+ their equivalent counterparts at the 0.1 down-sample rate. This difference is likely due
1164
+ to the fact that the runs at a 0.1 down-sample rate have a substantially lower genera-
1165
+ tional limit, meaning that we are exploring a smaller portion of the space of possible
1166
+ solution programs. With 200 training cases, our down-sample contains 10 and 20 cases
1167
+ respectively for the 0.05 and 0.1 down-sample rates. A possible reason for the improved
1168
+ performance at 0.05 is that a larger proportion of these cases are indeed our distinct, or
1169
+ informative, cases. Note that once the Farthest First Traversal process selects a rep-
1170
+ resentative case for every synonymous group in the down-sample, every remaining
1171
+ case’s minimum distance to the current sample will be equal to 0, so the selections
1172
+ are performed randomly to fill the rest of the cases. Since we are using the same prob-
1173
+ lems, with the same number of behavioral niches, we will see the runs with 20 cases in
1174
+ the down-sample having more synonymous cases in the down-sample. Due to the fact
1175
+ that the content of the training cases is not notably more informative to make up for the
1176
+ decreased generational limit, we see a lower success rate. We will analyze the specific
1177
+ cases that compose the down-samples in section 5.3.
1178
+ The exceptions to this trend are the full information down-sampling runs. For
1179
1183
+ these runs, the larger down-samples tend to perform better. This result is likely due
1184
+ to the fact that the generational limit was set to 300 for both sampling levels (as they
1185
+ both evaluate all individuals on all test cases), and so having a smaller down-sample
1186
+ size would not change the number of evaluations. With more cases in the sample, the
1187
+ GP method can take into account more information when performing selection, which
1188
+ could result in more informed search. The magnitude of the differences for success rate
1189
+ across sample size for the full IDS runs suggests that there are diminishing returns for
1190
+ including more cases in the sample.
1191
+ 5.3  Informed Down-Sampling Automatically Discovers Important Training Cases
1193
+ To gain a deeper insight into how IDS composes down-samples, we visualize how the
1194
+ selected training cases (used for a down-sample) develop over the generations of an
1195
+ evolutionary run.
1196
+ Figures 3 and 4 show the composition of down-samples for every problem at every
1197
+ generation using PushGP (Fig. 3) and G3P (Fig. 4) with down-sample rate r = 0.05. We
1198
+ present results for a full information configuration (ρ = 1 and k = 1) as well as a
1199
+ sparse information configuration (ρ = 0.01 and k = 10). We chose to analyze both a full
1200
+ information and sparse information run in order to see whether our sparse information
1201
+ configurations are finding the same training cases to be informative as if we had used
1202
+ all parents to evaluate the distances between training cases.
1203
+ The plots show how often certain training cases are included in the down-sample
1204
+ at every generation, averaged over all active runs. Each row represents a case in the
1205
+ training data, ordered by its position in the training set. The training sets used were
1206
+ generated by first adding some human-expert defined edge cases, and filling the rest
1207
+ with cases that were randomly generated by a function that already implements our
1208
+ desired program (oracle function). For each figure, there is a single marker on the y-
1209
+ axis that shows where exactly the expert-case cutoff for the training set was. Thus, the
1210
+ rows above the marker in the visuals represent cases that humans determined
1211
+ to be important based on the problem definition.
1212
+ Brighter colors imply that a case is included more often, darker colors imply a
1213
+ lower number of inclusions.
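+ (For reproducibility, a composition plot of this kind can be assembled from per-run logs
+ of which case indices appeared in each generation's down-sample; the sketch below is
+ our own illustration of that bookkeeping, not the plotting code used for the figures.)
+
+ import numpy as np
+
+ def composition_matrix(runs, n_cases, n_generations):
+     """runs maps run-id -> {generation: set of case indices in that down-sample}.
+     Returns a (cases x generations) array with the fraction of active runs that
+     included each case in each generation."""
+     counts = np.zeros((n_cases, n_generations))
+     active = np.zeros(n_generations)
+     for per_generation in runs.values():
+         for g, ds in per_generation.items():
+             active[g] += 1
+             for case in ds:
+                 counts[case, g] += 1
+     return counts / np.maximum(active, 1)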
1214
+ For PushGP (Figure 3), we see that the configurations with sparse information of-
1215
+ ten include the same cases in the down-sample as the runs with full information. This
1216
+ result means that by using a parent sampling rate of ρ = 0.01 and a case distance
1217
+ evaluation schedule parameter of k = 10, we can significantly reduce the number of
1218
+ evaluations needed to calculate distances between cases, while still maintaining a good
1219
+ approximation to the ground truth (full information, where we use all parents every
1220
+ generation to calculate distances). However, the composition for our sparse informa-
1221
+ tion runs is slightly noisier than that for full information, suggesting that using
1222
+ parent sampling could introduce some extra stochasticity to the down-sample creation
1223
+ process.
1224
+ For all studied benchmark problems, we see that IDS has a strong bias toward
1225
+ specific training cases that are included substantially more often in the down-sample.
1226
+ These selected training cases are mainly consistent with the human-defined edge cases
1227
+ that exist at the beginning of the training set. This result shows that informed down-
1228
+ sampling is indeed often finding the same cases to be informative as those that a human
1229
+ expert would, without any knowledge of the problem definition. However, with IDS,
1230
+ we can draw further comparisons of informativeness within this expert-defined groups
1231
+ of cases. This can be seen as some cases are selected more often that others within the
1232
+ Preprint
1233
+ 17
1234
+
1235
+ R. Boldi, M. Briesch, D. Sobania, A. Lalejini, T. Helmuth, F. Rothlauf, C. Ofria and L. Spector
1236
+ Full Information
1237
+ Sparse Information
1238
+ Count Odds
1239
+ Cases
1240
+ Find Pair
1241
+ Cases
1242
+ Fizz Buzz
1243
+ Cases
1244
+ Generations
1245
+ Fuel Cost
1246
+ Cases
1247
+ Generations
1248
+ Figure 3: Down-sample composition over generations for PushGP with 0.05 down-
1249
+ sample rate for a full information (ρ = 1 and k = 1) and a sparse information configu-
1250
+ ration (ρ = 0.01 and k = 10).
1251
+ first several cases.
1252
+ We then look at the labels of the specific training cases that are found to be impor-
1253
+ tant. We see that these training cases make sense to be included more often than others
1254
+ in the down-samples. Note that the labels of the specific training cases are not included
1255
1259
+ [Figure 3, continued, shows the corresponding heat maps for GCD, Grade, Scrabble
+ Score, and Small or Large; full information (left) vs. sparse information (right);
+ x-axis: generations, y-axis: training cases.]
+ Figure 3: Continued.
1272
+ in the plots for simplicity, but can be queried based on their specific index in the data
1273
+ sets provided in our code implementation.
1274
+ For example, for the Small or Large problem, cases around the decision boundaries
1275
+ as well as numbers between 0 and 1000 are more often included. For the Grade problem,
1276
+ those edge cases with very close decision boundaries are included while the ones with
1277
+ far away boundaries are not taken into account for the down-sample. For Fuel Cost,
1278
1282
+ [Figure 4 shows heat maps of down-sample composition for Count Odds, Find Pair,
+ Fizz Buzz, and Fuel Cost, with full information (left column) and sparse information
+ (right column); x-axis: generations, y-axis: training cases.]
+ Figure 4: Down-sample composition over generations for G3P with 0.05 down-sample
+ rate for a full information (ρ = 1 and k = 1) and a sparse information configuration
+ (ρ = 0.01 and k = 10).
1297
+ nearly all of the human-defined edge cases are found to be important, while for the
+ GCD problem the first two cases in particular make it into nearly every down-sample,
1299
+ while the rest are selected less often.
1300
1304
+ [Figure 4, continued, shows the corresponding heat maps for GCD, Grade, Scrabble
+ Score, and Small or Large; full information (left) vs. sparse information (right);
+ x-axis: generations, y-axis: training cases.]
+ Figure 4: Continued.
1317
+ For the Scrabble Score problem, we see that the first edge cases, which specify the
1318
+ score for each letter, do not seem to be informative at all. This result is not surprising,
1319
+ as this information is already available to PushGP through a vector with these scores
1320
+ on the vector stack. However, the three edge cases after them with empty strings and
1321
+ special characters as input are included a lot. For Count Odds, the edge cases denot-
1322
+ ing empty lists, or lists with zero or a single odd number were found to be important,
1323
1327
+ indicating that those contain all the important information needed to learn what odd and
+ even numbers are, as well as how to handle a list. For Fizz Buzz, all edge cases seem im-
1329
+ portant while for the Find Pair problem only those edge cases with lists of length 3 are
1330
+ consistently included. Those lists of length 2 in the edge cases are represented in the
1331
+ down-sample less often.
1332
+ Lastly, we see that the composition of the down-sample stays rather stable during
1333
+ the evolutionary run for the PushGP system, explaining why there is only a small dif-
1334
+ ference in our experiments between calculating the distances every k = 1 and k = 100
1335
+ generations (see Table 4).
1336
+ For G3P (Fig. 4), we see similar results as with PushGP. However, for the prob-
1337
+ lems that require iterative structures to be solved (Count Odds, Find Pair) we see that
1338
+ the down-sample quickly dissolves into random noise instead of any form of struc-
1339
+ ture. This dynamic occurs despite the fact that the same edge cases as with PushGP are
1340
+ initially identified in the first few generations. This result is not surprising as finding it-
1341
+ erative structures is known to be challenging for grammar-guided approaches, as such
1342
+ structures are difficult to build step by step guided by the performance on a set of
+ training cases (Sobania and Rothlauf, 2020; Sobania et al., 2022b). Another difference
1344
+ between the case compositions is that, while IDS with G3P tends to discover the same
+ cases as those found with PushGP, their use is less consistent, resulting in lines that
+ are more faint than those for PushGP. Both of these hypotheses could help explain the
+ smaller improvement that IDS yields for G3P compared to PushGP.
1348
+ However, for the problems that require conditionals, like Small or Large and Grade,
1349
+ we see that the important cases are identified and used during evolution. This result is
1350
+ also reflected in the success rates compared to random down-sampling (see Table 5).
1351
+ Interestingly, IDS identifies many of the same cases as important for G3P as well as
1352
+ PushGP. This result suggests that the structure of the problem itself determines which
1353
+ cases are important rather than the considered representation. This dynamic makes
1354
+ IDS potentially useful across many different systems and approaches.
1355
+ 6  Conclusion and Future work
1357
+ In this work, we proposed a novel approach to construct down-samples in an informed
1358
+ manner during evolution when using down-sampled lexicase selection. We find that
1359
+ changing the composition of down-samples to include cases that are more “informa-
1360
+ tive” helps improve problem solving performance with a fixed computational bud-
1361
+ get. Informativeness, we hypothesize, is linked to how distinct the cases in the down-
1362
+ sample are. Cases that are solved by the same subset of the population are likely testing
1363
+ for the same behavior, and thus need not be included in the down-sample at the same
1364
+ time. Cases that test for different behaviors likely maintain different behavioral groups
1365
+ of individuals, which could promote and maintain higher levels of diversity in the pop-
1366
+ ulation.
1367
+ In our empirical comparisons of these down-sampling methods, we find evidence
1368
+ to support the conclusion that selecting cases in an informed manner increases the suc-
1369
+ cess rate of GP runs. These results were confirmed across two independent GP systems
1370
+ by using well studied benchmark problems. We find that using IDS often increases the
1371
+ proportion of informative cases in the down-sample as verified by improved success
1372
+ rates as well as by directly inspecting the content of the down-samples. IDS improves
1373
+ upon the state of the art selection method across the majority of the program synthesis
1374
+ problems explored in this work.
1375
+ This work is a first exploration into changing the case composition of down-
1376
1380
+ samples for lexicase selection runs. As such, it opens many potential directions for
1381
+ future research. Due to the modular nature of the informed down-sampling system,
1382
+ different methods could be used for either the pairwise information measurement, or
1383
+ for the down-sample creation portions of the algorithm. An exploration into differ-
1384
+ ent down-sampling levels, and the effects levels have on the informational content of
1385
+ down-samples is also a promising direction for future work. Additionally, IDS intro-
1386
+ duces new hyperparameters for the parent sampling rate and generational schedule;
1387
+ it would be beneficial to create a method for automatically setting these depending on
1388
+ the problem and the state of the GP search. Furthermore, even though there are reasons to
1389
+ believe that IDS and down-sampling in general work well with lexicase selection, there
1390
+ is nothing that ties them to a particular selection method; it may be informative to
1391
+ explore the effects of IDS on other parent selection methods such as tournament selec-
1392
+ tion. Finally, comparing the extent to which different down-sampling strategies blunt
1393
+ lexicase’s ability to maintain specialists could also yield important insights into why
1394
+ informed down-sampling improves success rates as much as it does.
1395
+ 7  Acknowledgements
1397
+ This material is based upon work supported by the National Science Foundation un-
1398
+ der Grant No. 1617087. Any opinions, findings, and conclusions or recommendations
1399
+ expressed in this publication are those of the authors and do not necessarily reflect the
1400
+ views of the National Science Foundation.
1401
+ This work was performed in part using high performance computing equipment
1402
+ obtained under a grant from the Collaborative R&D Fund managed by the Mas-
1403
+ sachusetts Technology Collaborative.
1404
+ Parts of this research were conducted using the supercomputer Mogon and/or ad-
1405
+ visory services offered by Johannes Gutenberg University Mainz (hpc.uni-mainz.de),
1406
+ which is a member of the AHRP (Alliance for High Performance Computing in
1407
+ Rhineland Palatinate, www.ahrp.info) and the Gauss Alliance e.V.
1408
+ The authors would like to thank Anil Saini, Austin Ferguson, Cooper Sigrist, Con-
1409
+ stantin Weiser, Edward Pantridge, Jose Hernandez, Li Ding and the Members of the
1410
+ PUSH lab at Amherst College for discussions that helped shape this work.
1411
+ References
1412
+ Aenugu, S. and Spector, L. (2019). Lexicase selection in learning classifier systems. In Proceedings
1413
+ of the Genetic and Evolutionary Computation Conference, GECCO ’19, page 356–364, New York,
1414
+ NY, USA. Association for Computing Machinery.
1415
+ Bachem, O., Lucic, M., and Krause, A. (2017). Practical coreset constructions for machine learn-
1416
+ ing. arXiv: Machine Learning.
1417
+ Boldi, R., Helmuth, T., and Spector, L. (2022).
1418
+ Exploring Environmental Change for Down-
1419
+ Sampled Lexicase Selection. In Why it Didn’t Work-Shop of ALIFE 2022: The 2022 Con-
1420
+ ference on Artificial Life.
1421
+ Brindle, A. (1980). Genetic algorithms for function optimization. PhD thesis, University of Alberta.
1422
+ Chrysakis, A. and Moens, M.-F. (2020). Online continual learning from imbalanced data. In III,
1423
+ H. D. and Singh, A., editors, Proceedings of the 37th International Conference on Machine Learning,
1424
+ volume 119 of Proceedings of Machine Learning Research, pages 1952–1961. PMLR.
1425
+ Deb, K., Pratap, A., Agarwal, S., and Meyarivan, T. (2002). A fast and elitist multiobjective genetic
1426
+ algorithm: NSGA-II. IEEE Transactions on Evolutionary Computation, 6(2):182–197.
1427
1431
+ Ding, L., Boldi, R., Helmuth, T., and Spector, L. (2022). Lexicase selection at scale. In Genetic
1432
+ and Evolutionary Computation Conference Companion (GECCO ’22 Companion), July 9–13, 2022,
1433
+ Boston, MA, USA.
1434
+ Ding, L. and Spector, L. (2021). Optimizing neural networks with gradient lexicase selection. In
1435
+ International Conference on Learning Representations.
1436
+ Dolson, E. and Ofria, C. (2018). Ecological theory provides insights about evolutionary compu-
1437
+ tation. In Proceedings of the Genetic and Evolutionary Computation Conference Companion, GECCO
1438
+ ’18, page 105–106, New York, NY, USA. Association for Computing Machinery.
1439
+ Fagan, D., Fenton, M., and O’Neill, M. (2016). Exploring position independent initialisation in
1440
+ grammatical evolution. In 2016 IEEE Congress on Evolutionary Computation (CEC), pages 5060–
1441
+ 5067.
1442
+ Fenton, M., McDermott, J., Fagan, D., Forstenlechner, S., Hemberg, E., and O’Neill, M. (2017).
1443
+ Ponyge2: Grammatical evolution in python. In Proceedings of the Genetic and Evolutionary Com-
1444
+ putation Conference Companion, pages 1194–1201.
1445
+ Ferguson, A. J., Hernandez, J. G., Junghans, D., Lalejini, A., Dolson, E., and Ofria, C. (2020). Char-
1446
+ acterizing the effects of random subsampling on lexicase selection. In Banzhaf, W., Goodman,
1447
+ E., Sheneman, L., Trujillo, L., and Worzel, B., editors, Genetic Programming Theory and Practice
1448
+ XVII, pages 1–23. Springer International Publishing, Cham.
1449
+ Forstenlechner, S., Fagan, D., Nicolau, M., and O’Neill, M. (2017). A grammar design pattern
1450
+ for arbitrary program synthesis problems in genetic programming. In European Conference on
1451
+ Genetic Programming, pages 262–277. Springer.
1452
+ Forstenlechner, S., Nicolau, M., Fagan, D., and O’Neill, M. (2016). Grammar design for derivation
1453
+ tree based genetic programming systems. In European Conference on Genetic Programming, pages
1454
+ 199–214. Springer.
1455
+ Goings, S., Goldsby, H., Cheng, B. H., and Ofria, C. (2012).
1456
+ An ecology-based evolutionary
1457
+ algorithm to evolve solutions to complex problems. In Artificial Life 13, pages 171–177. MIT
1458
+ Press.
1459
+ Helmuth, T. and Abdelhady, A. (2020). Benchmarking parent selection for program synthesis by
1460
+ genetic programming. In Proceedings of the 2020 Genetic and Evolutionary Computation Conference
1461
+ Companion, pages 237–238, Cancún, Mexico. ACM.
1462
+ Helmuth, T. and Kelly, P. (2021). PSB2: The second program synthesis benchmark suite. In 2021
1463
+ Genetic and Evolutionary Computation Conference, GECCO ’21, Lille, France. ACM.
1464
+ Helmuth, T. and Kelly, P. (2022).
1465
+ Applying genetic programming to psb2: The next gen-
1466
+ eration program synthesis benchmark suite.
1467
+ Genetic Programming and Evolvable Machines,
1468
+ 23(3):375–404.
1469
+ Helmuth, T., McPhee, N. F., and Spector, L. (2016). Effects of lexicase and tournament selection
1470
+ on diversity recovery and maintenance. In Proceedings of the 2016 on Genetic and Evolution-
1471
+ ary Computation Conference Companion, GECCO ’16 Companion, page 983–990, New York, NY,
1472
+ USA. Association for Computing Machinery.
1473
+ Helmuth, T., McPhee, N. F., and Spector, L. (2018). Program synthesis using uniform mutation
1474
+ by addition and deletion. In Proceedings of the Genetic and Evolutionary Computation Conference,
1475
+ GECCO ’18, page 1127–1134, New York, NY, USA. Association for Computing Machinery.
1476
+ Helmuth, T., Pantridge, E., and Spector, L. (2020). On the importance of specialists for lexicase
1477
+ selection. Genetic Programming and Evolvable Machines, 21(3):349–373.
1478
+ Helmuth, T. and Spector, L. (2015). General program synthesis benchmark suite. In GECCO
1479
+ ’15: Proceedings of the 2015 conference on Genetic and Evolutionary Computation Conference, pages
1480
+ 1039–1046, Madrid, Spain. ACM.
1481
1485
+ Helmuth, T. and Spector, L. (2020). Explaining and exploiting the advantages of down-sampled
1486
+ lexicase selection. In Artificial Life Conference Proceedings, pages 341–349. MIT Press.
1487
+ Helmuth, T. and Spector, L. (2021). Problem-solving benefits of down-sampled lexicase selection.
1488
+ Artificial Life, pages 1–21.
1489
+ Helmuth, T., Spector, L., and Matheson, J. (2015). Solving uncompromising problems with lexi-
1490
+ case selection. IEEE Transactions on Evolutionary Computation, 19(5):630–643.
1491
+ Hernandez, J. G., Lalejini, A., Dolson, E., and Ofria, C. (2019). Random subsampling improves
1492
+ performance in lexicase selection. In GECCO ’19: Proceedings of the Genetic and Evolutionary
1493
+ Computation Conference Companion, pages 2028–2031, Prague, Czech Republic. ACM.
1494
+ Hernandez, J. G., Lalejini, A., and Ofria, C. (2022). An Exploration of Exploration: Measuring the
1495
+ Ability of Lexicase Selection to Find Obscure Pathways to Optimality. In Banzhaf, W., Trujillo,
1496
+ L., Winkler, S., and Worzel, B., editors, Genetic Programming Theory and Practice XVIII, pages
1497
+ 83–107. Springer Nature Singapore, Singapore.
1498
+ Hochbaum, D. S. and Shmoys, D. B. (1985). A best possible heuristic for the k-center problem.
1499
+ Math. Oper. Res., 10:180–184.
1500
+ Holland, J. H. (1992). Adaptation in Natural and Artificial Systems: An Introductory Analysis with
1501
+ Applications to Biology, Control and Artificial Intelligence. MIT Press, Cambridge, MA, USA.
1502
+ Horn, J., Nafpliotis, N., and Goldberg, D. (1994). A niched Pareto genetic algorithm for multi-
1503
+ objective optimization. In Proceedings of the First IEEE Conference on Evolutionary Computation.
1504
+ IEEE World Congress on Computational Intelligence, pages 82–87, Orlando, FL, USA. IEEE.
1505
+ Krawiec, K., Swan, J., and O’Reilly, U.-M. (2016).
1506
+ Behavioral Program Synthesis: Insights and
1507
+ Prospects, pages 169–183. Springer International Publishing, Cham.
1508
+ La Cava, W., Spector, L., and Danai, K. (2016). Epsilon-lexicase selection for regression. In Pro-
1509
+ ceedings of the Genetic and Evolutionary Computation Conference 2016, GECCO ’16, page 741–748,
1510
+ New York, NY, USA. Association for Computing Machinery.
1511
+ Lalejini, A., Dolson, E., Vostinar, A. E., and Zaman, L. (2022). Artificial selection methods from
1512
+ evolutionary computing show promise for directed evolution of microbes. eLife, 11:e79665.
1513
+ Loshchilov, I. and Hutter, F. (2015). Online batch selection for faster training of neural networks.
1514
+ ArXiv, abs/1511.06343.
1515
+ Metevier, B., Saini, A. K., and Spector, L. (2019). Lexicase selection beyond genetic programming.
1516
+ In Banzhaf, W., Spector, L., and Sheneman, L., editors, Genetic Programming Theory and Practice
1517
+ XVI, pages 123–136. Springer International Publishing, Cham.
1518
+ Moore, J. M. and Stanton, A. (2017). Lexicase selection outperforms previous strategies for in-
1519
+ cremental evolution of virtual creature controllers. In Knibbe, C., Beslon, G., Parsons, D. P.,
1520
+ Misevic, D., Rouzaud-Cornabas, J., Bredèche, N., Hassas, S., 0001, O. S., and Soula, H., ed-
1521
+ itors, Proceedings of the Fourteenth European Conference Artificial Life, ECAL 2017, Lyon, France,
1522
+ September 4-8, 2017, pages 290–297. MIT Press.
1523
+ Paul, M., Ganguli, S., and Dziugaite, G. K. (2021). Deep learning on a data diet: Finding impor-
1524
+ tant examples early in training. Advances in Neural Information Processing Systems, 34:20596–
1525
+ 20607.
1526
+ Ruder, S. (2017). An overview of gradient descent optimization algorithms. arXiv:1609.04747
1527
+ [cs].
1528
+ Ryan, C., Collins, J. J., and Neill, M. O. (1998). Grammatical evolution: Evolving programs for an
1529
+ arbitrary language. In European conference on genetic programming, pages 83–96. Springer.
1530
+ Schmidt, M. and Lipson, H. (2005). Co-evolution of fitness maximizers and fitness predictors.
1531
+ In Rothlauf, F., editor, Late breaking paper at Genetic and Evolutionary Computation Conference
1532
+ (GECCO’2005), Washington, D.C., USA.
1533
1537
+ Schmidt, M. D. and Lipson, H. (2008). Coevolution of fitness predictors. IEEE Transactions on Evolutionary Computation, 12:736–749.
+ Smith, R. E., Forrest, S., and Perelson, A. S. (1993). Population diversity in an immune system model: Implications for genetic search. In Whitley, L. D., editor, Foundations of Genetic Algorithms, volume 2 of Foundations of Genetic Algorithms, pages 153–165. Elsevier.
+ Sobania, D., Briesch, M., and Rothlauf, F. (2022a). Choose your programming copilot: A comparison of the program synthesis performance of GitHub Copilot and genetic programming. In Proceedings of the Genetic and Evolutionary Computation Conference, pages 1019–1027.
+ Sobania, D. and Rothlauf, F. (2020). Challenges of program synthesis with grammatical evolution. In European Conference on Genetic Programming (Part of EvoStar), pages 211–227. Springer.
+ Sobania, D. and Rothlauf, F. (2022). Program synthesis with genetic programming: The influence of batch sizes. In Genetic Programming: 25th European Conference, EuroGP 2022, Held as Part of EvoStar 2022, Madrid, Spain, April 20–22, 2022, Proceedings, pages 118–129, Berlin, Heidelberg. Springer-Verlag.
+ Sobania, D., Schweim, D., and Rothlauf, F. (2022b). A comprehensive survey on program synthesis with evolutionary algorithms. IEEE Transactions on Evolutionary Computation.
+ Spector, L. (2012). Assessment of problem modality by differential performance of lexicase selection in genetic programming: A preliminary report. In Proceedings of the 14th Annual Conference Companion on Genetic and Evolutionary Computation, GECCO ’12, pages 401–408, New York, NY, USA. Association for Computing Machinery.
+ Spector, L., Perry, C., Klein, J., and Keijzer, M. (2004). Push 3.0 programming language description. Technical Report HC-CSTR-2004-02, School of Cognitive Science, Hampshire College, USA.
+ Spector, L. and Robinson, A. (2002). Genetic programming and autoconstructive evolution with the Push programming language. Genetic Programming and Evolvable Machines, 3(1):7–40.
+ Troise, S. A. and Helmuth, T. (2017). Lexicase selection with weighted shuffle. In Banzhaf, W., Olson, R. S., Tozier, W., and Riolo, R., editors, Genetic Programming Theory and Practice XV, Genetic and Evolutionary Computation, pages 89–104, University of Michigan in Ann Arbor, USA. Springer.
+ Vanneschi, L., Castelli, M., and Silva, S. (2014). A survey of semantic methods in genetic programming. Genetic Programming and Evolvable Machines, 15(2):195–214.
+ Whigham, P. A. et al. (1995). Grammatically-based genetic programming. In Proceedings of the Workshop on Genetic Programming: From Theory to Real-World Applications, volume 16, pages 33–41. Citeseer.
+ Zogaj, F., Cambronero, J. P., Rinard, M. C., and Cito, J. (2021). Doing more with less: Characterizing dataset downsampling for AutoML. Proceedings of the VLDB Endowment, 14(11):2059–2072.
+ Šikulová, M. and Sekanina, L. (2012). Coevolution in Cartesian Genetic Programming. In Moraglio, A., Silva, S., Krawiec, K., Machado, P., and Cotta, C., editors, Genetic Programming, Lecture Notes in Computer Science, pages 182–193, Berlin, Heidelberg. Springer.
+ A Generalization Rates
+ Table 6: Generalization rate for PushGP. These data indicate the proportion of the runs that passed the training set that also passed the held out test set.
+ Method          Lex   Rnd   IDS   IDS   IDS   IDS   Rnd   IDS   IDS   IDS   IDS
+ r               -     0.05  0.05  0.05  0.05  0.05  0.1   0.1   0.1   0.1   0.1
+ ρ               -     -     1     0.01  0.01  0.01  -     1     0.01  0.01  0.01
+ k               -     -     1     1     10    100   -     1     1     10    100
+ Count Odds      1.00  0.96  0.98  0.99  1.00  0.99  0.96  1.00  0.98  0.99  0.99
+ Find Pair       1.00  0.82  0.82  0.73  0.74  0.80  0.50  0.88  0.79  0.68  0.75
+ Fizz Buzz       0.93  0.96  1.00  0.93  0.95  0.99  1.00  1.00  0.96  0.96  0.96
+ Fuel Cost       1.00  1.00  1.00  0.99  0.99  0.99  1.00  1.00  1.00  1.00  1.00
+ GCD             0.91  0.93  1.00  0.93  0.83  0.87  0.82  0.75  0.80  0.89  0.87
+ Grade           -     -     -     -     1.00  -     1.00  -     -     1.00  1.00
+ Scrabble Score  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  0.98  1.00  1.00
+ Small or Large  0.71  0.95  0.80  0.78  0.74  0.71  0.81  0.77  0.69  0.73  0.64
+ Table 7: Generalization rate for G3P. These data indicate the proportion of the runs that passed the training set that also passed the held out test set.
+ Method          Lex   Rnd   IDS   IDS   IDS   IDS   Rnd   IDS   IDS   IDS   IDS
+ r               -     0.05  0.05  0.05  0.05  0.05  0.1   0.1   0.1   0.1   0.1
+ ρ               -     -     1     0.01  0.01  0.01  -     1     0.01  0.01  0.01
+ k               -     -     1     1     10    100   -     1     1     10    100
+ Count Odds      0.94  0.96  0.96  0.88  1.00  0.96  1.00  0.92  0.95  0.91  0.95
+ Find Pair       -     -     -     1.00  -     -     1.00  -     -     1.00  -
+ Fizz Buzz       0.79  0.87  0.85  0.84  0.78  0.85  0.83  0.82  0.82  0.89  0.73
+ Fuel Cost       1.00  0.97  1.00  0.97  0.96  1.00  1.00  0.96  0.96  1.00  1.00
+ GCD             -     0.17  -     -     -     0.25  -     -     -     -     -
+ Grade           0.42  0.45  0.50  0.53  0.59  0.45  0.47  0.54  0.47  0.54  0.49
+ Scrabble Score  1.00  1.00  1.00  1.00  0.92  0.83  1.00  -     0.86  1.00  0.60
+ Small or Large  0.47  0.57  0.65  0.56  0.64  0.66  0.68  0.59  0.60  0.579 0.65
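+ The generalization rates in Tables 6 and 7 are plain proportions: among the runs of a configuration that solved the full training set, the fraction that also solved the held-out test set (entries marked "-" presumably correspond to configurations with no training-set solutions). A minimal Python sketch of that computation follows; the `runs` records and the `generalization_rate` helper are illustrative assumptions, not code from the paper.
+
+     # Illustrative sketch (not from the paper): compute a generalization rate as the
+     # fraction of runs that solved the training set which also pass the held-out test set.
+     # `runs` is a hypothetical list of (passed_training, passed_test) pairs, one per run.
+
+     def generalization_rate(runs):
+         training_solvers = [passed_test for passed_training, passed_test in runs if passed_training]
+         if not training_solvers:
+             return None  # no run solved the training set; presumably shown as "-" in the tables
+         return sum(training_solvers) / len(training_solvers)
+
+     # Example: 40 of 50 runs solve the training set, and 38 of those also pass the test set.
+     runs = [(True, True)] * 38 + [(True, False)] * 2 + [(False, False)] * 10
+     print(round(generalization_rate(runs), 2))  # 0.95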
AtAzT4oBgHgl3EQfhv1C/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
C9E4T4oBgHgl3EQfFwy_/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:793cd509b59ddeeab47be342607d90d26ed2afeacd9c10d5fbc0945581a0c471
3
+ size 129855
CtE0T4oBgHgl3EQfQQAs/content/2301.02189v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bb54594044be1479cd21fcecb615347004f6880d38076ab335d1986ec0daa478
3
+ size 6212423
CtE0T4oBgHgl3EQfQQAs/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f160ec6ed32e013d0d8fcd69127b9576bb23a4f041f1be7ff47842e49ffaf282
3
+ size 13500461
DNE4T4oBgHgl3EQfGAw7/content/tmp_files/2301.04890v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
DNE4T4oBgHgl3EQfGAw7/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
DdAzT4oBgHgl3EQfwf7y/content/2301.01725v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f4e85485cea3a2027a6a08de3756967183384d11cb9f57cbc2ff04d4d617897
3
+ size 1073396
DdAzT4oBgHgl3EQfwf7y/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:76f32f4c2c97031a087b24d8de8d4798db441b1fb007e226f485cf350a32d993
3
+ size 7012397
FtAzT4oBgHgl3EQfHPtI/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cd2787875cd2837f1997a6cbf8392caf764b53e33de56a19319ae335fd63ae0
3
+ size 160400