jackkuo committed on
Commit db26a8d · verified · 1 Parent(s): 5fab001

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +60 -0
  2. 1dAzT4oBgHgl3EQft_1-/content/2301.01684v1.pdf +3 -0
  3. 1dAzT4oBgHgl3EQft_1-/vector_store/index.faiss +3 -0
  4. 3NFKT4oBgHgl3EQf8S4-/content/2301.11948v1.pdf +3 -0
  5. 3NFKT4oBgHgl3EQf8S4-/vector_store/index.pkl +3 -0
  6. 3tE0T4oBgHgl3EQfeADx/content/tmp_files/2301.02386v1.pdf.txt +3166 -0
  7. 4NE4T4oBgHgl3EQfbQzc/content/2301.05072v1.pdf +3 -0
  8. 4NE4T4oBgHgl3EQfbQzc/vector_store/index.faiss +3 -0
  9. 4NE4T4oBgHgl3EQfbQzc/vector_store/index.pkl +3 -0
  10. 4dAzT4oBgHgl3EQfEPoC/content/tmp_files/2301.00988v1.pdf.txt +1682 -0
  11. 4dAzT4oBgHgl3EQfEPoC/content/tmp_files/load_file.txt +0 -0
  12. 6tE4T4oBgHgl3EQf1w38/content/2301.05294v1.pdf +3 -0
  13. 6tFAT4oBgHgl3EQfnx0W/vector_store/index.faiss +3 -0
  14. 79E2T4oBgHgl3EQfPgaW/vector_store/index.faiss +3 -0
  15. 7tFLT4oBgHgl3EQfsi8k/content/tmp_files/2301.12147v1.pdf.txt +2034 -0
  16. 7tFLT4oBgHgl3EQfsi8k/content/tmp_files/load_file.txt +0 -0
  17. 89AzT4oBgHgl3EQfSfsp/content/tmp_files/2301.01232v1.pdf.txt +1779 -0
  18. 89AzT4oBgHgl3EQfSfsp/content/tmp_files/load_file.txt +0 -0
  19. 8NFLT4oBgHgl3EQfsy_c/content/2301.12149v1.pdf +3 -0
  20. 99E1T4oBgHgl3EQfUgM7/content/tmp_files/2301.03090v1.pdf.txt +0 -0
  21. 99E1T4oBgHgl3EQfUgM7/content/tmp_files/load_file.txt +0 -0
  22. A9E2T4oBgHgl3EQfnQhW/content/tmp_files/2301.04006v1.pdf.txt +2077 -0
  23. A9E2T4oBgHgl3EQfnQhW/content/tmp_files/load_file.txt +0 -0
  24. BtAyT4oBgHgl3EQfePgk/content/tmp_files/2301.00316v1.pdf.txt +595 -0
  25. BtAyT4oBgHgl3EQfePgk/content/tmp_files/load_file.txt +508 -0
  26. C9FQT4oBgHgl3EQf_jdA/content/2301.13458v1.pdf +3 -0
  27. EtE5T4oBgHgl3EQfUw_x/content/2301.05547v1.pdf +3 -0
  28. EtE5T4oBgHgl3EQfUw_x/vector_store/index.faiss +3 -0
  29. EtE5T4oBgHgl3EQfUw_x/vector_store/index.pkl +3 -0
  30. FdAzT4oBgHgl3EQfi_0V/content/tmp_files/2301.01507v1.pdf.txt +1706 -0
  31. FdAzT4oBgHgl3EQfi_0V/content/tmp_files/load_file.txt +0 -0
  32. HdE4T4oBgHgl3EQfgg3J/vector_store/index.faiss +3 -0
  33. J9E3T4oBgHgl3EQfvQsy/vector_store/index.pkl +3 -0
  34. JNE1T4oBgHgl3EQfGAOI/content/tmp_files/2301.02909v1.pdf.txt +776 -0
  35. JNE1T4oBgHgl3EQfGAOI/content/tmp_files/load_file.txt +0 -0
  36. JtE1T4oBgHgl3EQfGQNW/content/2301.02911v1.pdf +3 -0
  37. JtE1T4oBgHgl3EQfGQNW/vector_store/index.faiss +3 -0
  38. K9FRT4oBgHgl3EQf1Th-/vector_store/index.pkl +3 -0
  39. KtFRT4oBgHgl3EQf1Dhl/vector_store/index.faiss +3 -0
  40. LdE0T4oBgHgl3EQfSgCx/content/2301.02224v1.pdf +3 -0
  41. LdE0T4oBgHgl3EQfSgCx/vector_store/index.faiss +3 -0
  42. NdAyT4oBgHgl3EQfUPel/content/tmp_files/2301.00122v1.pdf.txt +873 -0
  43. NdAyT4oBgHgl3EQfUPel/content/tmp_files/load_file.txt +0 -0
  44. NtAzT4oBgHgl3EQfzP6J/vector_store/index.pkl +3 -0
  45. NtE1T4oBgHgl3EQfZwQu/content/2301.03153v1.pdf +3 -0
  46. NtE1T4oBgHgl3EQfZwQu/vector_store/index.faiss +3 -0
  47. O9AyT4oBgHgl3EQf7Prc/vector_store/index.faiss +3 -0
  48. ONFAT4oBgHgl3EQfyh6B/content/tmp_files/2301.08693v1.pdf.txt +675 -0
  49. ONFAT4oBgHgl3EQfyh6B/content/tmp_files/load_file.txt +470 -0
  50. OtAzT4oBgHgl3EQflP1e/content/2301.01544v1.pdf +3 -0
.gitattributes CHANGED
@@ -5687,3 +5687,63 @@ NtE0T4oBgHgl3EQfjgEx/content/2301.02459v1.pdf filter=lfs diff=lfs merge=lfs -text
AdFLT4oBgHgl3EQfxDCd/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
h9FJT4oBgHgl3EQfWSwL/content/2301.11516v1.pdf filter=lfs diff=lfs merge=lfs -text
o9AyT4oBgHgl3EQfzPm_/content/2301.00699v1.pdf filter=lfs diff=lfs merge=lfs -text
+ htE0T4oBgHgl3EQf6gL5/content/2301.02766v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 6tE4T4oBgHgl3EQf1w38/content/2301.05294v1.pdf filter=lfs diff=lfs merge=lfs -text
+ WtA0T4oBgHgl3EQfFP8E/content/2301.02028v1.pdf filter=lfs diff=lfs merge=lfs -text
+ jNFLT4oBgHgl3EQfbi-Q/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ HdE4T4oBgHgl3EQfgg3J/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ O9AyT4oBgHgl3EQf7Prc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ KtFRT4oBgHgl3EQf1Dhl/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ bNFPT4oBgHgl3EQfwTX3/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ udE3T4oBgHgl3EQfkQqh/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ bNFPT4oBgHgl3EQfwTX3/content/2301.13163v1.pdf filter=lfs diff=lfs merge=lfs -text
+ vNFJT4oBgHgl3EQfeyyW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ NtE1T4oBgHgl3EQfZwQu/content/2301.03153v1.pdf filter=lfs diff=lfs merge=lfs -text
+ _9E1T4oBgHgl3EQfVAPl/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ndFRT4oBgHgl3EQfbDfe/content/2301.13559v1.pdf filter=lfs diff=lfs merge=lfs -text
+ wNAzT4oBgHgl3EQfsv0o/content/2301.01663v1.pdf filter=lfs diff=lfs merge=lfs -text
+ wdFIT4oBgHgl3EQfzCuL/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ OtAzT4oBgHgl3EQflP1e/content/2301.01544v1.pdf filter=lfs diff=lfs merge=lfs -text
+ vNFJT4oBgHgl3EQfeyyW/content/2301.11554v1.pdf filter=lfs diff=lfs merge=lfs -text
+ LdE0T4oBgHgl3EQfSgCx/content/2301.02224v1.pdf filter=lfs diff=lfs merge=lfs -text
+ tNE0T4oBgHgl3EQfbADw/content/2301.02344v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 1dAzT4oBgHgl3EQft_1-/content/2301.01684v1.pdf filter=lfs diff=lfs merge=lfs -text
+ gdE0T4oBgHgl3EQfpAGk/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ NtE1T4oBgHgl3EQfZwQu/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ LdE0T4oBgHgl3EQfSgCx/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ tNE0T4oBgHgl3EQfbADw/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 6tFAT4oBgHgl3EQfnx0W/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ wdFIT4oBgHgl3EQfzCuL/content/2301.11363v1.pdf filter=lfs diff=lfs merge=lfs -text
+ JtE1T4oBgHgl3EQfGQNW/content/2301.02911v1.pdf filter=lfs diff=lfs merge=lfs -text
+ rNFIT4oBgHgl3EQfxivv/content/2301.11357v1.pdf filter=lfs diff=lfs merge=lfs -text
+ rNFIT4oBgHgl3EQfxivv/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 79E2T4oBgHgl3EQfPgaW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 1dAzT4oBgHgl3EQft_1-/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ VtFLT4oBgHgl3EQfSS81/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ g9AyT4oBgHgl3EQfxfly/content/2301.00667v1.pdf filter=lfs diff=lfs merge=lfs -text
+ atAyT4oBgHgl3EQfW_e5/content/2301.00175v1.pdf filter=lfs diff=lfs merge=lfs -text
+ C9FQT4oBgHgl3EQf_jdA/content/2301.13458v1.pdf filter=lfs diff=lfs merge=lfs -text
+ EtE5T4oBgHgl3EQfUw_x/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ h9FJT4oBgHgl3EQfWSwL/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ OtAzT4oBgHgl3EQflP1e/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ JtE1T4oBgHgl3EQfGQNW/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ EtE5T4oBgHgl3EQfUw_x/content/2301.05547v1.pdf filter=lfs diff=lfs merge=lfs -text
+ itAzT4oBgHgl3EQf4v7P/content/2301.01850v1.pdf filter=lfs diff=lfs merge=lfs -text
+ cdE4T4oBgHgl3EQfPwyZ/content/2301.04976v1.pdf filter=lfs diff=lfs merge=lfs -text
+ bNE1T4oBgHgl3EQfxAXP/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 3NFKT4oBgHgl3EQf8S4-/content/2301.11948v1.pdf filter=lfs diff=lfs merge=lfs -text
+ itAzT4oBgHgl3EQf4v7P/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ cNFJT4oBgHgl3EQf9y04/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ WtE0T4oBgHgl3EQfmQEQ/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ m9E2T4oBgHgl3EQfJgYM/content/2301.03691v1.pdf filter=lfs diff=lfs merge=lfs -text
+ o9AyT4oBgHgl3EQfzPm_/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ j9AyT4oBgHgl3EQf_Pqt/content/2301.00906v1.pdf filter=lfs diff=lfs merge=lfs -text
+ wNAzT4oBgHgl3EQfsv0o/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ndFRT4oBgHgl3EQfbDfe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 4NE4T4oBgHgl3EQfbQzc/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ nNE1T4oBgHgl3EQf1QWH/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ ytAyT4oBgHgl3EQfn_gd/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+ 4NE4T4oBgHgl3EQfbQzc/content/2301.05072v1.pdf filter=lfs diff=lfs merge=lfs -text
+ ldE1T4oBgHgl3EQfgwR3/content/2301.03233v1.pdf filter=lfs diff=lfs merge=lfs -text
+ 8NFLT4oBgHgl3EQfsy_c/content/2301.12149v1.pdf filter=lfs diff=lfs merge=lfs -text
+ TdAzT4oBgHgl3EQfJfuh/content/2301.01081v1.pdf filter=lfs diff=lfs merge=lfs -text
1dAzT4oBgHgl3EQft_1-/content/2301.01684v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0f2c9c6f128061622ab58f57d059711b1a004e3efe27c6dcdc12e7717b718a5c
+ size 11649280
1dAzT4oBgHgl3EQft_1-/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1781c63f34ffaf02150e6a348637098c8e7873b6dd039c40085d821b3a661f98
+ size 5308461
3NFKT4oBgHgl3EQf8S4-/content/2301.11948v1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a75265cf5e4b614ab555ca7f50ae914df5532b0964f0b964a37998f4d7b79be6
+ size 591206
3NFKT4oBgHgl3EQf8S4-/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b4e9bfdcd14c3f4d120781706a2b84f83cdaa1027b174a5adf4ec5d33b0f3cf9
+ size 192766
3tE0T4oBgHgl3EQfeADx/content/tmp_files/2301.02386v1.pdf.txt ADDED
@@ -0,0 +1,3166 @@
A Stochastic ADMM Algorithm for Large-Scale Ptychography with Weighted Difference of Anisotropic and Isotropic Total Variation*

Kevin Bui† and Zichao (Wendy) Di‡

Abstract. Ptychography is an imaging technique that has various scientific applications, ranging from biology to optics. The method scans the object of interest in a series of overlapping positions, thereby generating a set of multiple Fourier magnitude measurements that are potentially corrupted by noise. From these measurements, an image of the object can be reconstructed depending on how the related inverse problem is formulated and solved. In this paper, we propose a class of variational models that incorporate the weighted anisotropic-isotropic total variation (AITV), an effective regularizer for image recovery. This class of models is applicable to measurements corrupted by either Gaussian or Poisson noise. To make the models applicable to a large number of ptychographic scans, we design an efficient stochastic alternating direction method of multipliers algorithm and establish its convergence. Numerical experiments demonstrate that, from a large set of highly corrupted Fourier measurements, the proposed stochastic algorithm with AITV regularization can reconstruct complex-valued images with satisfactory quality, especially for the phase components.

AMS subject classifications. 65F22, 65K10, 68U10, 90C06, 90C15, 90C26

Key words. phase retrieval, total variation, ptychography, ADMM, Poisson/Gaussian noise, nonconvex optimization, stochastic optimization
1. Introduction. Ptychography is a popular imaging technique that combines both coherent diffractive imaging and scanning transmission microscopy. It has been used in various industrial and scientific applications, including biology [34, 45, 58], crystallography [14], and optics [44, 48]. To perform a ptychographic experiment (see Figure 1), a coherent beam is scanned across the object of interest, where each scan may have overlapping positions with another. The scanning procedure provides a set of phaseless measurements that can be used to reconstruct an image of the object of interest.
We describe 2D ptychography in the discrete setting. Let z ∈ C^{n²} be the object of interest with n × n pixels and ω ∈ C^{m²} be the localized 2D probe with m × m pixels, where m < n. Both the object z and the probe ω are expressed as vectors in lexicographical order. We denote the set of N masks by {S_j}_{j=1}^N, where each S_j ∈ R^{m²×n²} is a binary matrix that represents an (m × m)-size window over the image z. The set of phaseless measurements {d_j}_{j=1}^N is obtained by d_j = |F(P_j z)|² = |F(ω ∘ S_j z)|², where P_j := ω ∘ S_j is the jth probe, F ∈ C^{m²×m²} is the 2D discrete Fourier operator, the operation ∘ is elementwise multiplication, and the operation |·| is the elementwise absolute value of a vector. We aim to solve the following ptychographic phase retrieval problems.

* Submitted to the editors DATE.
Funding: This material is based upon work supported by the U.S. Department of Energy, Office of Science, under contract number DE-AC02-06CH11357.
† Department of Mathematics, University of California at Irvine, Irvine, CA 92697 USA (kevinb3@uci.edu).
‡ Mathematics and Computer Science Division, Argonne National Laboratory, Lemont, IL (wendydi@anl.gov).

arXiv:2301.02386v1 [math.NA] 6 Jan 2023
[Figure 1: Schematic of a ptychography experiment. Labeled components: probe, beam, object, measurements.]

When the probe is unknown, the blind ptychographic phase retrieval problem is expressed by

\[ \text{BP-PR:} \quad \text{find } \omega \in \mathbb{C}^{m^2} \text{ and } z \in \mathbb{C}^{n^2} \text{ such that } |\mathcal{F}(\omega \circ S_j z)|^2 = d_j, \quad j = 1, \dots, N. \tag{1.1} \]

When the probe ω is known, (1.1) reduces to the non-blind case where we only find z ∈ C^{n²}.
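To make the forward model concrete, here is a minimal numpy sketch that simulates the phaseless measurements d_j = |F(ω ∘ S_j z)|². The helper name and the toy scan geometry (64×64 object, 16×16 probe, 50% raster overlap) are our own assumptions for illustration, not the paper's experimental setup.

```python
import numpy as np

def simulate_measurements(z, omega, positions, m):
    """Phaseless measurements d_j = |F(omega o S_j z)|^2 for each scan window."""
    d = []
    for (r, c) in positions:
        patch = z[r:r + m, c:c + m]      # S_j z: the m x m window of the object
        exit_wave = omega * patch        # omega o S_j z (elementwise product)
        d.append(np.abs(np.fft.fft2(exit_wave)) ** 2)
    return d

# Toy geometry (assumed): overlapping raster scan of a phase object.
rng = np.random.default_rng(0)
n, m, step = 64, 16, 8
z = np.exp(1j * rng.uniform(0.0, 0.3, (n, n)))   # unit-magnitude phase object
omega = np.ones((m, m), dtype=complex)           # flat probe
positions = [(r, c) for r in range(0, n - m + 1, step)
             for c in range(0, n - m + 1, step)]
d = simulate_measurements(z, omega, positions, m)
```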
Multiple algorithms have been developed to solve the non-blind and blind phase retrieval problems. One of the most popular methods is the ptychographical iterative engine (PIE) [42], whose later refinements led to ePIE [33] and rPIE [32]. The PIE methods are based on gradient descent applied to each measurement sequentially. Other gradient-based methods for phase retrieval include Wirtinger flow [6] and its variants [13, 56, 57], which use careful initialization by a spectral method and adaptive step sizes. PIE is also one class of projection algorithms for phase retrieval. Other projection-based algorithms are hybrid projection-reflection [1], Douglas-Rachford splitting [39, 46], and relaxed averaged alternating reflections [31]. The phase retrieval problem can also be formulated as a semidefinite optimization problem. For example, PhaseLift [7] solves the phase retrieval problem as a trace (nuclear) norm minimization problem. A nonconvex variant called PhaseLiftOff subtracts the Frobenius norm from the trace norm in the objective function [55]. PhaseCut proposes a different semidefinite formulation of the phase retrieval problem by explicitly separating the amplitude and phase variables and optimizing only the values of the phase variables [47]. The phase retrieval problem can alternatively be written as a saddle-point problem [51], solved by the alternating direction method of multipliers (ADMM) [4]. A globally convergent ADMM algorithm has recently been developed to solve the BP-PR problem [8]. Another globally convergent algorithm is proximal alternating linearized minimization (PALM) [2], which has also been adapted to solve the BP-PR problem [12, 21]. For a detailed survey of numerical algorithms for phase retrieval, please refer to [17].
For large-scale ptychography, when a huge number of scans are collected, many of the aforementioned algorithms may be inapplicable or may need to be adapted because of the demanding memory footprint and computational cost. Various parallel algorithms have been developed. For example, an asynchronous parallel version of ePIE has been implemented on GPUs, where each partition of a measurement set is asynchronously processed to obtain a sub-image and the sub-images are later fused together to form the entire image [35]. A parallel version of relaxed averaged alternating reflections has recently been developed for GPU implementation [15]. Unfortunately, some of these parallel algorithms require a GPU, which many computers do not have. However, there are efficient algorithms for large-scale ptychography that do not require a GPU, although having one could speed up the processing time. A multigrid optimization framework has been proposed to accelerate large-scale gradient-based methods for phase retrieval [52]. An overlapping domain decomposition method combined with ADMM leads to a highly parallel algorithm with good load balance [9]. To the best of our knowledge, there does not yet exist a stochastic optimization algorithm for large-scale ptychography that iteratively processes a batch of measurements. Such an algorithm would be useful for practitioners who do not have access to multiple cores to perform parallel computing.
To improve the image reconstruction quality in phase retrieval, total variation (TV) [43] has been incorporated for the cases when the measurements are corrupted with Gaussian noise [11] or with Poisson noise [10]. Both cases consider the isotropic TV approximation:

\[ \|\nabla z\|_{2,1} = \sum_{i=1}^{n^2} \sqrt{|(\nabla_x z)_i|^2 + |(\nabla_y z)_i|^2}, \tag{1.2} \]
where ∇_x and ∇_y are the horizontal and vertical difference operators, respectively, and (∇_x z)_i and (∇_y z)_i are the ith entries of the vectors ∇_x z and ∇_y z, respectively. However, it is well known that isotropic TV tends to blur oblique edges. An alternative approximation that preserves sharper edges is the anisotropic TV [16]:

\[ \|\nabla z\|_1 = \sum_{i=1}^{n^2} \big( |(\nabla_x z)_i| + |(\nabla_y z)_i| \big). \tag{1.3} \]
Overall, TV is meant to approximate the ℓ0 "norm" of the image gradient, i.e., ∥∇z∥_0, because TV is based on the ℓ1 norm, a convex relaxation of ℓ0. A nonconvex alternative to ℓ1 is ℓ1 − αℓ2, 0 < α ≤ 1, which performs well in recovering sparse solutions in various compressed sensing problems [27, 28, 29, 54]. The superior performance of ℓ1 − αℓ2 in sparse recovery has motivated the development of the weighted difference of anisotropic and isotropic total variation (AITV) [30], which applies ℓ1 − αℓ2 to each gradient vector of an image. Mathematically, AITV is formulated as

\[ \|\nabla z\|_1 - \alpha \|\nabla z\|_{2,1} = \sum_{i=1}^{n^2} \Big( |(\nabla_x z)_i| + |(\nabla_y z)_i| - \alpha \sqrt{|(\nabla_x z)_i|^2 + |(\nabla_y z)_i|^2} \Big). \tag{1.4} \]

AITV has demonstrated better performance than TV in image denoising, image deconvolution, image segmentation, and MRI reconstruction [5, 30, 38], especially in preserving sharper edges.
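To make the three regularizers (1.2)-(1.4) concrete, the following sketch evaluates them with forward differences on a 2D array; the zero boundary handling and the function name are assumptions of the example.

```python
import numpy as np

def aitv(z, alpha=0.8):
    """Evaluate anisotropic TV (1.3), isotropic TV (1.2), and AITV (1.4)."""
    gx = np.zeros_like(z); gy = np.zeros_like(z)
    gx[:, :-1] = z[:, 1:] - z[:, :-1]    # forward horizontal differences
    gy[:-1, :] = z[1:, :] - z[:-1, :]    # forward vertical differences
    aniso = np.sum(np.abs(gx) + np.abs(gy))
    iso = np.sum(np.sqrt(np.abs(gx) ** 2 + np.abs(gy) ** 2))
    return aniso, iso, aniso - alpha * iso
```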
In this work, we consider the large-scale ptychography problem where the measurements are corrupted by either Gaussian or Poisson noise. To improve the image reconstruction quality, AITV regularization is incorporated. The overall problem is formulated as a general variational problem, for which we develop an ADMM algorithm. The ADMM algorithm has subproblems that can be approximately solved by stochastic gradient descent (SGD) [40]. Although SGD is a generic and popular algorithm for unconstrained optimization problems whose objective functions have a finite-sum structure, it may not be directly applicable to the subproblems being solved in the case of ptychography. Hence, we show how to appropriately apply SGD in order to develop our specialized stochastic ADMM algorithm. To further modify the algorithm, we incorporate adaptive step sizes based on the PIE algorithms [32, 33, 42]. Instead of using all measurements per iteration, this stochastic ADMM algorithm can iteratively process a batch of measurements to accurately perform image reconstruction.

The paper is organized as follows. In Section 2, we review notation and definitions that will be used throughout the paper. Next, in Section 3, we describe the AITV-regularized variational models for the image ptychography problem. Within this section, we design the stochastic ADMM algorithms to solve these models. Convergence analysis of the stochastic ADMM algorithm follows in Section 4. In Section 5, we illustrate the performance of our proposed stochastic ADMM algorithms and compare them with other competing algorithms. Lastly, in Section 6, we conclude the paper with a summary and future work.
2. Preliminaries. In this section, we describe basic notation used throughout the paper.

Let z ∈ C^{n²}. The ith entry of z is denoted by (z)_i. The vector 1 is a vector whose entries are all ones; the vector 0 is defined similarly. The real transpose and conjugate transpose of z are denoted by z^⊤ and z^*, respectively. The same superscript notations are used for the real transpose and conjugate transpose of matrices. The sign of a complex value z′ ∈ C is given by

\[ \operatorname{sgn}(z') = \begin{cases} \dfrac{z'}{|z'|} & \text{if } z' \neq 0, \\ c \in \{c' \in \mathbb{C} : |c'| = 1\} & \text{if } z' = 0. \end{cases} \]

The sign of a vector z ∈ C^{n²} is denoted by sgn(z) and is defined elementwise by sgn(z)_i = sgn(z_i), i = 1, ..., n². The standard basis vectors of C^{n²} are denoted by {e_i}_{i=1}^{n²}, where e_i is a vector whose ith component is 1 while all other components are zero. The diagonal matrix of a vector z ∈ C^{n²} is denoted by D_z = Diag(z) = z1^⊤ ∘ I_{n²×n²}.
For p = (p_x, p_y) ∈ C^{n²} × C^{n²}, its ith entry is p_i = ((p_x)_i, (p_y)_i)^⊤ ∈ C². We define the following norms on C^{n²} × C^{n²}:

\[ \|p\|_1 = \sum_{i=1}^{n^2} |(p_x)_i| + |(p_y)_i|, \qquad \|p\|_2 = \sqrt{\sum_{i=1}^{n^2} |(p_x)_i|^2 + |(p_y)_i|^2}, \]
\[ \|p\|_{2,1} = \sum_{i=1}^{n^2} \sqrt{|(p_x)_i|^2 + |(p_y)_i|^2} = \sum_{i=1}^{n^2} \|p_i\|_2. \]
The discrete gradient operator ∇ : C^{n²} → C^{n²} × C^{n²} applied to the image z is given by ∇z = (∇_x z, ∇_y z), where ∇_x and ∇_y are the forward horizontal and vertical difference operators.

We also define the proximal operator of a function f : C^{n²} → R ∪ {+∞} as

\[ \operatorname{prox}_{f(\cdot)}(z') = \arg\min_{z} \; f(z) + \frac{1}{2}\|z - z'\|_2^2, \qquad \forall z' \in \mathbb{C}^{n^2}. \]
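For instance, with f = λ∥·∥_1 this proximal operator reduces to complex soft-thresholding; a minimal numpy sketch (the helper name is ours):

```python
import numpy as np

def prox_l1(zp, lam):
    """prox of lam*||.||_1: complex soft-thresholding sgn(z') o max(|z'|-lam, 0)."""
    mag = np.abs(zp)
    return zp * (np.maximum(mag - lam, 0.0) / np.where(mag > 0, mag, 1.0))
```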
3. Proposed Model. Throughout the paper, we assume that among the mask set {S_j}_{j=1}^N there exists j′ such that ∥S_{j′} e_i∥_1 = 1 for each i = 1, ..., n². This assumption ensures that each pixel of an image z ∈ C^{n²} is sampled at least once.

Suppose that the measurements {d_j}_{j=1}^N are corrupted by independent and identically distributed (i.i.d.) noise, i.e., d_j ~ Noise(|F(ω ∘ S_j z)|²) i.i.d. for j = 1, ..., N. We assume that the noise is either Gaussian or Poisson, both of which are common in phase retrieval. Given an unknown probe ω, the blind variational model [8] is
\[ \min_{\omega \in \mathbb{C}^{m^2},\, z \in \mathbb{C}^{n^2}} \; \sum_{j=1}^{N} B(|\mathcal{F}(\omega \circ S_j z)|^2, d_j), \tag{3.1} \]

where

\[ B(g, f) = \begin{cases} \frac{1}{2}\|\sqrt{g} - \sqrt{f}\|_2^2, & \text{amplitude Gaussian metric (AGM) [51]}, \\[4pt] \frac{1}{2}\langle g - f \circ \log(g), \mathbf{1} \rangle, & \text{intensity Poisson metric (IPM) [10]}. \end{cases} \tag{3.2} \]

Note that √· is the elementwise square root. When the probe ω is known, (3.1) simplifies to the non-blind case, a special case where we only need to find z ∈ C^{n²}. Hence, throughout the rest of this section, we will focus on the blind case. To improve image recovery, we propose a class of AITV-regularized variants of (3.1).
3.1. AITV model. For image ptychography, we propose the following AITV-regularized model:

\[ \min_{\omega \in \mathbb{C}^{m^2},\, z \in \mathbb{C}^{n^2}} \; \sum_{j=1}^{N} B(|\mathcal{F}(\omega \circ S_j z)|^2, d_j) + \lambda \big( \|\nabla z\|_1 - \alpha \|\nabla z\|_{2,1} \big), \quad \lambda > 0,\; \alpha \in [0, 1]. \tag{3.3} \]

To develop an ADMM algorithm for (3.3), we introduce auxiliary variables u = (u_1, ..., u_N) ∈ C^{m²×N} and v = (v_x, v_y) ∈ C^{n²} × C^{n²} so that we obtain the equivalent constrained optimization problem

\[ \min_{u, v, z} \; \sum_{j=1}^{N} B(|u_j|^2, d_j) + \lambda \big( \|v\|_1 - \alpha \|v\|_{2,1} \big) \quad \text{s.t.} \quad u_j = \mathcal{F}(\omega \circ S_j z),\; j = 1, \dots, N, \;\text{ and }\; v = \nabla z. \tag{3.4} \]
The augmented Lagrangian of (3.4) is

\[ \begin{aligned} L(u, \omega, v, z, \Lambda, y) = \sum_{j=1}^{N} \Big( B(|u_j|^2, d_j) &+ \mathfrak{R}\big(\langle \Lambda_j, u_j - \mathcal{F}(\omega \circ S_j z) \rangle\big) + \frac{\beta_1}{2}\|u_j - \mathcal{F}(\omega \circ S_j z)\|_2^2 \Big) \\ &+ \lambda \big( \|v\|_1 - \alpha \|v\|_{2,1} \big) + \mathfrak{R}\big(\langle y, v - \nabla z \rangle\big) + \frac{\beta_2}{2}\|v - \nabla z\|_2^2, \end{aligned} \tag{3.5} \]

where R(·) denotes the real component of a complex number; ⟨·,·⟩ denotes the complex inner product between two vectors; Λ = (Λ_1, ..., Λ_N) ∈ C^{m²×N} and y = (y_x, y_y) ∈ C^{n²} × C^{n²} are Lagrange multipliers; and β_1, β_2 > 0 are penalty parameters. The ADMM algorithm iterates as follows:
\[ \begin{aligned} u^{t+1} &\in \arg\min_{u} \; L(u, \omega^t, v^t, z^t, \Lambda^t, y^t), && \text{(3.6a)} \\ \omega^{t+1} &\in \arg\min_{\omega} \; L(u^{t+1}, \omega, v^t, z^t, \Lambda^t, y^t), && \text{(3.6b)} \\ v^{t+1} &\in \arg\min_{v} \; L(u^{t+1}, \omega^{t+1}, v, z^t, \Lambda^t, y^t), && \text{(3.6c)} \\ z^{t+1} &\in \arg\min_{z} \; L(u^{t+1}, \omega^{t+1}, v^{t+1}, z, \Lambda^t, y^t), && \text{(3.6d)} \\ \Lambda_j^{t+1} &= \Lambda_j^t + \beta_1\big( u_j^{t+1} - \mathcal{F}(\omega^{t+1} \circ S_j z^{t+1}) \big), \quad j = 1, \dots, N, && \text{(3.6e)} \\ y^{t+1} &= y^t + \beta_2\big( v^{t+1} - \nabla z^{t+1} \big). && \text{(3.6f)} \end{aligned} \]

Next we explain how to solve each subproblem.
3.1.1. u-subproblem. In (3.6a), we solve for the u_j independently of each other. For each j = 1, ..., N, we have

\[ \begin{aligned} u_j^{t+1} &\in \arg\min_{u_j} \; B(|u_j|^2, d_j) + \mathfrak{R}\big(\langle \Lambda_j^t, u_j - \mathcal{F}(P_j^t z^t) \rangle\big) + \frac{\beta_1}{2}\big\|u_j - \mathcal{F}(P_j^t z^t)\big\|_2^2 \\ &= \arg\min_{u_j} \; \frac{1}{\beta_1} B(|u_j|^2, d_j) + \frac{1}{2}\Big\|u_j - \mathcal{F}(P_j^t z^t) + \frac{1}{\beta_1}\Lambda_j^t\Big\|_2^2 \\ &= \operatorname{prox}_{\frac{1}{\beta_1} B(|\cdot|^2, d_j)}\Big( \mathcal{F}(P_j^t z^t) - \frac{1}{\beta_1}\Lambda_j^t \Big), \end{aligned} \tag{3.7} \]

where P_j^t = ω^t ∘ S_j. The proximal operator for each fidelity term in (3.2) has a closed-form solution provided in [10, 11], so we have
\[ u_j^{t+1} = \begin{cases} \dfrac{\sqrt{d_j} + \beta_1 \big| \mathcal{F}(P_j^t z^t) - \frac{1}{\beta_1}\Lambda_j^t \big|}{1 + \beta_1} \circ \operatorname{sgn}\Big( \mathcal{F}(P_j^t z^t) - \frac{1}{\beta_1}\Lambda_j^t \Big), & \text{AGM}, \\[12pt] \dfrac{\beta_1 \big| \mathcal{F}(P_j^t z^t) - \frac{1}{\beta_1}\Lambda_j^t \big| + \sqrt{ \big( \beta_1 \big| \mathcal{F}(P_j^t z^t) - \frac{1}{\beta_1}\Lambda_j^t \big| \big)^2 + 4(1 + \beta_1) d_j }}{2(1 + \beta_1)} \circ \operatorname{sgn}\Big( \mathcal{F}(P_j^t z^t) - \frac{1}{\beta_1}\Lambda_j^t \Big), & \text{IPM}. \end{cases} \tag{3.8} \]
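A sketch of the closed-form update (3.8), assuming the point w = F(P_j^t z^t) − Λ_j^t/β_1 has already been formed; the function name is ours.

```python
import numpy as np

def update_u(w, d, beta1, metric="AGM"):
    """Closed-form proximal update (3.8); w = F(P_j^t z^t) - Lambda_j^t / beta1."""
    mag = np.abs(w)
    sgn = np.where(mag > 0, w / np.where(mag > 0, mag, 1.0), 1.0)
    if metric == "AGM":
        r = (np.sqrt(d) + beta1 * mag) / (1.0 + beta1)
    else:  # IPM: positive root of (1+beta1) r^2 - beta1|w| r - d = 0
        r = (beta1 * mag + np.sqrt((beta1 * mag) ** 2
                                   + 4.0 * (1.0 + beta1) * d)) / (2.0 * (1.0 + beta1))
    return r * sgn
```

Both branches act only on the magnitude and keep the phase of w, consistent with the sgn(·) factor in (3.8).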
3.1.2. ω-subproblem. The ω-subproblem (3.6b) can be rewritten as

\[ \omega^{t+1} \in \arg\min_{\omega} \; \sum_{j=1}^{N} \frac{\beta_1}{2} \Big\| \mathcal{F}^{-1}\Big( u_j^{t+1} + \frac{\Lambda_j^t}{\beta_1} \Big) - \omega \circ S_j z^t \Big\|_2^2, \tag{3.9} \]

which shows that updating ω requires access to all N probes. Hence, we develop an alternative update scheme that uses only b ≤ N probes. Instead of solving (3.6b) exactly, we linearize it as done in [26, 37] to obtain

\[ \omega^{t+1} \in \arg\min_{\omega} \; \langle \nabla_\omega L(u^{t+1}, \omega^t, v^t, z^t, \Lambda^t, y^t), \omega - \omega^t \rangle + \frac{1}{2\delta_\omega^t}\|\omega - \omega^t\|_2^2 \tag{3.10} \]

for some constant δ_ω^t > 0 at iteration t. Then (3.10) is equivalent to performing gradient descent with step size δ_ω^t:

\[ \omega^{t+1} = \omega^t - \delta_\omega^t \nabla_\omega L(u^{t+1}, \omega^t, v^t, z^t, \Lambda^t, y^t). \tag{3.11} \]

Next we approximate ∇_ω L by a stochastic estimator ∇̃_ω L, thereby updating ω^{t+1} by SGD with step size δ_ω^t > 0:

\[ \omega^{t+1} = \omega^t - \delta_\omega^t \tilde{\nabla}_\omega L(u^{t+1}, \omega^t, v^t, z^t, \Lambda^t, y^t). \tag{3.12} \]
We derive some candidates for ∇̃_ω L. Let G_j^t(ω) = (β_1/2) ∥F^{-1}(u_j^{t+1} + Λ_j^t/β_1) − ω ∘ S_j z^t∥_2², which means that

\[ \nabla G_j^t(\omega) = -\beta_1 (S_j z^t)^* \circ \Big( \mathcal{F}^{-1}\Big( u_j^{t+1} + \frac{\Lambda_j^t}{\beta_1} \Big) - \omega \circ S_j z^t \Big). \]

Then (3.11) can be rewritten as a gradient descent step with step size Nδ_ω^t:

\[ \omega^{t+1} = \omega^t - N\delta_\omega^t \Big( \frac{1}{N} \sum_{j=1}^{N} \nabla G_j^t(\omega^t) \Big). \tag{3.13} \]

The SGD estimator [3, 40] of (1/N) Σ_{j=1}^N ∇G_j^t(ω^t) is (1/b) Σ_{j∈n_t} ∇G_j^t(ω^t), where n_t ⊂ {1, ..., N} is a sub-batch of the N masks such that |n_t| = b. Then the SGD variant (after scaling δ_ω^t) of (3.11) is

\[ \omega^{t+1} = \omega^t - \delta_\omega^t \Big( \frac{1}{b} \sum_{j \in n_t} \nabla G_j^t(\omega^t) \Big). \tag{3.14} \]

This implies that one candidate stochastic estimator for ∇_ω L is

\[ \tilde{\nabla}_\omega^{\mathrm{SGD}} L(u^{t+1}, \omega^t, v^t, z^t, \Lambda^t, y^t) = \frac{1}{b} \sum_{j \in n_t} \nabla G_j^t(\omega^t) = -\frac{\beta_1}{b} \sum_{j \in n_t} (S_j z^t)^* \circ \Big( \mathcal{F}^{-1}\Big( u_j^{t+1} + \frac{\Lambda_j^t}{\beta_1} \Big) - \omega^t \circ S_j z^t \Big). \tag{3.15} \]
We can further modify (3.15) by incorporating spatially varying step sizes inspired by the PIE algorithms [32, 33, 42]. Let

\[ \Phi_j^t = \begin{cases} \dfrac{1}{\|S_j z^t\|_\infty^2}, & \text{ePIE [33]}, \\[8pt] \dfrac{|S_j z^t|}{\|S_j z^t\|_\infty \big( |S_j z^t|^2 + \gamma_\omega \|S_j z^t\|_\infty^2 \mathbf{1} \big)}, & \text{PIE [42]}, \\[8pt] \dfrac{\mathbf{1}}{(1 - \gamma_\omega) |S_j z^t|^2 + \gamma_\omega \|S_j z^t\|_\infty^2 \mathbf{1}}, & \text{rPIE [32]}, \end{cases} \tag{3.16} \]

where γ_ω ∈ [0, 1] for PIE and rPIE and the division is elementwise. Incorporating Φ_j^t into (3.15), we have another class of stochastic estimators:

\[ \tilde{\nabla}_\omega^{\mathrm{PIE}} L(u^{t+1}, \omega^t, v^t, z^t, \Lambda^t, y^t) = -\frac{\beta_1}{b} \sum_{j \in n_t} \Phi_j^t \circ (S_j z^t)^* \circ \Big( \mathcal{F}^{-1}\Big( u_j^{t+1} + \frac{\Lambda_j^t}{\beta_1} \Big) - \omega^t \circ S_j z^t \Big). \tag{3.17} \]
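A sketch of the stochastic probe step (3.12) using the plain SGD estimator (3.15), with the rPIE weight from (3.16) as an option. The argument layout, with precomputed crops S_j z^t and targets F^{-1}(u_j^{t+1} + Λ_j^t/β_1), and the function name are our assumptions.

```python
import numpy as np

def omega_sgd_step(omega, crops, targets, batch, beta1, step, gamma_w=None):
    """One update of (3.12): omega <- omega - step * gradient_estimate.

    crops[j] = S_j z^t (m x m array); targets[j] = F^{-1}(u_j + Lambda_j/beta1).
    gamma_w=None gives the plain SGD estimator (3.15); a float in [0, 1]
    applies the rPIE weight Phi_j^t of (3.16), yielding estimator (3.17).
    """
    grad = np.zeros_like(omega)
    for j in batch:
        gj = -beta1 * np.conj(crops[j]) * (targets[j] - omega * crops[j])
        if gamma_w is not None:
            a2 = np.abs(crops[j]) ** 2
            gj = gj / ((1.0 - gamma_w) * a2 + gamma_w * a2.max())  # rPIE Phi
        grad += gj
    return omega - step * grad / len(batch)
```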
3.1.3. v-subproblem. Expanding (3.6c) gives

\[ \begin{aligned} v^{t+1} &\in \arg\min_{v} \; \frac{\lambda}{\beta_2}\big( \|v\|_1 - \alpha\|v\|_{2,1} \big) + \frac{1}{2}\Big\| v - \nabla z^t + \frac{y^t}{\beta_2} \Big\|_2^2 \\ &= \arg\min_{v} \; \sum_{i=1}^{n^2} \frac{\lambda}{\beta_2}\big( \|v_i\|_1 - \alpha\|v_i\|_2 \big) + \frac{1}{2}\Big\| v_i - (\nabla z^t)_i + \frac{(y^t)_i}{\beta_2} \Big\|_2^2, \end{aligned} \tag{3.18} \]

which means that v^{t+1} can be solved for elementwise. As a result, the subproblem simplifies to

\[ (v^{t+1})_i = \operatorname{prox}_{\frac{\lambda}{\beta_2}(\|\cdot\|_1 - \alpha\|\cdot\|_2)}\Big( (\nabla z^t)_i - \frac{(y^t)_i}{\beta_2} \Big). \tag{3.19} \]

A closed-form solution for the proximal operator of ℓ1 − αℓ2 is provided in [27], but only for real-valued vectors. We generalize it to the complex case in Lemma 3.1, whose proof is deferred to Appendix A.
Lemma 3.1. Given x′ ∈ C^n, λ > 0, and α ≥ 0, we have the following cases:

1. When ∥x′∥_∞ > λ, we have

\[ x^* = (\|\xi\|_2 + \alpha\lambda)\frac{\xi}{\|\xi\|_2}, \quad \text{where } \xi = \operatorname{sgn}(x') \circ \max(|x'| - \lambda, 0). \]

2. When (1 − α)λ < ∥x′∥_∞ ≤ λ, x^* is a 1-sparse vector: choose an index i ∈ arg max_j |(x′)_j| and set

\[ (x^*)_j = \begin{cases} \big( |(x')_j| + (\alpha - 1)\lambda \big)\operatorname{sgn}\big((x')_j\big) & \text{if } j = i, \\ 0 & \text{if } j \neq i. \end{cases} \]

3. When ∥x′∥_∞ ≤ (1 − α)λ, we have x^* = 0.

Then x^* is an optimal solution to

\[ \operatorname{prox}_{\lambda(\|\cdot\|_1 - \alpha\|\cdot\|_2)}(x') = \arg\min_{x} \; \|x\|_1 - \alpha\|x\|_2 + \frac{1}{2\lambda}\|x - x'\|_2^2. \tag{3.20} \]
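A direct transcription of Lemma 3.1 for complex vectors; this is a sketch with ties in case 2 broken by the first maximizing index, and the function name is ours.

```python
import numpy as np

def prox_l1_minus_al2(xp, lam, alpha):
    """Cases 1-3 of Lemma 3.1 for a complex vector xp."""
    mag = np.abs(xp)
    sgn = np.where(mag > 0, xp / np.where(mag > 0, mag, 1.0), 1.0)
    if mag.max() > lam:                            # case 1
        xi = sgn * np.maximum(mag - lam, 0.0)
        nrm = np.linalg.norm(xi)
        return (nrm + alpha * lam) * xi / nrm
    if mag.max() > (1.0 - alpha) * lam:            # case 2: 1-sparse solution
        x = np.zeros_like(xp)
        i = int(np.argmax(mag))
        x.flat[i] = (mag.flat[i] + (alpha - 1.0) * lam) * sgn.flat[i]
        return x
    return np.zeros_like(xp)                       # case 3
```

In (3.19), this prox is applied with λ replaced by λ/β_2 to each length-2 gradient vector ((∇_x z)_i, (∇_y z)_i).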
3.1.4. z-subproblem. (3.6d) can be rewritten as

\[ z^{t+1} \in \arg\min_{z} \; \sum_{j=1}^{N} \frac{\beta_1}{2} \Big\| u_j^{t+1} - \mathcal{F}(P_j^{t+1} z) + \frac{\Lambda_j^t}{\beta_1} \Big\|_2^2 + \frac{\beta_2}{2} \Big\| v^{t+1} - \nabla z + \frac{y^t}{\beta_2} \Big\|_2^2, \tag{3.21} \]

which implies that z^{t+1} must satisfy the first-order optimality condition

\[ \Big( \beta_1 \sum_{j=1}^{N} (P_j^{t+1})^* P_j^{t+1} - \beta_2 \Delta \Big) z^{t+1} = \sum_{j=1}^{N} \beta_1 (P_j^{t+1})^* \mathcal{F}^{-1}\Big( u_j^{t+1} + \frac{\Lambda_j^t}{\beta_1} \Big) + \beta_2 \nabla^\top \Big( v^{t+1} + \frac{y^t}{\beta_2} \Big), \tag{3.22} \]

where the Laplacian is ∆ = −∇^⊤∇. Since the coefficient matrix of z^{t+1} is invertible, (3.22) can be solved exactly, but this could be computationally expensive if the matrix system is extremely large because of the image size of z. Since the coefficient matrix tends to be sparse, conjugate gradient [22] can be used to solve (3.22) as in [10, 11], but it needs access to all N probes and requires up to n² iterations to attain an exact solution, assuming exact arithmetic. Moreover, it is sensitive to roundoff error [19].
Alternatively, as in Section 3.1.2, we linearize (3.21) to obtain the gradient descent step with step size δ_z^t > 0:

\[ z^{t+1} = z^t - \delta_z^t \nabla_z L(u^{t+1}, \omega^{t+1}, v^{t+1}, z^t, \Lambda^t, y^t). \tag{3.23} \]

We approximate ∇_z L by a stochastic estimator ∇̃_z L that only has access to b ≤ N probes. Replacing ∇_z L with ∇̃_z L in (3.23) gives

\[ z^{t+1} = z^t - \delta_z^t \tilde{\nabla}_z L(u^{t+1}, \omega^{t+1}, v^{t+1}, z^t, \Lambda^t, y^t). \tag{3.24} \]

To design candidates for ∇̃_z L, we will use the following lemma.

Lemma 3.2. Let S ∈ R^{m²×n²}. If e_i ∈ ker(S) for some index i, then for any x ∈ C^{m²}, we have (S^⊤ x)_i = 0.

Proof. We have (S^⊤ x)_i = ⟨S^⊤ x, e_i⟩ = ⟨x, S e_i⟩ = ⟨x, 0⟩ = 0.
For brevity, we denote the vectors

\[ A_j^t = -\beta_1 \Big( (P_j^{t+1})^* \mathcal{F}^{-1}\Big( u_j^{t+1} + \frac{\Lambda_j^t}{\beta_1} \Big) - (P_j^{t+1})^* P_j^{t+1} z^t \Big), \qquad B^t = -\beta_2 \Big( \nabla^\top \Big( v^{t+1} + \frac{y^t}{\beta_2} \Big) + \Delta z^t \Big). \tag{3.25} \]

At each element i = 1, ..., n², (3.23) becomes

\[ (z^{t+1})_i = (z^t)_i - \delta_z^t \big( \nabla_z L(u^{t+1}, \omega^{t+1}, v^{t+1}, z^t, \Lambda^t, y^t) \big)_i = (z^t)_i - \delta_z^t \Big( \sum_{j=1}^{N} (A_j^t)_i + (B^t)_i \Big). \tag{3.26} \]
By Lemma 3.2, since (P_j^{t+1})^* = (ω^{t+1} ∘ S_j)^* = S_j^⊤ D_{(ω^{t+1})^*}, we have (A_j^t)_i = 0 if e_i ∈ ker(S_j), which means that element i is not scanned by the mask matrix S_j. For each i = 1, ..., n², we define N_i = {j : e_i ∉ ker(S_j)} to be the set of indices corresponding to the mask matrices that scan element i. As a result, (3.26) reduces to, and can be rewritten as,

\[ (z^{t+1})_i = (z^t)_i - \delta_z^t \Big( \sum_{j \in N_i} (A_j^t)_i + (B^t)_i \Big) = (z^t)_i - |N_i|\,\delta_z^t \bigg( \frac{1}{|N_i|} \sum_{j \in N_i} \Big( (A_j^t)_i + \frac{1}{|N_i|}(B^t)_i \Big) \bigg). \tag{3.27} \]

Comparing (3.26) and (3.27), we observe that

\[ \frac{1}{|N_i|} \sum_{j \in N_i} \Big( (A_j^t)_i + \frac{1}{|N_i|}(B^t)_i \Big) \propto \big( \nabla_z L(u^{t+1}, \omega^{t+1}, v^{t+1}, z^t, \Lambda^t, y^t) \big)_i. \]

Thus, a candidate for the stochastic estimator ∇̃_z L is the SGD estimator ∇̃_z^{SGD} L given by

\[ \big( \tilde{\nabla}_z^{\mathrm{SGD}} L(u^{t+1}, \omega^{t+1}, v^{t+1}, z^t, \Lambda^t, y^t) \big)_i = \frac{1}{|n_i^t|} \sum_{j \in n_i^t} \Big( (A_j^t)_i + \frac{1}{|N_i|}(B^t)_i \Big), \tag{3.28} \]

where n_i^t ⊂ N_i is a mini-batch sampled from N_i at iteration t [3, 40].
We can further modify (3.28) by incorporating spatially varying step sizes inspired by the PIE algorithms [32, 33, 42]. We define

\[ \Psi_{i,j} = \begin{cases} \dfrac{1}{\|\omega^{t+1}\|_\infty^2}, & \text{ePIE [33]}, \\[8pt] \dfrac{\|P_j^{t+1} e_i\|_1}{\|\omega^{t+1}\|_\infty \big( \|P_j^{t+1} e_i\|_1^2 + \gamma_z \|\omega^{t+1}\|_\infty^2 \big)}, & \text{PIE [42]}, \\[8pt] \dfrac{1}{(1 - \gamma_z)\|P_j^{t+1} e_i\|_1^2 + \gamma_z \|\omega^{t+1}\|_\infty^2}, & \text{rPIE [32]}, \end{cases} \tag{3.29} \]

with γ_z ∈ [0, 1] for PIE and rPIE. A class of PIE candidates for the stochastic estimator is

\[ \big( \tilde{\nabla}_z^{\mathrm{PIE}} L(u^{t+1}, \omega^{t+1}, v^{t+1}, z^t, \Lambda^t, y^t) \big)_i = \frac{1}{|n_i^t|} \sum_{j \in n_i^t} \Psi_{i,j} \Big( (A_j^t)_i + \frac{1}{|N_i|}(B^t)_i \Big). \tag{3.30} \]
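A per-pixel sketch of the stochastic image step (3.24) with the SGD estimator (3.28). The flattened storage of A_j^t and B^t, the precomputed |N_i| counts, and the function name are assumptions of the sketch.

```python
import numpy as np

def z_sgd_step(z, A, B, hits, counts, step):
    """Update (3.24) using estimator (3.28) on flattened pixels.

    A[j]: A_j^t as a length-n^2 vector (zero outside window j); B: B^t;
    hits[i]: list of sampled batch indices j in n_i^t that scan pixel i;
    counts[i]: |N_i|, the total number of masks scanning pixel i.
    """
    z_new = z.copy()
    for i, js in hits.items():
        if js:  # only pixels touched by the current batch are updated
            est = sum(A[j][i] + B[i] / counts[i] for j in js) / len(js)
            z_new[i] = z[i] - step * est
    return z_new
```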
The overall stochastic ADMM algorithm that solves (3.3) is given in Algorithm 3.1. Notice that the non-blind problem is just a special case with the probe ω fixed.
4. Convergence Analysis. We discuss the convergence of Algorithm 3.1. Although global convergence for ADMM can be established under Kurdyka-Łojasiewicz assumptions [49], the result does not apply to our models because they contain the gradient operator, which does not satisfy the necessary surjectivity assumption. Hence, we will prove up to subsequential convergence. The convergence analysis is based on the analyses in [10, 11, 51], where, under certain assumptions, the iterate subsequences of the ADMM algorithms were shown to converge to Karush-Kuhn-Tucker (KKT) points.
Algorithm 3.1 Stochastic ADMM to solve (3.3)
Input: set of masks {S_j}_{j=1}^N; model parameters λ > 0, α ∈ [0, 1]; penalty parameters β_1, β_2 > 0; sequence of step sizes {(δ_ω^t, δ_z^t)}_{t=1}^∞; batch size b ≤ N; PIE factors γ_z, γ_ω ∈ [0, 1].
1: Initialize ω^0, z^0, {u_j^0}_{j=1}^N = {Λ_j^0}_{j=1}^N, y^0 = ∇z^0.
2: for t = 0 to T − 1 do
3:   Uniformly sample without replacement a subset n_t ⊂ {1, ..., N} of batch size b.
4:   Compute n_i^t from n_t, i.e., the sampled masks scanning pixel i, so that |n_i^t| = Σ_{j∈n_t} ∥S_j e_i∥_1.
5:   Update u_j^{t+1} according to (3.8) for each j ∈ n_t.
6:   if ω is unknown then
7:     Update ω^{t+1} = ω^t − δ_ω^t ∇̃_ω L(u^{t+1}, ω^t, v^t, z^t, Λ^t, y^t); see (3.15) and (3.17) for candidates ∇̃_ω L.
8:   else
9:     ω^{t+1} = ω^t.
10:  end if
11:  Compute (v^{t+1})_i = prox_{(λ/β_2)(∥·∥_1 − α∥·∥_2)}((∇z^t)_i − (y^t)_i/β_2) for all i = 1, ..., n²; see Lemma 3.1.
12:  Update (z^{t+1})_i = (z^t)_i − δ_z^t (∇̃_z L(u^{t+1}, ω^{t+1}, v^{t+1}, z^t, Λ^t, y^t))_i for all i such that n_i^t ≠ ∅; see (3.28) and (3.30) for candidates ∇̃_z L.
13:  Compute Λ_j^{t+1} = Λ_j^t + β_1(u_j^{t+1} − F(ω^{t+1} ∘ S_j z^{t+1})) for all j ∈ n_t, and y^{t+1} = y^t + β_2(v^{t+1} − ∇z^{t+1}).
14: end for
Output: ω^* = ω^T, z^* = z^T
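To make steps 3-4 and the pixel filter in step 12 concrete, the self-contained snippet below samples a batch and counts, for every pixel, how many sampled windows cover it, so that |n_i^t| > 0 marks exactly the pixels updated in step 12. The rectangular-window scan geometry is our own assumption.

```python
import numpy as np

def batch_pixel_counts(n, m, positions, batch):
    """|n_i^t| for every pixel: coverage by the sampled windows S_j, j in n_t."""
    hits = np.zeros((n, n), dtype=int)
    for j in batch:
        r, c = positions[j]
        hits[r:r + m, c:c + m] += 1
    return hits

rng = np.random.default_rng(1)
n, m, step, b = 64, 16, 8, 5
positions = [(r, c) for r in range(0, n - m + 1, step)
             for c in range(0, n - m + 1, step)]
batch = rng.choice(len(positions), size=b, replace=False)  # step 3
hits = batch_pixel_counts(n, m, positions, batch)          # step 4
update_mask = hits > 0   # pixels i with n_i^t != empty, updated in step 12
```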
To simplify notation, let Z = (u, ω, v, z) and Ω = (Λ, y). We also write L(ω), for example, to represent the Lagrangian with respect to ω with all other variables fixed at their most recent values. A KKT point (Z⋆, Ω⋆) of the Lagrangian (3.5) satisfies the KKT conditions given by
\[ 0 \in \begin{cases} \partial|u_j^\star| \big( |u_j^\star| - \sqrt{d_j} \big) + \Lambda_j^\star, & \text{if AGM}, \\[6pt] \partial|u_j^\star| \Big( |u_j^\star| - \dfrac{d_j}{|u_j^\star|} \Big) + \Lambda_j^\star, & \text{if IPM}, \end{cases} \quad \text{for } j = 1, \dots, N, \tag{4.1a} \]
\[ -\frac{y^\star}{\lambda} \in \partial\big( \|v^\star\|_1 - \alpha\|v^\star\|_{2,1} \big), \tag{4.1b} \]
\[ u_j^\star = \mathcal{F}(\omega^\star \circ S_j z^\star) \quad \text{for } j = 1, \dots, N, \tag{4.1c} \]
\[ v^\star = \nabla z^\star, \tag{4.1d} \]
\[ \nabla_\omega L(Z^\star, \Omega^\star) = 0, \tag{4.1e} \]
\[ \nabla_z L(Z^\star, \Omega^\star) = 0. \tag{4.1f} \]
Because we implement SGD to solve for the probe ω and the image z in the ADMM algorithm, we replace (4.1e) and (4.1f) with the following conditions, respectively:

\[ \mathbb{E}\big[ \|\nabla_\omega L(Z^\star, \Omega^\star)\|_2^2 \big] = 0, \tag{4.1e′} \]
\[ \mathbb{E}\big[ \|\nabla_z L(Z^\star, \Omega^\star)\|_2^2 \big] = 0. \tag{4.1f′} \]

We say a point (Z⋆, Ω⋆) is a stochastic KKT point if it satisfies (4.1a)-(4.1d) and (4.1e′)-(4.1f′). Letting E_t denote the expectation conditioned on the first t iterations of the stochastic ADMM algorithm, we impose the following assumption, adapted from [3, Assumption 4.3], on the stochastic gradient estimators ∇̃_ω L and ∇̃_z L.
Assumption 4.1. Let {(Z^t, Ω^t)}_{t=1}^∞ be a sequence of iterates generated by Algorithm 3.1. Suppose that at each iteration t, the stochastic gradient estimators ∇̃_ω L(ω^t) := ∇̃_ω L(u^{t+1}, ω^t, v^t, z^t, Λ^t, y^t) and ∇̃_z L(z^t) := ∇̃_z L(u^{t+1}, ω^{t+1}, v^{t+1}, z^t, Λ^t, y^t) satisfy the following:

(a) There exist constants K_U ≥ K_L > 0 such that

\[ \mathfrak{R}\Big( \mathbb{E}_t\big[ \langle \nabla_\omega L(\omega^t), \tilde{\nabla}_\omega L(\omega^t) \rangle \big] \Big) \geq K_L\, \mathbb{E}_t\big[ \|\nabla_\omega L(\omega^t)\|_2^2 \big], \tag{4.3} \]
\[ \big\| \mathbb{E}_t[\tilde{\nabla}_\omega L(\omega^t)] \big\|_2^2 \leq K_U\, \mathbb{E}_t\big[ \|\nabla_\omega L(\omega^t)\|_2^2 \big], \tag{4.4} \]
\[ \mathfrak{R}\Big( \mathbb{E}_t\big[ \langle \nabla_z L(z^t), \tilde{\nabla}_z L(z^t) \rangle \big] \Big) \geq K_L\, \mathbb{E}_t\big[ \|\nabla_z L(z^t)\|_2^2 \big], \tag{4.5} \]
\[ \big\| \mathbb{E}_t[\tilde{\nabla}_z L(z^t)] \big\|_2^2 \leq K_U\, \mathbb{E}_t\big[ \|\nabla_z L(z^t)\|_2^2 \big]. \tag{4.6} \]

(b) There exist constants M, M_V ≥ 0 such that

\[ \mathbb{E}_t\big[ \|\tilde{\nabla}_\omega L(\omega^t)\|_2^2 \big] - \big\| \mathbb{E}_t[\tilde{\nabla}_\omega L(\omega^t)] \big\|_2^2 \leq M + M_V\, \mathbb{E}_t\big[ \|\nabla_\omega L(\omega^t)\|_2^2 \big], \tag{4.7} \]
\[ \mathbb{E}_t\big[ \|\tilde{\nabla}_z L(z^t)\|_2^2 \big] - \big\| \mathbb{E}_t[\tilde{\nabla}_z L(z^t)] \big\|_2^2 \leq M + M_V\, \mathbb{E}_t\big[ \|\nabla_z L(z^t)\|_2^2 \big]. \tag{4.8} \]
To prove the convergence of Algorithm 3.1, we require the following preliminary results. Lemma 4.2 provides useful inequalities, while Proposition 4.3 bounds the iterates {(Z^t, Ω^t)}_{t=1}^∞ and establishes a boundedness property of the gradients {(∇_ω L(ω^t), ∇_z L(z^t))}_{t=1}^∞.

Lemma 4.2. Let {(Z^t, Ω^t)}_{t=1}^∞ be a sequence of iterates generated by Algorithm 3.1 that satisfies Assumption 4.1. Suppose that {(ω^t, z^t)}_{t=1}^∞ is bounded. For each iteration t, we have

\[ \mathbb{E}_t[L(\omega^{t+1})] - \mathbb{E}_t[L(\omega^t)] \leq -\Big( K_L - \frac{\delta_\omega^t L_\omega (M_V + K_U)}{2} \Big) \delta_\omega^t\, \mathbb{E}_t\big[ \|\nabla_\omega L(\omega^t)\|_2^2 \big] + \frac{(\delta_\omega^t)^2 L_\omega M}{2}, \tag{4.9} \]
\[ \mathbb{E}_t[L(z^{t+1})] - \mathbb{E}_t[L(z^t)] \leq -\Big( K_L - \frac{\delta_z^t L_z (M_V + K_U)}{2} \Big) \delta_z^t\, \mathbb{E}_t\big[ \|\nabla_z L(z^t)\|_2^2 \big] + \frac{(\delta_z^t)^2 L_z M}{2}, \tag{4.10} \]

for some constants L_ω, L_z > 0.
Proposition 4.3. Let {(Z^t, Ω^t)}_{t=1}^∞ be a sequence of iterates generated by Algorithm 3.1 that satisfies Assumption 4.1. Suppose {(ω^t, z^t)}_{t=1}^∞ is bounded, Σ_{t=1}^∞ ∥Ω^{t+1} − Ω^t∥_2² < ∞, and

\[ \sum_{t=1}^{\infty} \delta_\omega^t = \infty, \quad \sum_{t=1}^{\infty} (\delta_\omega^t)^2 < \infty, \quad \sum_{t=1}^{\infty} \delta_z^t = \infty, \quad \sum_{t=1}^{\infty} (\delta_z^t)^2 < \infty. \tag{4.11} \]

Then {(Z^t, Ω^t)}_{t=1}^∞ is bounded and

\[ \sum_{t=1}^{\infty} \mathbb{E}\big[ \delta_\omega^t \|\nabla_\omega L(\omega^t)\|_2^2 + \delta_z^t \|\nabla_z L(z^t)\|_2^2 \big] < \infty. \tag{4.12} \]
The convergence of Algorithm 3.1 is finally established below (see proofs in Appendix B).

Theorem 4.4. Let {(Z^t, Ω^t)}_{t=1}^∞ be generated by Algorithm 3.1. Under the same assumptions as Proposition 4.3, there exists a subsequence of {(Z^t, Ω^t)}_{t=1}^∞ whose accumulation point (Z⋆, Ω⋆) is almost surely (a.s.) a stochastic KKT point of (3.5).

We note that the requirement Σ_{t=1}^∞ ∥Ω^{t+1} − Ω^t∥_2² < ∞ is rather strong, but a similar assumption was made in other nonconvex ADMM algorithms [24, 25, 53] that do not satisfy the necessary assumptions for global convergence [49].
5. Numerical Results. In this section, we evaluate the performance of Algorithm 3.1 on the two complex images presented in Figure 2. The probe size used for both images is 256 × 256, and the probe is scanned across an image from left to right and top to bottom, giving us N = 100 measurements. The measurements {d_j}_{j=1}^N are corrupted by either Gaussian noise or Poisson noise. More specifically, when the measurements are corrupted by Gaussian noise, we have

\[ d_j = \big( |\mathcal{F}(P_j z)| + \epsilon \big)^2, \tag{5.1} \]

where ϵ is an i.i.d. Gaussian random vector. When the measurements are corrupted by Poisson noise, we have

\[ d_j = \mathrm{Poisson}\big( |\mathcal{F}(P_j z_\zeta)| \big), \tag{5.2} \]

where z_ζ = ζz for some constant ζ > 0. Note that the Poisson noise is stronger when ζ is smaller.
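A sketch of the two corruption models (5.1)-(5.2); here `clean_mag` stands for |F(P_j z)|, and the helper name and argument layout are ours.

```python
import numpy as np

def corrupt(clean_mag, eps_std=None, zeta=None, rng=np.random.default_rng(0)):
    """Gaussian model (5.1): (|F(Pz)| + eps)^2 with eps ~ N(0, eps_std^2);
    Poisson model (5.2): Poisson(zeta*|F(Pz)|), since |F(P(zeta z))| = zeta|F(Pz)|."""
    if eps_std is not None:
        noise = eps_std * rng.standard_normal(clean_mag.shape)
        return (clean_mag + noise) ** 2
    return rng.poisson(zeta * clean_mag).astype(float)
```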
For numerical evaluation, we compute the SSIMs [50] of the magnitudes and phases between the reconstructed image z^{**} and the ground-truth image z^g, where z^{**}_i = ζ^* z^*_{i+t^*} is adjusted for scaling by ζ^* and translation by t^*, and

\[ (\zeta^*, t^*) = \arg\min_{\zeta \in \mathbb{C},\, t \in \mathbb{Z}} \; \sum_{i=1}^{n^2} |\zeta z^*_{i+t} - z^g_i|^2. \]

We compare the proposed stochastic ADMM algorithms with their deterministic, full-batch counterparts (i.e., (3.22) and (3.9) are solved exactly) and their isotropic TV counterparts based on [10, 11].
[Figure 2: Two complex sample images and their probes examined in the experiments. Left column: sample magnitude; middle column: sample phase with inserted proportionally sized probe magnitude; right column: the magnitude differences between the ground-truth probe and the initial probe ω^0. Colorbar ticks omitted.]
Table 1: Parameter settings for each method. Note that b refers to the batch size.

| Fidelity | Total Epochs | β1 = β2 | δ_z^t | Ψ_{i,j} | δ_ω^t | Φ_j^t |
| AGM | 600 | 0.25 | 2√b (1 ≤ t ≤ 300); (1/5)√b (300 < t ≤ 450); (1/50)√b (450 < t ≤ 600) | rPIE (γ_z = 0.1) | √b × 10⁻³ (1 ≤ t ≤ 300); √b × 10⁻⁴ (300 < t ≤ 450); √b × 10⁻⁵ (450 < t ≤ 600) | rPIE (γ_ω = 0.025) |
| IPM | 300 | 0.25 | 15√b (1 ≤ t ≤ 150); (3/2)√b (150 < t ≤ 225); (3/20)√b (225 < t ≤ 300) | ePIE | 2√b × 10⁻³ (1 ≤ t ≤ 150); 2√b × 10⁻⁴ (150 < t ≤ 225); 2√b × 10⁻⁵ (225 < t ≤ 300) | ePIE |
The results are also compared with Douglas-Rachford splitting [46], rPIE [32], and PHeBIE [21]. We follow the implementation of Douglas-Rachford from [8].

We initialize z^0 = (1/√2)(1 + i1) when using AGM for Gaussian-corrupted measurements and z^0 = (ζ/√2)(1 + i1) when using IPM for Poisson-corrupted measurements. When performing the blind experiments using Algorithm 3.1, ω^0 is initialized as a perturbation of the ground-truth probe. The magnitude differences between the initial probe and the ground-truth probe are shown in Figure 2. The selected parameters, except for λ, are summarized in Table 1. The initial step sizes for δ_z^t and δ_ω^t are determined empirically, and motivated by (4.11), we decrease them by a factor of 10 at 1/2 and 3/4 of the total epochs. Decreasing the step size in this way is a popular technique, especially in the deep learning community [18, 20]. Inspired by [18], the step sizes are multiplied by a factor of √b so that they scale with the batch size b. For AITV regularization, we examine α ∈ {0.2, 0.4, 0.6, 0.8}.
Table 2: SSIM results of the algorithms applied to the Gaussian-corrupted measurements with SNR = 40. The stochastic algorithms (e.g., AITV and isoTV, b ∈ {5, 10, 20, 50}) are run three times to obtain the average SSIM values. Bold indicates the best value; underline indicates the second-best value.

| Method | Non-blind Chip (mag. / phase) | Non-blind Cameraman/Baboon (mag. / phase) | Blind Chip (mag. / phase) | Blind Cameraman/Baboon (mag. / phase) |
| DR | 0.8130 / 0.8089 | 0.8701 / 0.5191 | 0.8008 / 0.7642 | 0.8009 / 0.3207 |
| rPIE | 0.8886 / 0.9073 | 0.8930 / 0.6055 | 0.9070 / 0.9120 | 0.8890 / 0.6145 |
| PHeBIE | 0.8004 / 0.8019 | 0.8725 / 0.5718 | 0.8612 / 0.8438 | 0.8846 / 0.5756 |
| isoTV (b = 5) | 0.9501 / 0.9027 | 0.9393 / 0.7578 | 0.9426 / 0.8919 | 0.9324 / 0.7547 |
| isoTV (b = 10) | 0.9498 / 0.9004 | 0.9387 / 0.7475 | 0.9429 / 0.8891 | 0.9326 / 0.7477 |
| isoTV (b = 20) | 0.9514 / 0.8981 | 0.9385 / 0.7302 | 0.9447 / 0.8850 | 0.9298 / 0.7289 |
| isoTV (b = 50) | 0.9355 / 0.9193 | 0.9294 / 0.7050 | 0.9322 / 0.9047 | 0.9153 / 0.7025 |
| isoTV (full batch) | 0.9578 / 0.9145 | 0.9769 / 0.7338 | 0.9527 / 0.8698 | 0.9589 / 0.5774 |
| AITV (b = 5) | 0.9585 / 0.9556 | 0.9438 / 0.7720 | 0.9490 / 0.9477 | 0.9373 / 0.7775 |
| AITV (b = 10) | 0.9620 / 0.9579 | 0.9515 / 0.7747 | 0.9534 / 0.9481 | 0.9450 / 0.7772 |
| AITV (b = 20) | 0.9629 / 0.9583 | 0.9538 / 0.7707 | 0.9547 / 0.9470 | 0.9468 / 0.7690 |
| AITV (b = 50) | 0.9585 / 0.9550 | 0.9490 / 0.7358 | 0.9514 / 0.9432 | 0.9391 / 0.7342 |
| AITV (full batch) | 0.9674 / 0.9513 | 0.9814 / 0.7463 | 0.9676 / 0.9296 | 0.9725 / 0.5956 |
1661
+ and determine that α = 0.8 yields the best results across all of our numerical examples. The batch sizes we examine are b ∈ {5, 10, 20, 50} for Gaussian noise and b ∈ {5, 10, 20, 25} for Poisson noise. For each parameter setting and image, we run three trials to obtain the mean SSIM values.
+ The code for the experiments is available at https://github.com/kbui1993/Stochastic_ADMM_Ptycho.
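+ As an illustration, the following Python sketch implements the step-size schedule described above: an initial step size scaled by √b, decreased by a factor of 10 at 1/2 and again at 3/4 of the total number of epochs. The function name and arguments are our own; this is not taken from the released code.
+
+ import math
+
+ def step_size(t, total_epochs, base, batch_size):
+     """Step size at epoch t (1-indexed): base * sqrt(b), cut by 10x
+     after 1/2 and again after 3/4 of the total epochs."""
+     delta = base * math.sqrt(batch_size)
+     if t > 3 * total_epochs // 4:
+         return delta / 100.0
+     elif t > total_epochs // 2:
+         return delta / 10.0
+     return delta
+
+ # Example: AGM image step size with base 2 and batch size b = 20
+ # reproduces the 2*sqrt(b) -> (1/5)*sqrt(b) -> (1/50)*sqrt(b) schedule of Table 1.
+ print([step_size(t, 600, 2.0, 20) for t in (1, 300, 301, 450, 451, 600)])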
+ 5.1. Gaussian noise. The SNR of the noisy measurements [10] is given by
+
+ SNR( {√d_j}_{j=1}^N, {|F(P_j z)|}_{j=1}^N ) = −10 log10 [ ( Σ_{j=1}^N ∥√d_j − |F(P_j z)|∥²₂ ) / ( Σ_{j=1}^N ∥F(P_j z)∥²₂ ) ],
+
+ so, given the SNR value, the noise level ϵ in (5.1) can be calculated by
+
+ ϵ = [ 10^(−SNR/10) Σ_{j=1}^N ∥F(P_j z)∥²₂ / (N m²) ]^(1/2).
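+ A minimal numpy sketch of this conversion (our own helper, with the noiseless diffraction amplitudes F(P_j z) passed in as an array; the function name is an assumption, not part of the paper's code):
+
+ import numpy as np
+
+ def noise_level_from_snr(snr_db, clean_patterns):
+     """Compute the Gaussian noise level eps from a target SNR (in dB).
+
+     clean_patterns: array of shape (N, m, m) holding F(P_j z) for each
+     of the N scans, so N * m**2 is the total number of measurements."""
+     N = clean_patterns.shape[0]
+     m2 = clean_patterns[0].size
+     signal_energy = np.sum(np.abs(clean_patterns) ** 2)
+     return np.sqrt(10 ** (-snr_db / 10.0) * signal_energy / (N * m2))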
+ For both the non-blind and blind cases, we examine SNR = 40 for the noisy measurements. We set λ = 10.0. The numerical results are recorded in Table 2. For
+ [Figure 3 panels (a)-(x): reconstructions by isoTV and AITV (various batch sizes and full batch), DR, rPIE, and PHeBIE.]
+ Figure 3: Reconstructions of the non-blind case for the Gaussian noise. Top two rows: reconstructions of Figures 2a-2b; bottom two rows: reconstructions of Figures 2d-2e.
+ both the non-blind and blind cases, DR, rPIE, and PHeBIE yield magnitude images with the worst SSIM values, and AITV outperforms its isotropic TV counterpart with better SSIM values for both the magnitude and phase images. The stochastic AITV, particularly with b = 10 or b = 20, has magnitude SSIM values at most 0.04 lower than the best results obtained from the deterministic, full-batch AITV. In fact, stochastic AITV attains the second best magnitude SSIM values in three out of the four cases considered. On the other hand, stochastic AITV with either b = 10 or b = 20 has the best phase SSIM values, outperforming its deterministic version by up to 0.19. For the blind case of the cameraman/baboon image, the deterministic AITV and isotropic TV have worse phase SSIM values than their stochastic counterparts. In general, the stochastic algorithm does best at recovering phase images with superior SSIM values while recovering magnitude images with SSIM values comparable to the deterministic algorithm.
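+ For reference, a minimal sketch of how the magnitude and phase SSIM values of a complex reconstruction can be computed with scikit-image (the function name, variable names, and data-range choices are our assumptions, not taken from the released code):
+
+ import numpy as np
+ from skimage.metrics import structural_similarity
+
+ def mag_phase_ssim(z_rec, z_true):
+     """SSIM of the magnitude image and of the phase image of a complex reconstruction."""
+     mag_ssim = structural_similarity(np.abs(z_rec), np.abs(z_true),
+                                      data_range=np.ptp(np.abs(z_true)))
+     phase_ssim = structural_similarity(np.angle(z_rec), np.angle(z_true),
+                                        data_range=2 * np.pi)
+     return mag_ssim, phase_ssim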
+ The reconstructed images for the non-blind experiments are presented in Figure 3. DR, rPIE, PHeBIE, and the stochastic algorithms have artifacts in all four corners of the magnitude images because the corners are scanned significantly less than the middle of the image.
+ [Figure 4 panels (a)-(x): reconstructions by isoTV and AITV (various batch sizes and full batch), DR, rPIE, and PHeBIE.]
+ Figure 4: Reconstructions of the blind case for Gaussian noise.
+ [Figure 5: log-log plot; x-axis: epoch (10^0 to 10^3); y-axis: AGM (10^5 to 10^8).]
+ Figure 5: Amplitude Gaussian metric plotted across 600 epochs for the blind algorithms on the complex image given by Figures 2d-2e.
+ [Figure 6: two panels; x-axis: SNR (20 to 70); left panel y-axis: Phase SSIM (0 to 1); right panel y-axis: Magnitude SSIM (0.5 to 1).]
+ Figure 6: Magnitude and phase SSIMs over different Gaussian noise levels for the complex image given by Figures 2d-2e for the blind case.
+ However, the deterministic AITV has no artifacts because (3.22) is solved exactly for the image solution z. As a result, it has higher magnitude SSIM values than its stochastic counterparts. Nevertheless, the stochastic algorithms yield better phase images with fewer noise artifacts than any other algorithm. For example, the phase images of Figure 2e reconstructed from stochastic isoTV and AITV have the fewest cameraman remnants.
+ Figure 4 shows the results of the blind algorithms. The phase images reconstructed by the deterministic AITV, DR, rPIE, and PHeBIE are significantly worse than those of the stochastic algorithms. For example, in Figure 2b, the contrasts of the images reconstructed by the deterministic AITV, DR, and PHeBIE are inconsistent, becoming darker from left to right, while the contrasts are more consistent with the stochastic algorithms. For Figure 2e, the stochastic algorithms perform the best in recovering the phase image, while deterministic AITV is unable to recover the left half of the image, and DR, rPIE, and PHeBIE have strong remnants of the cameraman present. As in the non-blind case, stochastic AITV reconstructs the phase image the best.
+ In Figure 5, we examine the convergence of the blind algorithms applied to the Cameraman/Baboon image by recording their AGM values for each epoch. We omit the convergence curves for isoTV since they are similar to their AITV counterparts. Overall, the curves for our proposed stochastic algorithms are decreasing, validating the numerical convergence of Algorithm 3.1 with AGM fidelity. However, their curves are slightly above those of the deterministic ADMM algorithm and rPIE. rPIE outperforms the AITV algorithms because it minimizes only the AGM, while the AITV algorithms minimize the larger objective function given by (3.5). Overall, after several hundred epochs, our proposed stochastic algorithms give AGM values comparable to those of the deterministic AITV and rPIE algorithms.
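+ For concreteness, a small numpy sketch of the amplitude Gaussian metric (AGM) tracked in Figure 5, i.e. the amplitude-based Gaussian fidelity summed over scans. The array shapes, names, and the orthonormal FFT normalization are our assumptions:
+
+ import numpy as np
+
+ def agm(probe, obj_patches, data):
+     """Amplitude Gaussian metric: sum_j 0.5 * || |F(omega o S_j z)| - sqrt(d_j) ||_2^2.
+
+     probe:       (m, m) complex probe omega
+     obj_patches: (N, m, m) complex patches S_j z of the object
+     data:        (N, m, m) measured intensities d_j"""
+     exit_waves = np.fft.fft2(probe[None, :, :] * obj_patches, norm="ortho")
+     return 0.5 * np.sum((np.abs(exit_waves) - np.sqrt(data)) ** 2)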
+ Table 3: SSIM results of the algorithms applied to the Poisson-corrupted measurements with η = 0.01. The stochastic algorithms (e.g., AITV and isoTV, b ∈ {5, 10, 20, 25}) are run three times to obtain the average SSIM values. Bold indicates best value; underline indicates second best value.
+                     Non-blind: Chip       Non-blind: Cameraman/Baboon   Blind: Chip           Blind: Cameraman/Baboon
+                     mag.      phase       mag.      phase               mag.      phase       mag.      phase
+ DR                  0.8523    0.8455      0.8704    0.5043              0.8431    0.7387      0.7630    0.2529
+ PHeBIE              0.9404    0.9398      0.9271    0.6791              0.9280    0.9082      0.8678    0.5470
+ isoTV (b = 5)       0.9460    0.9151      0.9301    0.7193              0.9394    0.9001      0.9238    0.7105
+ isoTV (b = 10)      0.9365    0.9212      0.9250    0.7085              0.9342    0.9064      0.9188    0.6979
+ isoTV (b = 20)      0.9335    0.9397      0.9224    0.7008              0.9355    0.9248      0.9153    0.6905
+ isoTV (b = 25)      0.9353    0.9465      0.9220    0.6992              0.9376    0.9315      0.9150    0.6907
+ isoTV (full batch)  0.9767    0.9590      0.9773    0.7093              0.9655    0.9192      0.9588    0.4920
+ AITV (b = 5)        0.9526    0.9680      0.9319    0.7477              0.9409    0.9530      0.9242    0.7418
+ AITV (b = 10)       0.9590    0.9685      0.9375    0.7366              0.9472    0.9533      0.9321    0.7318
+ AITV (b = 20)       0.9598    0.9682      0.9383    0.7171              0.9494    0.9526      0.9322    0.7114
+ AITV (b = 25)       0.9585    0.9676      0.9373    0.7112              0.9492    0.9525      0.9307    0.7055
+ AITV (full batch)   0.9803    0.9644      0.9782    0.7084              0.9741    0.9354      0.9671    0.4975
+ 0.4975
2007
+ Lastly, we analyze the robustness of the blind algorithms applied to Figures 2d-2e cor-
2008
+ rupted by different levels of Gaussian noise, from SNR 25 to 65. The fidelity parameter λ
2009
+ varies for different noise level of the image: λ = 100 for SNR = 25; λ = 50 for SNR = 30,
2010
+ 35; λ = 10 for SNR =40, 45; λ = 5 for SNR = 50, 55, 60; and λ = 3 for SNR = 65. The
2011
+ SSIMs for the magnitude and phase images across different SNRs are plotted in Figure 6.
2012
+ For SNR ≥ 40, the deterministic algorithms have the best magnitude SSIMs than the other
2013
+ algorithms while their stochastic counterparts have slightly lower SSIMs. When SNR < 40,
2014
+ the stochastic algorithms perform the best. In fact, stochastic AITV has magnitude SSIM at
2015
+ least 0.90 across different noise levels. For the phase image, the stochastic algorithms have
2016
+ the highest SSIMs up to SNR = 55. For SNR ≥ 60, the rPIE algorithm has the best phase
2017
+ SSIM while stochastic AITV has the second best. Overall, stochastic AITV is the most stable
2018
+ across different levels of Gaussian noise.
2019
+ 5.2. Poisson noise. For both the non-blind and blind cases, we examine the measurements corrupted with Poisson noise with η = 0.01 according to (5.2). We set λ = 0.15. The numerical results are recorded in Table 3. Note that rPIE results are excluded because the algorithm is tailored towards measurements corrupted with Gaussian noise [32]. Across all cases, deterministic AITV attains the highest magnitude SSIM values and stochastic AITV attains the highest phase SSIM values, while DR performs the worst in reconstructing images from Poisson-corrupted measurements. We observe a general improvement in SSIM values for both magnitude and phase images by using AITV over isoTV. Although the stochastic algorithms have lower SSIM values than their deterministic counterparts for the magnitude images, the difference is at most 0.047 for AITV and at most 0.056 for isoTV. Moreover, the SSIM values of the magnitude images from the stochastic algorithms are at least 0.91. Similar to the Gaussian noise case, stochastic AITV reconstructs the phase image well while recovering the magnitude image with satisfactory quality.
+ We examine the robustness of the blind algorithms on Figures 2d-2e with different levels of Poisson noise. The noise levels we examine are η ∈ {0.005k}_{k=1}^{9}. We set the fidelity parameter to be λ = 15 × η. The SSIMs for the magnitude and phase images across the different Poisson noise levels are plotted in Figure 7. We observe that the deterministic algorithms yield the best magnitude SSIMs while the stochastic algorithms yield the best phase SSIMs. DR yields the worst results for both magnitude and phase components. Although stochastic AITV yields the third best SSIMs for the magnitude image, its SSIMs are at least 0.90. Moreover, it has the best phase SSIMs, exceeding its deterministic counterpart by about 0.20. In summary, stochastic AITV is a robust method across different levels of Poisson noise.
+ 6. Conclusion. In this work, we propose AITV-regularized variational models for image ptychography, where the measurements are corrupted by either Gaussian or Poisson noise. To adapt the algorithm to a large number of measurements, we design a stochastic ADMM algorithm that incorporates adaptive step sizes based on the PIE algorithms. Overall, using both AITV regularization and stochastic ADMM, we are able to reconstruct an image of satisfactory quality from heavily corrupted measurements, as demonstrated in our numerical experiments. In fact, the phase component of the image is best recovered through our proposed algorithms. Lastly, we prove theoretical convergence for the proposed stochastic ADMM algorithm under certain conditions and demonstrate numerical convergence in our experiments.
+ Future directions include the design of a globally convergent algorithm for the AITV-regularized ptychography model, and the incorporation of variance-reduced stochastic gradient estimators, such as SVRG [23] and SARAH [36], to accelerate convergence and improve reconstruction quality.
+ Appendix A. Proof of Lemma 3.1.
+ Proof. If x′ = 0, then the result is trivial, so for the rest of the proof, we assume that x′ ̸= 0. Suppose x∗ is the optimal solution to (3.20). We show that sgn(x∗) = sgn(x′). If x∗ = 0, then we can choose ci ∈ {c′ ∈ C : |c′| = 1} such that sgn(x∗)i = ci = sgn(x′)i for each i = 1, . . . , n², giving the desired result. Suppose that x∗ ̸= 0. Because ∥ · ∥1 − α∥ · ∥2,1 is rotation invariant, we only need to examine and expand the quadratic term in (3.20). We see that
+
+ ∥x∗ − x′∥²₂ = Σ_{i=1}^{n²} |(x∗)i − (x′)i|² = Σ_{i=1}^{n²} ( |(x∗)i|² + |(x′)i|² − 2|(x∗)i||(x′)i| cos θi ),
+
+ where θi is the angle between the components (x∗)i and (x′)i. This term is minimized when θi = 0 for all i. This means that sgn(x∗)i = sgn(x′)i for all i, as otherwise x∗ would not be an optimal solution to (3.20). Hence, sgn(x∗) = sgn(x′).
+ After establishing that sgn(x∗) = sgn(x′), we simplify (3.20) to an optimization problem with respect to |x| given by
+
+ |x∗| = arg min_{ρ ∈ R^{n²}, (ρ)i ≥ 0} ∥ρ∥1 − α∥ρ∥2 + (1/(2λ)) ∥ρ − |x′|∥²₂.
+ [Figure 7: two panels; x-axis: Poisson noise level η (0 to 0.05); left panel y-axis: Phase SSIM (0 to 1); right panel y-axis: Magnitude SSIM (0.5 to 1).]
+ Figure 7: Magnitude and phase SSIMs over different Poisson noise levels for the complex image given by Figures 2d-2e for the blind case.
+ Therefore, by applying [27, Lemma 1] to this optimization problem and then multiplying the solution |x∗| by sgn(x∗), we obtain the desired result.
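+ As an illustration of the solution operator invoked here, the following sketch implements the real, nonnegative ℓ1 − αℓ2 proximal step of [27, Lemma 1] and restores the complex sign, under our reading of the three cases in that lemma (this is our own transcription, not code from the paper):
+
+ import numpy as np
+
+ def prox_l1_minus_alpha_l2(x_prime, lam, alpha):
+     """Solve argmin_x ||x||_1 - alpha*||x||_2 + (1/(2*lam))*||x - x'||_2^2
+     for complex x' by reducing to magnitudes and restoring the sign."""
+     y = np.abs(x_prime)
+     sgn = np.exp(1j * np.angle(x_prime))
+     ymax = y.max()
+     if ymax > lam:
+         z = np.maximum(y - lam, 0.0)          # soft-thresholded magnitudes
+         rho = z * (np.linalg.norm(z) + alpha * lam) / np.linalg.norm(z)
+     elif ymax > (1.0 - alpha) * lam:
+         rho = np.zeros_like(y)                # one-sparse solution
+         i = np.argmax(y)
+         rho[i] = ymax + (alpha - 1.0) * lam
+     else:
+         rho = np.zeros_like(y)                # zero solution
+     return sgn * rho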
+ Appendix B. Proofs of Section 4. Before proving our main results, we present preliminary tools necessary for the convergence analysis.
+
+ Definition B.1 ([41]). Let h : R^{n²} → (−∞, +∞] be a proper and lower semicontinuous function and dom h := {x ∈ R^{n²} : h(x) < ∞}.
+ (a) The Fréchet subdifferential of h at the point x ∈ dom h is the set
+
+ ∂̂h(x) = { v ∈ R^{n²} : liminf_{y ̸= x, y → x} [ h(y) − h(x) − ⟨v, y − x⟩ ] / ∥y − x∥ ≥ 0 }.
+
+ (b) The limiting subdifferential of h at the point x ∈ dom h is the set
+
+ ∂h(x) = { v ∈ R^{n²} : ∃ {(x^t, y^t)}_{t=1}^∞ s.t. x^t → x, h(x^t) → h(x), ∂̂h(x^t) ∋ y^t → v }.
+
+ We note that the limiting subdifferential is closed [41]:
+
+ (x^t, y^t) → (x, y), h(x^t) → h(x), y^t ∈ ∂h(x^t) =⇒ y ∈ ∂h(x).
+
+ After establishing the definitions of subdifferentials in the real case, we extend them to the complex case. If z = z1 + z2 i, where z1, z2 ∈ R^{n²}, then the limiting subdifferential of a proper and lower semicontinuous function f : C^{n²} → (−∞, +∞] is defined by
+
+ ∂f(z) := ∂_{z1} f(z) + ∂_{z2} f(z) i.    (B.1)
+
+ If the function f is continuously differentiable at the point z, then with slight abuse of notation, we denote its gradient by ∇f(z), and ∂f(z) = {∇f(z)} [41].
+ KEVIN BUI AND ZICHAO (WENDY) DI
2167
+ B.1. Proof of Lemma 4.2.
2168
+ Proof. If {(ωt, zt)}∞
2169
+ t=1 is bounded, then there exists a constant C such that ∥ωt∥∞, ∥zt∥∞ ≤
2170
+ C for all t ∈ N. We establish that L(ω) has a Lipschitz continuous gradient with respect to
2171
+ ω. For any ω1, ω2 ∈ Cm2 at iteration t, we have
2172
+ ∥∇ωL(ω2) − ∇ωL(ω1)∥2 ≤
2173
+ ������
2174
+ N
2175
+
2176
+ j=1
2177
+ β1(Sjzt)∗ ◦ (ω2 − ω1) ◦ (Sjzt)
2178
+ ������
2179
+ 2
2180
+
2181
+
2182
+
2183
+ N
2184
+
2185
+ j=1
2186
+ β1
2187
+ ��(Sjzt)∗ ◦ (Sjzt)
2188
+ ��
2189
+
2190
+
2191
+ � ∥ω2 − ω1∥2 ≤ β1NC2∥ω2 − ω1∥2.
2192
+ Hence, we observe that L(ω) has a Lipschitz continuous gradient with Lipschitz constant
2193
+ Lω := β1NC2. By the descent property [8, Definition 1], at iteration t we have
2194
+ L(ωt+1) − L(ωt) ≤ R(⟨∇ωL(ωt), ωt+1 − ωt⟩) + Lω
2195
+ 2 ∥ωt+1 − ωt∥2
2196
+ 2
2197
+ = −δt
2198
+ ωR(⟨∇ωL(ωt), ˜∇ωL(ωt)⟩) + Lω(δt
2199
+ ω)2
2200
+ 2
2201
+ ∥ ˜∇ωL(ωt)∥2
2202
+ 2,
2203
+ where the last equality is due to (3.12). Taking the expectation with respect to the first t
2204
+ iterations, we obtain
2205
+ Et[L(ωt+1)] − Et[L(ωt)] = −δt
2206
+ ωR
2207
+
2208
+ Et
2209
+
2210
+ ⟨∇ωL(ωt), ˜∇ωL(ωt)⟩
2211
+ ��
2212
+ + Lω(δt
2213
+ ω)2
2214
+ 2
2215
+ Et
2216
+
2217
+ ∥ ˜∇ωL(ωt)∥2
2218
+ 2
2219
+
2220
+ ≤ −
2221
+
2222
+ KL − δt
2223
+ ωLω(MV + KU)
2224
+ 2
2225
+
2226
+ δt
2227
+ ωEt
2228
+ ���∇ωL(ωt)
2229
+ ��2
2230
+ 2
2231
+
2232
+ + (δt
2233
+ ω)2LωM
2234
+ 2
2235
+ ,
2236
+ where the last inequality is due to combining (4.3), (4.4). and (4.7). Similarly, we can estimate
2237
+ (4.10) because we can compute that L(z) has a Lipschitz continuous gradient with Lipschitz
2238
+ constant Lz := β1NC2 + β2∥∆∥ and follow the same steps as above.
2239
+ B.2. Proof of Proposition 4.3.
+ Proof. Because Σ_{t=1}^∞ ∥Ω^{t+1} − Ω^t∥²₂ < ∞, we have lim_{t→∞} Λ^{t+1}_j − Λ^t_j = 0 for each j = 1, . . . , N and lim_{t→∞} y^{t+1} − y^t = 0, which implies from (3.6e)-(3.6f) that
+
+ lim_{t→∞} u^t_j − F(ω^t ◦ Sj z^t) = 0   ∀j = 1, . . . , N,    (B.2)
+ lim_{t→∞} v^t − ∇z^t = 0.    (B.3)
+
+ It follows that {(u^t, v^t)}_{t=1}^∞ is bounded since {(ω^t, z^t)}_{t=1}^∞ is bounded. By (3.8), when B(·, ·) is AGM, we have
+
+ ∥u^{t+1}_j∥2 = ∥ ( √d_j + β1 |F(ω^t ◦ Sj z^t) − Λ^t_j/β1| ) / (1 + β1) ∥2
+   ≥ ( β1/(1 + β1) ) ( (1/β1) ∥Λ^t_j∥2 − ∥F(ω^t ◦ Sj z^t)∥2 ),
+
+ or equivalently,
+
+ (1 + β1) ∥u^{t+1}_j∥2 + β1 ∥F(ω^t ◦ Sj z^t)∥2 ≥ ∥Λ^t_j∥2.    (B.4)
+
+ Similarly, when B(·, ·) is IPM, we have the same inequality as (B.4). As a result, {Λ^t}_{t=1}^∞ is bounded. Finally, we show that {y^t}_{t=1}^∞ is bounded. By Lemma 3.1, we have two cases. When
+
+ ∥(∇z^t)i − (y^t)i/β2∥∞ ≤ λ/β2,
+
+ we have
+
+ λ/β2 ≥ ∥(∇z^t)i − (y^t)i/β2∥∞ ≥ (1/β2) ∥(y^t)i∥∞ − ∥(∇z^t)i∥∞,
+
+ or λ + β2 ∥(∇z^t)i∥∞ ≥ ∥(y^t)i∥∞. Otherwise, we have
+
+ ∥(v^{t+1})i∥∞ ≥ | ∥(∇z^t)i − (y^t)i/β2∥∞ − λ/β2 | ≥ ∥(∇z^t)i − (y^t)i/β2∥∞ − λ/β2
+   ≥ (1/β2) ∥(y^t)i∥∞ − ∥(∇z^t)i∥∞ − λ/β2,
+
+ or β2 ( ∥(v^{t+1})i∥∞ + ∥(∇z^t)i∥∞ ) + λ ≥ ∥(y^t)i∥∞. Altogether, {y^t}_{t=1}^∞ is bounded since {(v^t, z^t)}_{t=1}^∞ is bounded. Therefore, we establish that {(Z^t, Ω^t)}_{t=1}^∞ is bounded.
+ We see that
+
+ L(Z, Ω) = Σ_{j=1}^N [ B(|u_j|², d_j) + (β1/2) ∥u_j − F(ω ◦ Sj z) + Λ_j/β1∥²₂ − (1/(2β1)) ∥Λ_j∥²₂ ]
+   + λ(∥v∥1 − α∥v∥2,1) + (β2/2) ∥v − ∇z + y/β2∥²₂ − (1/(2β2)) ∥y∥²₂
+   ≥ Σ_{j=1}^N [ B(|u_j|², d_j) − (1/(2β1)) ∥Λ_j∥²₂ ] − (1/(2β2)) ∥y∥²₂.
+
+ Because B(·, ·) is bounded below according to (3.2) and {Ω^t}_{t=1}^∞ is bounded, {L(Z^t, Ω^t)}_{t=1}^∞ is bounded below by some constant Linf. By (3.6a) and (3.6c), we have L(u^{t+1}) ≤ L(u^t) and L(v^{t+1}) ≤ L(v^t), respectively, so taking expectation with respect to the first t iterations, we obtain
+
+ E_t[L(ω^t)] = E_t[L(u^{t+1})] ≤ E_t[L(u^t)],    (B.5)
+ E_t[L(z^t)] = E_t[L(v^{t+1})] ≤ E_t[L(v^t)] = E_t[L(ω^{t+1})].    (B.6)
+
+ In addition, we have
+
+ L(Λ^{t+1}) − L(Λ^t) = Σ_{j=1}^N R(⟨Λ^{t+1}_j − Λ^t_j, u^{t+1}_j − F(ω^{t+1} ◦ Sj z^{t+1})⟩)
+   = (1/β1) Σ_{j=1}^N ∥Λ^{t+1}_j − Λ^t_j∥²₂ = (1/β1) ∥Λ^{t+1} − Λ^t∥²₂,
+
+ where the second-to-last equality is due to (3.6e). Taking expectation with respect to the first t iterations gives
+
+ E_t[L(Λ^{t+1})] − E_t[L(Λ^t)] = (1/β1) E_t[∥Λ^{t+1} − Λ^t∥²₂].    (B.7)
+
+ Similarly, we obtain
+
+ E_t[L(y^{t+1})] − E_t[L(y^t)] = (1/β2) E_t[∥y^{t+1} − y^t∥²₂].    (B.8)
+
+ Summing up (4.9)-(4.10) and (B.5)-(B.8) and taking the total expectation, we have
+
+ E[L(Z^{t+1}, Ω^{t+1})] − E[L(Z^t, Ω^t)] ≤ (1/β1) E[∥Λ^{t+1} − Λ^t∥²₂] + (1/β2) E[∥y^{t+1} − y^t∥²₂]
+   − ( K_L − δt_ω Lω (M_V + K_U)/2 ) δt_ω E[∥∇ωL(ω^t)∥²₂] + (δt_ω)² Lω M / 2
+   − ( K_L − δt_z Lz (M_V + K_U)/2 ) δt_z E[∥∇zL(z^t)∥²₂] + (δt_z)² Lz M / 2.    (B.9)
+
+ By (4.11), lim_{t→∞} δt_ω = 0 and lim_{t→∞} δt_z = 0, which means that we can assume without loss of generality that δt_ω Lω, δt_z Lz < K_L/(M_V + K_U) for all t ∈ N. Hence, summing over t = 1, . . . , T, we obtain
+
+ Linf − E[L(Z^0, Ω^0)] ≤ E[L(Z^{T+1}, Ω^{T+1})] − E[L(Z^0, Ω^0)]
+   ≤ Σ_{t=1}^T [ C ∥Ω^{t+1} − Ω^t∥²₂ − (K_L δt_ω / 2) E[∥∇ωL(ω^t)∥²₂] − (K_L δt_z / 2) E[∥∇zL(z^t)∥²₂]
+     + (δt_ω)² Lω M / 2 + (δt_z)² Lz M / 2 ],
+
+ where C = max{1/β1, 1/β2}. Rearranging the inequality and letting T → ∞ give us
+
+ 0 ≤ (K_L/2) Σ_{t=1}^∞ ( δt_ω E[∥∇ωL(ω^t)∥²₂] + δt_z E[∥∇zL(z^t)∥²₂] )
+   ≤ E[L(Z^0, Ω^0)] − Linf + Σ_{t=1}^∞ ( C ∥Ω^{t+1} − Ω^t∥²₂ + (δt_ω)² Lω M / 2 + (δt_z)² Lz M / 2 ).
+
+ By assumption, the right-hand side is bounded, which implies (4.12).
+ B.3. Proof of Theorem 4.4.
+ Proof. If {(ω^t, z^t)}_{t=1}^∞ is bounded, then ∥ω^t∥2, ∥z^t∥2 ≤ C for all t ∈ N for some constant C > 0. At a given iteration t, we have ∇ωL(ω^{t+1}) = 0 by the first-order optimality condition of (3.6b), and ∇ωL(ω) is Lipschitz. It follows that
+
+ ∥∇ωL(ω^t)∥2 = ∥∇ωL(ω^{t+1}) − ∇ωL(ω^t)∥2 ≤ Lω ∥ω^{t+1} − ω^t∥2 ≤ 2 C Lω.
+
+ Similarly, we have ∥∇zL(z^t)∥2 ≤ 2 C Lz. As a result, by squaring the inequalities and taking their expectation, {( E[∥∇ωL(ω^t)∥²₂], E[∥∇zL(z^t)∥²₂] )}_{t=1}^∞ is bounded. The step size condition (4.11) implies that lim_{t→∞} δt_ω = 0 and lim_{t→∞} δt_z = 0. Since the SGD steps for ω and z are
+
+ ω^{t+1} = ω^t − δt_ω ∇̃L(ω^t),   z^{t+1} = z^t − δt_z ∇̃L(z^t),
+
+ it follows that, by taking expectation with respect to the first t iterations and using Assumption 4.1,
+
+ E_t[∥ω^{t+1} − ω^t∥²₂] = (δt_ω)² E_t[∥∇̃ωL(ω^t)∥²₂] ≤ (δt_ω)² ( M + (M_V + K_U) E_t[∥∇ωL(ω^t)∥²₂] ),
+ E_t[∥z^{t+1} − z^t∥²₂] = (δt_z)² E_t[∥∇̃zL(z^t)∥²₂] ≤ (δt_z)² ( M + (M_V + K_U) E_t[∥∇zL(z^t)∥²₂] ).
+
+ Because {( E[∥∇ωL(ω^t)∥²₂], E[∥∇zL(z^t)∥²₂] )}_{t=1}^∞ is bounded, we apply the total expectation and take the limit to obtain
+
+ lim_{t→∞} E[∥ω^{t+1} − ω^t∥²₂] ≤ lim_{t→∞} (δt_ω)² ( M + (M_V + K_U) E[∥∇ωL(ω^t)∥²₂] ) = 0,    (B.10)
+ lim_{t→∞} E[∥z^{t+1} − z^t∥²₂] ≤ lim_{t→∞} (δt_z)² ( M + (M_V + K_U) E[∥∇zL(z^t)∥²₂] ) = 0.    (B.11)
+
+ Earlier, in the proof of Proposition 4.3, we obtained
+
+ lim_{t→∞} u^t_j − F(ω^t ◦ Sj z^t) = 0,   ∀j = 1, . . . , N,    (B.12)
+ lim_{t→∞} v^t − ∇z^t = 0.    (B.13)
+
+ By Proposition 4.3, {(Z^t, Ω^t)}_{t=1}^∞ is bounded and (4.12) holds.
+ Because {Z^t}_{t=1}^∞ is bounded, there exists a convergent subsequence {(Z^{tk}, Ω^{tk})}_{k=1}^∞ and a point (Z⋆, Ω⋆) such that lim_{k→∞} (Z^{tk}, Ω^{tk}) = (Z⋆, Ω⋆). In addition, (4.12) and (B.10)-(B.13) hold for the subsequence and its further subsequences. (B.10)-(B.11) imply that there is a further subsequence {(Z^{tkℓ}, Ω^{tkℓ})}_{ℓ=1}^∞ such that
+
+ lim_{ℓ→∞} ω^{tkℓ+1} − ω^{tkℓ} = 0 a.s.,    (B.14)
+ lim_{ℓ→∞} z^{tkℓ+1} − z^{tkℓ} = 0 a.s.    (B.15)
+
+ From (4.12), we obtain lim_{ℓ→∞} E[ δ^{tkℓ}_ω ∥∇ωL(ω^{tkℓ})∥²₂ + δ^{tkℓ}_z ∥∇zL(z^{tkℓ})∥²₂ ] = 0, so we obtain
+
+ liminf_{ℓ→∞} E[∥∇ωL(ω^{tkℓ})∥²₂] = 0,    (B.16)
+ liminf_{ℓ→∞} E[∥∇zL(z^{tkℓ})∥²₂] = 0.    (B.17)
+
+ Now we show that (Z⋆, Ω⋆) is a stochastic KKT point a.s. From (B.12)-(B.13), we have
+
+ u⋆_j = lim_{ℓ→∞} u^{tkℓ}_j = lim_{ℓ→∞} F(ω^{tkℓ} ◦ Sj z^{tkℓ}) = F(ω⋆ ◦ Sj z⋆),    (B.18)
+ v⋆ = lim_{ℓ→∞} v^{tkℓ} = lim_{ℓ→∞} ∇z^{tkℓ} = ∇z⋆.    (B.19)
+
+ By (B.12) and (B.14)-(B.15), we have
+
+ lim_{ℓ→∞} u^{tkℓ+1} = lim_{ℓ→∞} F(ω^{tkℓ+1} ◦ Sj z^{tkℓ+1}) = F( lim_{ℓ→∞} ω^{tkℓ+1} ◦ Sj ( lim_{ℓ→∞} z^{tkℓ+1} ) )
+   = F( lim_{ℓ→∞} ω^{tkℓ} ◦ Sj ( lim_{ℓ→∞} z^{tkℓ} ) ) = lim_{ℓ→∞} F(ω^{tkℓ} ◦ Sj z^{tkℓ}) = lim_{ℓ→∞} u^{tkℓ} a.s.
+
+ As a result, we have
+
+ lim_{ℓ→∞} u^{tkℓ+1} − u^{tkℓ} = 0 a.s.    (B.20)
+
+ Similarly, by (B.13) and (B.15), we have
+
+ lim_{ℓ→∞} v^{tkℓ+1} − v^{tkℓ} = 0 a.s.    (B.21)
+
+ For the sake of brevity, we will omit "a.s." henceforth.
+ Next we prove (4.1a). Suppose that B(·, ·) is AGM. At iteration tkℓ, the first-order optimality condition of (3.6a) is
+
+ 0 ∈ ∂|u^{tkℓ+1}_j| ◦ ( |u^{tkℓ+1}_j| − √d_j ) + Λ^{tkℓ}_j + β1 ( u^{tkℓ+1}_j − F(ω^{tkℓ} ◦ Sj z^{tkℓ}) ),    (B.22)
+
+ where
+
+ ( ∂|u^{tkℓ+1}_j| )_i = (u^{tkℓ+1}_j)_i / |(u^{tkℓ+1}_j)_i|   if (u^{tkℓ+1}_j)_i ̸= 0,
+ ( ∂|u^{tkℓ+1}_j| )_i = {u ∈ C : |u| ≤ 1}   if (u^{tkℓ+1}_j)_i = 0.
+
+ If (u⋆_j)_i ̸= 0 for some i ∈ {1, . . . , n²}, then there exists a neighborhood Br((u⋆_j)_i) = {u ∈ C : |u − (u⋆_j)_i| ≤ r} such that all points in Br((u⋆_j)_i) are nonzero. By (B.20) and the fact that lim_{ℓ→∞} u^{tkℓ} = u⋆, we have (u^{tkℓ+1}_j)_i ̸= 0 for all tkℓ sufficiently large. As a result, (B.22) becomes
+
+ 0 = ( (u^{tkℓ+1}_j)_i / |(u^{tkℓ+1}_j)_i| ) ( |(u^{tkℓ+1}_j)_i| − (√d_j)_i ) + (Λ^{tkℓ}_j)_i + β1 ( (u^{tkℓ+1}_j)_i − (F(ω^{tkℓ} ◦ Sj z^{tkℓ}))_i ).    (B.23)
+
+ By (B.12) and (B.20), we take the limit to obtain
+
+ 0 = ( (u⋆_j)_i / |(u⋆_j)_i| ) ( |(u⋆_j)_i| − (√d_j)_i ) + (Λ⋆_j)_i.    (B.24)
+
+ On the other hand, when (u⋆_j)_i = 0 for some index i, the iterates (u^{tkℓ}_j)_i ∈ Br((u⋆_j)_i) \ {(u⋆_j)_i} are nonzero. Taking the absolute value of (B.23) gives us
+
+ | (Λ^{tkℓ}_j)_i + β1 ( (u^{tkℓ+1}_j)_i − (F(ω^{tkℓ} ◦ Sj z^{tkℓ}))_i ) | = | |(u^{tkℓ+1}_j)_i| − (√d_j)_i |.
+
+ Again, by (B.12) and (B.20), taking the limit yields
+
+ |(Λ⋆_j)_i| = |(√d_j)_i|,
+
+ which implies that there exists u′ ∈ {u ∈ C : |u| ≤ 1} such that −u′ (√d_j)_i + (Λ⋆_j)_i = 0. This result and (B.24) give us (4.1a) for the AGM case. As for the IPM case, because lim_{x→0+} x − d log x = +∞, it follows that (u⋆_j)_i ̸= 0 for all i, so we only need to consider the nonzero case. Hence, verifying (4.1a) for IPM requires a computation similar to the nonzero case for AGM, so it is omitted for brevity.
+ At iteration tkℓ, the first-order optimality condition of (3.6c) is
+
+ −y^{tkℓ}/λ − (β2/λ) ( v^{tkℓ} − ∇z^{tkℓ} ) ∈ ∂( ∥v^{tkℓ+1}∥1 − α∥v^{tkℓ+1}∥2,1 ).    (B.25)
+
+ By (B.13) and (B.21), we have
+
+ lim_{ℓ→∞} −y^{tkℓ}/λ − (β2/λ)( v^{tkℓ} − ∇z^{tkℓ} ) = −y⋆/λ,
+ lim_{ℓ→∞} v^{tkℓ+1} = lim_{ℓ→∞} v^{tkℓ} = v⋆.
+
+ By continuity, lim_{ℓ→∞} ∥v^{tkℓ+1}∥1 − α∥v^{tkℓ+1}∥2,1 = ∥v⋆∥1 − α∥v⋆∥2,1. Altogether, by the closedness of the limiting subdifferential, we establish (4.1b).
+ Lastly, we prove (4.1e′)-(4.1f′). At iteration tkℓ, we have
+
+ ∇ωL(ω^{tkℓ}) = − Σ_{j=1}^N β1 (Sj z^{tkℓ})* ◦ ( F^{−1}( u^{tkℓ+1}_j + Λ^{tkℓ}_j/β1 ) − ω^{tkℓ} ◦ Sj z^{tkℓ} ).
+
+ Taking the limit, we see that
+
+ ∇ωL(Z⋆, Ω⋆) = − Σ_{j=1}^N β1 (Sj z⋆)* ◦ ( F^{−1}( u⋆_j + Λ⋆_j/β1 ) − ω⋆ ◦ Sj z⋆ ) = lim_{ℓ→∞} ∇ωL(ω^{tkℓ}).
+
+ Applying Fatou's lemma to (B.16), we have E[∥∇ωL(Z⋆, Ω⋆)∥²₂] ≤ liminf_{ℓ→∞} E[∥∇ωL(ω^{tkℓ})∥²₂] = 0. Proving (4.1f′) is similar to proving (4.1e′), so we omit the proof. Altogether, (Z⋆, Ω⋆) is a stochastic KKT point a.s.
+ REFERENCES
+ [1] H. H. Bauschke, P. L. Combettes, and D. R. Luke, Hybrid projection–reflection method for phase retrieval, Journal of the Optical Society of America A, 20 (2003), pp. 1025–1034.
+ [2] J. Bolte, S. Sabach, and M. Teboulle, Proximal alternating linearized minimization for nonconvex and nonsmooth problems, Mathematical Programming, 146 (2014), pp. 459–494.
+ [3] L. Bottou, F. E. Curtis, and J. Nocedal, Optimization methods for large-scale machine learning, SIAM Review, 60 (2018), pp. 223–311.
+ [4] S. Boyd, N. Parikh, and E. Chu, Distributed Optimization and Statistical Learning via the Alternating Direction Method of Multipliers, Now Publishers Inc, 2011.
+ [5] K. Bui, F. Park, Y. Lou, and J. Xin, A weighted difference of anisotropic and isotropic total variation for relaxed Mumford–Shah color and multiphase image segmentation, SIAM Journal on Imaging Sciences, 14 (2021), pp. 1078–1113.
+ [6] E. J. Candès, X. Li, and M. Soltanolkotabi, Phase retrieval via Wirtinger flow: Theory and algorithms, IEEE Transactions on Information Theory, 61 (2015), pp. 1985–2007.
+ [7] E. J. Candès, T. Strohmer, and V. Voroninski, PhaseLift: Exact and stable signal recovery from magnitude measurements via convex programming, Communications on Pure and Applied Mathematics, 66 (2013), pp. 1241–1274.
+ [8] H. Chang, P. Enfedaque, and S. Marchesini, Blind ptychographic phase retrieval via convergent alternating direction method of multipliers, SIAM Journal on Imaging Sciences, 12 (2019), pp. 153–185.
+ [9] H. Chang, R. Glowinski, S. Marchesini, X.-C. Tai, Y. Wang, and T. Zeng, Overlapping domain decomposition methods for ptychographic imaging, SIAM Journal on Scientific Computing, 43 (2021), pp. B570–B597.
+ [10] H. Chang, Y. Lou, Y. Duan, and S. Marchesini, Total variation–based phase retrieval for Poisson noise removal, SIAM Journal on Imaging Sciences, 11 (2018), pp. 24–55.
+ [11] H. Chang, Y. Lou, M. K. Ng, and T. Zeng, Phase retrieval from incomplete magnitude information via total variation regularization, SIAM Journal on Scientific Computing, 38 (2016), pp. A3672–A3695.
+ [12] H. Chang, S. Marchesini, Y. Lou, and T. Zeng, Variational phase retrieval with globally convergent preconditioned proximal algorithm, SIAM Journal on Imaging Sciences, 11 (2018), pp. 56–93.
+ [13] Y. Chen and E. J. Candès, Solving random quadratic systems of equations is nearly as easy as solving linear systems, Communications on Pure and Applied Mathematics, 70 (2017), pp. 822–883.
+ [14] L. De Caro, D. Altamura, M. Arciniegas, D. Siliqi, M. R. Kim, T. Sibillano, L. Manna, and C. Giannini, Ptychographic imaging of branched colloidal nanocrystals embedded in free-standing thick polystyrene films, Scientific Reports, 6 (2016), pp. 1–8.
+ [15] P. Enfedaque, H. Chang, B. Enders, D. Shapiro, and S. Marchesini, High performance partial coherent X-ray ptychography, in International Conference on Computational Science, Springer, 2019, pp. 46–59.
+ [16] S. Esedoḡlu and S. J. Osher, Decomposition of images by the anisotropic Rudin–Osher–Fatemi model, Communications on Pure and Applied Mathematics, 57 (2004), pp. 1609–1626.
+ [17] A. Fannjiang and T. Strohmer, The numerics of phase retrieval, Acta Numerica, 29 (2020), pp. 125–228.
+ [18] P. Goyal, P. Dollár, R. Girshick, P. Noordhuis, L. Wesolowski, A. Kyrola, A. Tulloch, Y. Jia, and K. He, Accurate, large minibatch SGD: Training ImageNet in 1 hour, arXiv preprint arXiv:1706.02677, (2017).
+ [19] A. Greenbaum, Behavior of slightly perturbed Lanczos and conjugate-gradient recurrences, Linear Algebra and its Applications, 113 (1989), pp. 7–63.
+ [20] K. He, X. Zhang, S. Ren, and J. Sun, Deep residual learning for image recognition, in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 770–778.
+ [21] R. Hesse, D. R. Luke, S. Sabach, and M. K. Tam, Proximal heterogeneous block implicit-explicit method and application to blind ptychographic diffraction imaging, SIAM Journal on Imaging Sciences, 8 (2015), pp. 426–457.
+ [22] M. R. Hestenes and E. Stiefel, Methods of conjugate gradients for solving linear systems, Journal of Research of the National Bureau of Standards, 49 (1952), p. 409.
+ [23] R. Johnson and T. Zhang, Accelerating stochastic gradient descent using predictive variance reduction, Advances in Neural Information Processing Systems, 26 (2013), pp. 315–323.
+ [24] Y.-F. Ke and C.-F. Ma, Alternating direction methods for solving a class of Sylvester-like matrix equations, Linear and Multilinear Algebra, 65 (2017), pp. 2268–2292.
+ [25] H. Liu, K. Deng, H. Liu, and Z. Wen, An entropy-regularized ADMM for binary quadratic programming, Journal of Global Optimization, (2022), pp. 1–33.
+ [26] Q. Liu, X. Shen, and Y. Gu, Linearized ADMM for nonconvex nonsmooth optimization with convergence analysis, IEEE Access, 7 (2019), pp. 76131–76144.
+ [27] Y. Lou and M. Yan, Fast L1–L2 minimization via a proximal operator, Journal of Scientific Computing, 74 (2018), pp. 767–785.
+ [28] Y. Lou, P. Yin, Q. He, and J. Xin, Computing sparse representation in a highly coherent dictionary based on difference of L1 and L2, Journal of Scientific Computing, 64 (2015), pp. 178–196.
+ [29] Y. Lou, P. Yin, and J. Xin, Point source super-resolution via non-convex L1 based methods, Journal of Scientific Computing, 68 (2016), pp. 1082–1100.
+ [30] Y. Lou, T. Zeng, S. Osher, and J. Xin, A weighted difference of anisotropic and isotropic total variation model for image processing, SIAM Journal on Imaging Sciences, 8 (2015), pp. 1798–1823.
+ [31] D. R. Luke, Relaxed averaged alternating reflections for diffraction imaging, Inverse Problems, 21 (2004), p. 37.
+ [32] A. Maiden, D. Johnson, and P. Li, Further improvements to the ptychographical iterative engine, Optica, 4 (2017), pp. 736–745.
+ [33] A. M. Maiden and J. M. Rodenburg, An improved ptychographical phase retrieval algorithm for diffractive imaging, Ultramicroscopy, 109 (2009), pp. 1256–1262.
+ [34] J. Marrison, L. Räty, P. Marriott, and P. O'Toole, Ptychography – a label free, high-contrast imaging technique for live cells using quantitative phase information, Scientific Reports, 3 (2013), pp. 1–7.
+ [35] Y. S. Nashed, D. J. Vine, T. Peterka, J. Deng, R. Ross, and C. Jacobsen, Parallel ptychographic reconstruction, Optics Express, 22 (2014), pp. 32082–32097.
+ [36] L. M. Nguyen, J. Liu, K. Scheinberg, and M. Takáč, SARAH: A novel method for machine learning problems using stochastic recursive gradient, in International Conference on Machine Learning, PMLR, 2017, pp. 2613–2621.
+ [37] Y. Ouyang, Y. Chen, G. Lan, and E. Pasiliao Jr, An accelerated linearized alternating direction method of multipliers, SIAM Journal on Imaging Sciences, 8 (2015), pp. 644–681.
+ [38] F. Park, Y. Lou, and J. Xin, A weighted difference of anisotropic and isotropic total variation for relaxed Mumford–Shah image segmentation, in 2016 IEEE International Conference on Image Processing (ICIP), IEEE, 2016, pp. 4314–4318.
+ [39] M. Pham, A. Rana, J. Miao, and S. Osher, Semi-implicit relaxed Douglas–Rachford algorithm (sDR) for ptychography, Optics Express, 27 (2019), pp. 31246–31260.
+ [40] H. Robbins and S. Monro, A stochastic approximation method, The Annals of Mathematical Statistics, (1951), pp. 400–407.
+ [41] R. T. Rockafellar and R. J.-B. Wets, Variational Analysis, vol. 317, Springer Science & Business Media, 2009.
+ [42] J. M. Rodenburg and H. M. Faulkner, A phase retrieval algorithm for shifting illumination, Applied Physics Letters, 85 (2004), pp. 4795–4797.
+ [43] L. I. Rudin, S. Osher, and E. Fatemi, Nonlinear total variation based noise removal algorithms, Physica D: Nonlinear Phenomena, 60 (1992), pp. 259–268.
+ [44] Y. Shechtman, Y. C. Eldar, O. Cohen, H. N. Chapman, J. Miao, and M. Segev, Phase retrieval with application to optical imaging: a contemporary overview, IEEE Signal Processing Magazine, 32 (2015), pp. 87–109.
+ [45] A. Suzuki, K. Shimomura, M. Hirose, N. Burdet, and Y. Takahashi, Dark-field X-ray ptychography: Towards high-resolution imaging of thick and unstained biological specimens, Scientific Reports, 6 (2016), pp. 1–9.
+ [46] P. Thibault, M. Dierolf, O. Bunk, A. Menzel, and F. Pfeiffer, Probe retrieval in ptychographic coherent diffractive imaging, Ultramicroscopy, 109 (2009), pp. 338–343.
+ [47] I. Waldspurger, A. d'Aspremont, and S. Mallat, Phase recovery, MaxCut and complex semidefinite programming, Mathematical Programming, 149 (2015), pp. 47–81.
+ [48] A. Walther, The question of phase retrieval in optics, Optica Acta: International Journal of Optics, 10 (1963), pp. 41–49.
+ [49] Y. Wang, W. Yin, and J. Zeng, Global convergence of ADMM in nonconvex nonsmooth optimization, Journal of Scientific Computing, 78 (2019), pp. 29–63.
+ [50] Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, Image quality assessment: from error visibility to structural similarity, IEEE Transactions on Image Processing, 13 (2004), pp. 600–612.
+ [51] Z. Wen, C. Yang, X. Liu, and S. Marchesini, Alternating direction methods for classical and ptychographic phase retrieval, Inverse Problems, 28 (2012), p. 115010.
+ [52] S. Wu Fung and Z. Di, Multigrid optimization for large-scale ptychographic phase retrieval, SIAM Journal on Imaging Sciences, 13 (2020), pp. 214–233.
+ [53] Y. Xu, W. Yin, Z. Wen, and Y. Zhang, An alternating direction algorithm for matrix completion with nonnegative factors, Frontiers of Mathematics in China, 7 (2012), pp. 365–384.
+ [54] P. Yin, Y. Lou, Q. He, and J. Xin, Minimization of ℓ1−2 for compressed sensing, SIAM Journal on Scientific Computing, 37 (2015), pp. A536–A563.
+ [55] P. Yin and J. Xin, PhaseLiftOff: an accurate and stable phase retrieval method based on difference of trace and Frobenius norms, Communications in Mathematical Sciences, 13 (2014), pp. 1033–1049.
+ [56] Z. Yuan, H. Wang, and Q. Wang, Phase retrieval via sparse Wirtinger flow, Journal of Computational and Applied Mathematics, 355 (2019), pp. 162–173.
+ [57] H. Zhang, Y. Zhou, Y. Liang, and Y. Chi, A nonconvex approach for phase retrieval: Reshaped Wirtinger flow and incremental algorithms, Journal of Machine Learning Research, 18 (2017).
+ [58] L. Zhou, J. Song, J. S. Kim, X. Pei, C. Huang, M. Boyce, L. Mendonça, D. Clare, A. Siebert, C. S. Allen, et al., Low-dose phase retrieval of biological specimens using cryo-electron ptychography, Nature Communications, 11 (2020), pp. 1–9.
+
4NE4T4oBgHgl3EQfbQzc/content/2301.05072v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d0c14dfa338a6b80b3c47cf735407553623941fa53b1994b16ba7e229d410ee
3
+ size 459192
4NE4T4oBgHgl3EQfbQzc/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1cf69465bcfbe01a05409835ee46a354730722c252c65baeb499109bb0cf3179
3
+ size 1376301
4NE4T4oBgHgl3EQfbQzc/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8760f97456fe44e7ff1611821163cc05e71f981d88924152100ae5b3accb733
3
+ size 59611
4dAzT4oBgHgl3EQfEPoC/content/tmp_files/2301.00988v1.pdf.txt ADDED
@@ -0,0 +1,1682 @@
1
+ Under consideration for publication in J. Plasma Phys.
3
+ Energetic bounds on gyrokinetic instabilities.
4
+ Part III. Generalized free energy.
5
+ G. G. Plunk1†, and P. Helander1
6
+ 1Max-Planck-Institut für Plasmaphysik, 17491 Greifswald, Germany
7
+ (Received xx; revised xx; accepted xx)
8
+ Free energy, widely used as a measure of turbulence intensity in weakly collisional
9
+ plasmas, has been recently found to be a suitable basis to describe both linear and
10
+ nonlinear growth in a wide class of gyrokinetic systems. The simplicity afforded by this
11
+ approach is accompanied by some drawbacks, notably the lack of any explicit treatment
12
+ of wave-particle effects, which makes the theory unable to describe things like stability
13
+ thresholds or dependence on the geometry of the background magnetic field. As a step
14
+ toward overcoming these limitations, we propose an extension of the theory based on
15
+ a generalization of free energy. With this it is demonstrated that resonance effects
16
+ are recovered, and the bounds on growth are significantly reduced. The simplicity
17
+ and efficient computation of the associated “optimal” growth rates makes the theory
18
+ potentially applicable to stellarator optimization.
19
+ 1. Introduction
20
+ This is the third paper in a series (Helander and Plunk 2022; Plunk and Helander 2022),
21
+ in which we develop a linear and nonlinear stability theory based on gyrokinetic energy
22
+ balance. The last two papers used Helmholtz free energy, and introduced the concept of
23
+ optimal mode growth for fully electromagnetic gyrokinetics. The present paper proposes
24
+ a generalized energetic measure of fluctuations, allowing the inclusion of additional
25
+ instability mechanisms. We do this first for a simple case, namely the electrostatic limit
26
+ (low plasma β) with only one kinetic species (ions), with the electrons being treated
27
+ adiabatically. These simplifications limit the application to ion-temperature-gradient
28
+ (ITG) driven turbulence, though the central result of the paper is capable of treating
29
+ completely general details of the magnetic geometry.
30
+ Free energy is a useful concept for understanding nonlinear and linear aspects of
31
+ plasma turbulence. At the level of linear instabilities it is common to speak of a source
32
+ of free energy that drives modes. Indeed, without a source of free energy, provided
33
+ by background plasma gradients (density, temperature, flows), there can be no linear
34
+ instabilities (nor can there be subcritical turbulence; Landreman et al. 2015; Plunk and
+ Helander 2022). However, there is usually another ingredient that arises in the detailed
36
+ analysis of normal linear instabilities, namely the wave-particle resonance. In gyrokinetic
37
+ theory, this involves parallel motion (along the magnetic field) and magnetic drift, and
38
+ the resonance is physically linked to the work that the electrostatic field performs on
39
+ gyrocenter motion. However, the terms needed to capture this do not contribute to free
40
+ energy balance, and the influence of resonance therefore cannot be accounted for by the
41
+ optimal modes that we introduced in our previous works.
42
+ In this work we propose a new measure of gyrokinetic fluctuations, a generalization
43
+ of the concept of free energy, that incorporates the resonance mechanism, and, via the
44
+ † Email address for correspondence: gplunk@ipp.mpg.de
+ magnetic drift, the full details of the background magnetic geometry. We demonstrate
50
+ the existence of a class of quadratic measures closely related to Helmholtz free energy
51
+ that behave as positive-definite norms for fluctuations in the distribution function. The
52
+ corresponding energy balance equation is then used to derive a theory of optimal modes
53
+ that most efficiently extract this energy from its source. The growth rate of these optimal
54
+ modes provides a rigorous upper bound on the growth rate of linear instabilities, and this
55
+ bound is shown to be lower than that obtained previously from Helmholtz free energy.
56
+ By studying some simple limits, we show that we recover some expected behavior of both
57
+ the slab and toroidal branches of the ITG mode.
58
+ 2. Definitions and gyrokinetic energy balance
59
+ The ion gyrokinetic equation in the electrostatic limit is written
+
+ ∂gk/∂t + v∥ ∂gk/∂l + i ω̃d gk + (1/B²) Σ_{k′} B · (k × k′) δφ̄_{k′} g_{k−k′} = (ei F0 / Ti) ( ∂/∂t + i ω^T_∗ ) δφ̄k,    (2.1)
+ where g is the gyro-center dependent part of the perturbed ion distribution function, i.e. fi = (1 − eiδφ(r)/Ti) Fi0 + g(R, Ei, µi, t). Its phase space variables are the energy Ea = mav²/2 + eaΦ(ψ) and the magnetic moment µa = mav²⊥/(2B), and the perpendicular wavenumber is k = k⊥ = kψ∇ψ + kα∇α, with kψ and kα independent of the arc length l along the magnetic field, and ψ and α defined via B = Bb̂ = ∇ψ × ∇α. We neglect collisions here†, and use the simplified notation gk = gi,k, ω∗ = ω∗i, etc., because the adiabatic approximation ge,k = 0 is assumed throughout‡. We will also assume kρi ∼ 1,
+ implying kρe ≪ 1. The gyrokinetic free energy balance equation obtained in this limit reads
+
+ (d/dt) Σ_k H = 2 Σ_k D,    (2.2)
+
+ where the drive term D is
+
+ D(k, t) = Im ei ⟨ ∫ gk ω^T_∗ δφ̄*_k d³v ⟩,    (2.3)
+ and the free energy, expressed in terms of the gyrocenter distribution function, is
+
+ H(k, t) = ⟨ Ti ∫ (|gk|²/Fi0) d³v − Σ_a (na e²_a / Ta) |δφk|² ⟩,    (2.4)
+ where the space average is defined as (see also Helander and Plunk (2022) for generalizations)
+
+ ⟨· · ·⟩ = lim_{L→∞} ∫_{−L}^{L} (· · ·) dl/B / ∫_{−L}^{L} dl/B.    (2.5)
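+ As a simple numerical illustration of this average (our own sketch, with hypothetical field-line arrays; the L → ∞ limit is approximated by a long but finite field line), the dl/B-weighted average can be computed by quadrature:
+
+ import numpy as np
+
+ def field_line_average(f, l, B):
+     """Field-line average of f(l) with the dl/B weight of (2.5)."""
+     return np.trapz(f / B, l) / np.trapz(1.0 / B, l)
+
+ # Example with a model field strength B(l) = 1 - 0.1*cos(l).
+ l = np.linspace(-100 * np.pi, 100 * np.pi, 200001)
+ B = 1.0 - 0.1 * np.cos(l)
+ print(field_line_average(np.cos(l) ** 2, l, B))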
+ † We do not retain collisions, since we will not be able to fix the sign of their contribution in our later analysis.
+ ‡ Here we do not include the customary correction for the zonal component (Dorland and Hammett 1993), but it does not affect the subsequent analysis, as the growth of this component is always zero because it has no free energy source (D = 0 for kα = 0).
+
+ The diamagnetic frequencies are
+ ω∗a = (kα Ta / ea) (d ln na / dψ),   ω^T_∗a = ω∗a [ 1 + ηa ( ma v² / (2Ta) − 3/2 ) ],
+
+ and the magnetic drift frequency is
+
+ ω̃d = k · vd,
+
+ where the magnetic drift velocity is vd = b̂ × ( (v²⊥/2) ∇ ln B + v²∥ κ ) / Ωi, κ = b̂ · ∇b̂, and Ωa = ea B / ma is the gyrofrequency. Assuming ∇ ln B ≈ κ (low plasma β), we can separate the drift frequency into velocity-dependent and space-dependent factors following Plunk et al. (2014)†:
+
+ ω̃d = ωd(l) ( v²⊥ / (2 v²th) + v²∥ / v²th ).    (2.6)
+ (2.6)
179
+ The gyro-averaged electrostatic potential is denoted
180
+ δφk = J0
181
+ �k⊥v⊥
182
+ Ωi
183
+
184
+ δφk,
185
+ and the quasi-neutrality condition is
186
+
187
+ a
188
+ nae2
189
+ a
190
+ Ta
191
+ δφk = ei
192
+
193
+ gkJ0d3v, ,
194
+ (2.7)
195
+ where Jn = Jn(k⊥v⊥/Ωi). Following our previous convention, we define the free energy
196
+ as twice that which appears in some other publications. Henceforth, we suppress the
197
+ k-subscripts.
198
+ 2.1. Electrostatic energy and positive-definiteness of free energy
+ It is useful to decompose the free energy into a part associated with the perturbed
+ distribution function and a part associated with fluctuations in the electrostatic field,
+ i.e.
+ H = G + E,   (2.8)
+ where
+ G = −Ti Si = ⟨Ti ∫ (|δF|²/Fi0) d³v⟩,   (2.9)
+ E = ⟨(τ + 1 − Γ0) (ni e²i/Ti) |δφ|²⟩.   (2.10)
+ Recall the conventional definitions Γn(b) = exp(−b) In(b) and b = k⊥² ρi² = k⊥² Ti/(mi Ωi²),
+ and τ = (e Ti)/(ei Te). Note that δF = g − (ei δφ̄/Ti) F0 is the gyro-averaged perturbed
+ distribution function, and these two contributions to H can be identified as the gyrokinetic
+ perturbed entropy and the gyrokinetic field energy.
+ Although the general electromagnetic free energy admits a similar form as Eqn. 2.8
+ (see for instance Helander and Plunk (2022)), we note that the electrostatic limit is
+ distinguished by the fact that the field contribution E is itself a nonlinear invariant of
+ the gyrokinetic system (Schekochihin et al. 2009), and its conservation may be viewed
+ as an additional constraint on the nonlinear dynamics, with consequences e.g. for the
+ cascade and production of large-scale E × B flows (Plunk et al. 2010).
+ For what follows, we need the electrostatic energy balance equation. This is obtained
+ by multiplying the ion gyrokinetic equation by ei δφ̄∗, integrating over velocity, averaging
+ over the parallel coordinate l, and summing over perpendicular wavenumber k, yielding
+ (Helander et al. 2013)
+ (d/dt) Σk E = 2 Σk K,   (2.11)
+ where the drive term K is
+ K = −Re ei ⟨∫ δφ̄∗ (v∥ ∂/∂l + i ω̃d) g d³v⟩.   (2.12)
+ This is composed of two contributions, one coming from the parallel streaming term,
+ and the other coming from the magnetic drift term. The first contribution has a simple
+ physical interpretation, as the rate of energy exchanged between particles and the parallel
+ electric field (i.e. the volume average of the parallel current multiplied by the parallel
+ electric field), while the second term describes an analogous process in the perpendicular
+ direction associated with the drift motion of gyrocenters.
+ Eqn. 2.8 is a physically transparent form that makes it clear that the free energy H is
+ a positive-definite norm for the distribution function g†, i.e.
+ H ⩾ 0, and H = 0 iff g = 0   (2.13)
+ over all of phase space, ℓ and v. To see this, note that the quantities G and E are both
+ positive, i.e. G ⩾ 0, obviously, and E ⩾ 0 because Γ0 ⩽ 1. Therefore if H = 0 then both
+ E = 0 and G = 0. The first implies δφ = 0 everywhere, while the second implies δF = 0
+ over all of phase space; δφ = 0 and δF = 0 obviously imply g = 0.
+ We note that positive-definiteness is a desirable property of an energetic measure
+ that can be useful for setting bounds on the growth rate of fluctuations; if a non-
+ zero fluctuation (g ̸= 0) has zero measure M then the rate of growth d ln M/dt can
+ be unbounded.
+ Although we mainly consider a plasma with a single kinetic ion species and adiabatic
+ electrons, the concepts and the formalism carry over to the more general case of a plasma
+ with an arbitrary number of kinetic species, as shown in Appendix A. An important
+ limitation, however, is that magnetic fluctuations and collisions are neglected.
+ † By extension, using Eqn. 2.7, H can be shown to also be a positive-definite norm for the total
+ deviation of the distribution function δf = g − (ei δφ/Ti) F0 from the zeroth-order Maxwellian.
+ 2.2. Generalized Free Energy
+ The positive definiteness of H suggests a family of related quadratic energetic measures
+ that are also positive definite. In particular, it is clear that something of the form
+ H̃ = H − ∆E,   (2.14)
+ will be positive-definite, by the same arguments as in the previous section, for particular
+ values of the parameter ∆. For instance, the choice ∆ < 1 allows a trivial generalization
+ of the arguments, but we will see that the permissible range can be extended beyond this.
+ To find a range of permissible values of ∆, we will consider a diagonalization of H̃,
+ meaning that we will define a distribution function g̃ which allows the energy to be
+ expressed using the Euclidean norm,
+ H̃ = ||g̃||² = (g̃, g̃),   (2.15)
+ where we have introduced the inner product
+ (g̃1, g̃2) = ⟨Ti ∫ (g̃∗1 g̃2/F0) d³v⟩.   (2.16)
+ To find the relationship between g̃ and g, we introduce the Ansatz g̃ = g − ν J0 F0 ei δφ/Ti,
+ substitute this into Eqn. 2.15, using also Eqn. 2.10, and solve for the free parameter ν,
+ yielding
+ ν = (1/Γ0) [1 + τ − √((1 + τ − Γ0)(1 + τ − ∆Γ0))],   (2.17)
+ where we have taken the negative root for convenience. Observe that in order for ν to be
+ real, we must have
+ ∆ ⩽ (1 + τ)/Γ0.   (2.18)
+ The parameter ∆ can of course be negative, in which case its magnitude is unbounded.
+ Noting that Γ0 generally depends on k, we may also assume the more restrictive ∆ ⩽
+ (1 + τ) to ensure that H̃ remains a nonlinear invariant.
+ We pause to note that the choice ∆ = 0 yields a novel form of the conventional
+ (Helmholtz) free energy, immediately suggesting what can be considered as the phase-
+ space density of free energy, namely the quantity Ti |g̃|²/F0, for which there has not yet
+ been an expression available.†
+ It is useful now to write quasi-neutrality in terms of g̃,
+ (ei/Ti) δφ = (α/ni) ∫ g̃ J0 d³v,   (2.19)
+ where
+ α = 1/√((1 + τ − Γ0)(1 + τ − ∆Γ0)).   (2.20)
+ Finally, we can show that H̃ is positive-definite. First, positivity follows from Eqn. 2.15,
+ and it is obvious from Eqn. 2.7 that if g = 0 then δφ = 0, so that E and H both vanish,
+ implying H̃ = 0. On the other hand, if we assume that H̃ = 0, then Eqn. 2.15 implies that
+ g̃ = 0, and Eqn. 2.19 implies that δφ = 0, from which we conclude g = 0. In summary,
+ H̃ ⩾ 0 and H̃ = 0 iff g = 0.
+ † The idea for a phase-space density of free energy (i.e. a quantity that can be directly
+ integrated over phase space to yield the total free energy) was suggested by Teaca.
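+ As a quick numerical illustration of Eqns. 2.17, 2.18 and 2.20 (an illustrative sketch with
+ assumed parameter values, not part of the original analysis), the transformation parameter ν
+ and the factor α can be evaluated with scipy, using Γn(b) = exp(−b) In(b):
+ import numpy as np
+ from scipy.special import ive  # ive(n, b) = exp(-b) * I_n(b) = Gamma_n(b)
+
+ def nu_alpha(b, tau, Delta):
+     """Evaluate nu of Eq. (2.17) and alpha of Eq. (2.20)."""
+     Gamma0 = ive(0, b)
+     rad = (1 + tau - Gamma0) * (1 + tau - Delta * Gamma0)
+     if rad < 0:
+         raise ValueError("Delta violates the reality bound (2.18)")
+     return (1 + tau - np.sqrt(rad)) / Gamma0, 1.0 / np.sqrt(rad)
+
+ b, tau = 1.0, 1.0                # assumed illustrative values
+ print(nu_alpha(b, tau, 0.0))     # Delta = 0: conventional Helmholtz free energy
+ print(nu_alpha(b, tau, -5.0))    # Delta < 0 is always permitted
+ print((1 + tau) / ive(0, b))     # upper limit on Delta from Eq. (2.18)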
+ 3. Modes of optimal growth
+ A key point in introducing the generalization of free energy H̃ is that this quantity
+ introduces wave-particle effects (parallel resonance and drift resonance) that enter the
+ electrostatic energy balance equation, Eqn. 2.11. Note that the "conventional" Helmholtz
+ free energy is included as the limit ∆ = 0, and so the most stringent bound on growth
+ obtained from the generalized free energy will be at least as good as the known bound
+ obtained from the Helmholtz free energy.
+ Note that, as long as the parameter ∆ is independent of k, the quantity H̃ is conserved
+ by the nonlinearity, i.e. under summation over k. This is because it is a linear combination
+ of two nonlinear invariants. One simply combines Eqns. 2.2 and 2.11 to obtain
+ (d/dt) Σk H̃ = 2 Σk (D − ∆K),   for ∆ independent of k,   (3.1)
+ i.e. the change of this measure is due to the drive terms of electrostatic and free energy,
+ and it is otherwise conserved by the turbulent interactions. It is potentially useful to also
+ consider ∆ that does depend on k, for the purpose of obtaining bounds on linear growth,
+ but the nonlinear implications will be less clear in that case.
+ In direct analogy to how modes of optimal free energy growth were defined, we
+ introduce a rate Λ,
+ Λ = (D − ∆K)/H̃,   (3.2)
+ to be optimized over the space of ion distribution functions g. We note the bound on
+ conventional gyrokinetic instability growth rates,
+ γL ⩽ max_g Λ.   (3.3)
+ Having already found a diagonal form of the generalized free energy, Eqn. 2.15, we
+ need not use a variational approach to find the states of extremal Λ. We simply identify
+ the Hermitian linear operators associated with the input of free energy and electrostatic
+ energy, i.e.
+ D = (g̃, D g̃),   (3.4)
+ K = (g̃, K g̃).   (3.5)
+ Using Eqn. 2.19 and Eqns. 2.3 and 2.12, and some straightforward algebra (see Appendix
+ B), we obtain
+ D g̃ = (iα/(2ni)) J0 F0 η ω∗ ∫ d³v′ J′0 g̃′ [(v/vth)² − (v′/vth)²],   (3.6)
+ where primes denote evaluation at v′ and vth = √(2Ti/mi). For convenience, the operator
+ K can be split into its parallel and perpendicular components as K = K∥ + Kd, for which
+ we obtain
+ Kd g̃ = (iα/(2ni)) ωd(ℓ) F0 J0 ∫ d³v′ J′0 g̃′ {[(v⊥/(√2 vth))² + (v∥/vth)²] − [(v′⊥/(√2 vth))² + (v′∥/vth)²]},   (3.7)
+ and
+ K∥ g̃ = (α/(2ni)) F0 {J0 [−B (∂/∂l)((1/B) ∫ d³v′ v′∥ J′0 g̃′) + ∫ d³v′ v′∥ (∂J′0/∂l) g̃′] + v∥ (∂/∂l)(J0 ∫ d³v′ J′0 g̃′)}.   (3.8)
+ In deriving Eqn. 3.8, it is important to note that the parallel derivative is taken at fixed
+ magnetic moment and particle energy, and that the velocity-space volume element d³v
+ is proportional to B/v∥ in these variables. More details are given in Appendix A. The
+ kinetic eigenvalue problem can now be stated as
+ Λ g̃ = (D − ∆K) g̃,   (3.9)
+ where solutions (Λ, g̃(l, v)) realize modes of optimal growth of H̃. The analysis of this
+ eigenproblem is greatly simplified by adopting a moment form.
+ 3.1. Moment form of eigenproblem
+ As found in the preceding papers, there are natural moments appearing in the
+ energy input terms that can be identified to reduce the dimensionality of the problem
+ substantially. Upon inspecting the energy balance equations one finds the following key
+ dimensionless integrals:
+ κ1 = ∫ d³v J0 g̃/ni,   (3.10)
+ κ2 = ∫ d³v (v²/vth²) J0 g̃/ni,   (3.11)
+ κ3 = ∫ d³v [v⊥²/(2vth²) + v∥²/vth²] J0 g̃/ni,   (3.12)
+ κ4 = ∫ d³v (v∥/vth) J0 g̃/ni,   (3.13)
+ κ5 = ∫ d³v (v∥/vth) (∂J0/∂l) g̃/ni,   (3.14)
+ where κ1 is a density-like moment, κ2 and κ3 are pressure-like, κ4 is the parallel ion flow,
+ while κ5 is more abstract.
+ It is easy to recognize these integrals on the right-hand side of Eqns. 3.6, 3.7 and
+ 3.8, and straightforward to rewrite those equations in moment form. The dimensional
+ reduction is achieved by taking moments of these equations to obtain a coupled set
+ of five fluid equations. These, which are given in Appendix C, can be combined, leading,
+ after lengthy algebra, to a relatively simple second-order ordinary differential equation,
+ the main result of this paper:
+ [4Λ²/α² + (∆ωd G3 − ηω∗ G1)² − G0 ((ηω∗)² G2 − 2∆ωd ηω∗ G4 + ∆²ωd² G5)] κ1
+   = ∆² vth² G0 B [−(∂/∂l)((G0,2/B) ∂κ1/∂l) + (G′′0,2/B) κ1 − (∂/∂l)(G′0,2/B) κ1].   (3.15)
+ The functions Gm,n, G′m,n, and G′′m,n, which depend on arc length via b(l) and B(l), are
+ defined in terms of integrals involving Bessel functions, and are evaluated in Appendix
+ D. The other b-dependent factors (G0–G5) can be expressed in terms of Gm,n, and are
+ evaluated in terms of more elementary Bessel functions in Appendix D.1.
+ In Eqn. 3.15 we see the eigenvalue Λ entering quadratically, reflecting the fact that
+ there will be two real roots, one positive and one negative, owing to the Hermiticity and
+ time-reversal symmetry of the full eigenproblem, Eqn. 3.9. Note that the terms arising
+ from the parallel drive of electrostatic energy are placed on the right-hand side. In the
+ following section, we will consider some simple limits of this equation, and leave its more
+ general solution for a future publication.
+ 4. Simple limits
+ In this section we will consider some simple limits of Eqn. 3.15, and draw some
+ comparisons with the linear theory of the main instability targeted by this paper, the
+ ion temperature gradient (ITG) mode (see for instance Plunk et al. (2014)). To start,
+ we note that taking ∆ = 0, so that H̃ becomes the conventional Helmholtz free energy,
+ yields
+ Λ² = [(ηω∗)²/(4(1 + τ − Γ0)(1 + τ))] (G0 G2 − G1²),   (4.1)
+ which matches Eqn. 6.20 of Helander and Plunk (2022).
+ In considering other simplifications, we first should note that the adiabatic electron
+ approximation already neglects a trapped particle population, which is not really consistent
+ unless we take the magnetic field strength to be independent of arc length,
+ ∂B/∂l = 0.   (4.2)
+ We have avoided making this approximation explicitly, since the present paper lays the
+ foundation for extensions in which it will be useful to include variation in B. Making
+ the approximation now leads to minor simplifications of Eqn. 3.15, where all the explicit
+ factors of B drop out of the right-hand side. A more significant simplification is achieved
+ by assuming unsheared and uniform magnetic geometry, in particular
+ ∂b/∂l = 0,   (4.3)
+ ∂ωd/∂l = 0.   (4.4)
+ In this limit, all of the coefficients of Eqn. 3.15 are constants, and a simple dispersion
+ relation is then obtained by taking ∂κ1/∂l = i k∥ κ1. We find
+ 4Λ²/α² + (∆ωd G3 − ηω∗ G1)² − G0 ((ηω∗)² G2 − 2∆ωd ηω∗ G4 + ∆²ωd² G5) = ∆² k∥² vth² G0²/2,   (4.5)
+ where we have used G0,2 = G0/2. As noted in Section 2.2, the quantity ∆ is a free
+ parameter, over which we can optimize Λ to improve the bounds on the growth rate of
+ fluctuations.
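+ The following Python sketch (an illustration with assumed parameter values, not from the
+ original paper; it uses the explicit G0–G5 of Appendix D.1 and α of Eqn. 2.20) evaluates
+ Λ² from Eqn. 4.5 and scans ∆ for the tightest bound:
+ import numpy as np
+ from scipy.special import ive  # ive(n, b) = exp(-b) I_n(b) = Gamma_n(b)
+
+ def G_factors(b):
+     """G0..G5 of Appendix D.1, Eqns. D 19 - D 24."""
+     g0, g1, g2 = ive(0, b), ive(1, b), ive(2, b)
+     return [g0,
+             (1.5 - b)*g0 + b*g1,
+             ((6*b**2 - 20*b + 15)*g0 + 2*b*((10 - 4*b)*g1 + b*g2)) / 4,
+             (b*g1 - (b - 2)*g0) / 2,
+             ((3*b**2 - 11*b + 10)*g0 + b*((11 - 4*b)*g1 + b*g2)) / 4,
+             ((3*b**2 - 12*b + 14)*g0 + b*(b*g2 - 4*(b - 3)*g1)) / 8]
+
+ def Lambda2(Delta, b, tau, eta_ws, wd, kpar_vth):
+     """Lambda^2 from the dispersion relation, Eq. (4.5)."""
+     G0, G1, G2, G3, G4, G5 = G_factors(b)
+     alpha2 = 1.0 / ((1 + tau - ive(0, b)) * (1 + tau - Delta*ive(0, b)))
+     rhs = (G0*(eta_ws**2*G2 - 2*Delta*wd*eta_ws*G4 + Delta**2*wd**2*G5)
+            - (Delta*wd*G3 - eta_ws*G1)**2
+            + Delta**2 * kpar_vth**2 * G0**2 / 2)
+     return alpha2 * rhs / 4
+
+ Deltas = np.linspace(-10, 1.9, 400)   # tau = 1: Delta must stay below 1 + tau = 2
+ vals = [Lambda2(D, b=0.1, tau=1.0, eta_ws=1.0, wd=0.3, kpar_vth=0.2) for D in Deltas]
+ print("tightest bound near Delta =", Deltas[int(np.argmin(vals))])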
+ 4.1. Slab ITG mode
+ Setting ωd = 0 leaves only the slab branch of the ITG mode, driven by the temperature
+ gradient and involving the ion parallel resonance. Eqn. 4.5 reduces to
+ 4Λ²/((ηω∗)² α²) = G0 G2 − G1² + ∆² κ∥⁻² G0²/2,   (4.6)
+ where κ∥ = ηω∗/(k∥ vth). Because G0 G2 − G1² ⩾ 0, the two contributions on the right-hand
+ side are both positive, but the solution for which Λ is minimal is actually not obtained
+ for ∆ = 0, due to the implicit dependence of α on ∆ given by Eqn. 2.20.
+ To obtain the value of ∆ which yields an optimal bound, we can look for extrema of
+ Λ²/(ηω∗)², i.e.
+ (d/d∆) [(G0 G2 − G1² + ∆² κ∥⁻² G0²/2) / ((1 + τ − Γ0)(1 + τ − ∆Γ0))] = 0.   (4.7)
+ This results in a quadratic equation for ∆ that is still rather complicated, so we will
+ consider the limit b → 0; see Appendix D.2 for the relevant limits of Gm,n, etc. Applying
+ the limit to Eqn. 4.6 yields
+ Λ²/(ηω∗)² = (3 + ∆²/κ∥²)/(8τ(1 + τ − ∆)).   (4.8)
+ This solution diverges as ∆ approaches 1 + τ; recall that this is the upper limit allowed
+ by Eqn. 2.18. It also grows in an unbounded fashion as ∆ → −∞. There is an optimal
+ value giving minimal |Λ|, obtained by solving Eqn. 4.7 in this limit. This solution, denoted
+ ∆min, is
+ ∆min = 1 + τ − √((1 + τ)² + 3κ∥²),   (4.9)
+ where the negative root has been selected to be consistent with Eqn. 2.18. Substituting
+ this solution into Eqn. 4.8 gives
+ Λ²min = [(ηω∗)²/(4 κ̄∥² τ(1 + τ))] (√(1 + 3κ̄∥²) − 1),   (4.10)
+ where we define κ̄∥ = κ∥/(1 + τ). This reaches its maximum value in the limit κ̄∥ → 0,
+ and is a decreasing function of |κ̄∥|, i.e.
+ Λ²min = [3/(8τ(τ + 1))] (ηω∗)²,   for κ̄∥ → 0,
+ Λ²min = [√3/(4τ)] |ηω∗ k∥ vth|,   for |κ̄∥| ≫ 1.   (4.11)
+ Physically, the first result implies that when the drive (ηω∗) is much smaller than the
+ parallel transit frequency (k∥ vth), the best bound is equal to that obtained from the free
+ energy (∆ = 0). In this case, the bound is consistent with expectations for the growth rate
+ of a resonant slab ITG mode, i.e. γL ∼ ηω∗.
+ In the opposite limit (κ̄∥ ≫ 1), however, when the drive is large, i.e. in the so-called
+ non-resonant or "fluid" limit, we obtain a much lower bound, essentially the geometric
+ mean of the drive and the parallel transit frequency k∥ vth. We note that this bound is
+ not as low as what is obtained from the non-resonant solution of the dispersion relation
+ (without density gradient), i.e. γL ∼ (ηω∗)^(1/3) (k∥ vth)^(2/3) (Plunk et al. 2014), but it
+ nevertheless captures the expected weakening (relative to the resonant result) qualitatively.
+ It is interesting to observe that this latter limit corresponds to ∆min → −∞, making
+ H̃ in some sense dominated by the electrostatic component.
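+ A quick numerical cross-check (an illustrative sketch with assumed parameter values) that
+ ∆min of Eqn. 4.9 indeed minimizes the b → 0 slab expression (4.8), and that Eqn. 4.10
+ gives the resulting minimal Λ²/(ηω∗)²:
+ import numpy as np
+ from scipy.optimize import minimize_scalar
+
+ tau, kappa_par = 1.0, 2.0                # kappa_par = eta*omega_* / (k_par v_th)
+ f = lambda D: (3 + D**2/kappa_par**2) / (8*tau*(1 + tau - D))      # Eq. (4.8)
+
+ D_min = 1 + tau - np.sqrt((1 + tau)**2 + 3*kappa_par**2)           # Eq. (4.9)
+ kbar = kappa_par / (1 + tau)
+ L2_min = (np.sqrt(1 + 3*kbar**2) - 1) / (4*kbar**2*tau*(1 + tau))  # Eq. (4.10)
+
+ res = minimize_scalar(f, bounds=(-50.0, 1 + tau - 1e-9), method='bounded')
+ print(D_min, res.x)      # these should agree
+ print(L2_min, res.fun)   # and so should the minimal values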
+ 4.2. Toroidal ITG mode
+ Now taking k∥ vth to be small, we can neglect the right-hand side of Eqn. 4.5, leaving
+ 4Λ²/α² = G0 ((ηω∗)² G2 − 2∆ωd ηω∗ G4 + ∆²ωd² G5) − (∆ωd G3 − ηω∗ G1)².   (4.12)
+ To derive the optimal choice of ∆, we again take the b → 0 limit and obtain from
+ Eqn. 4.12
+ Λ²/(ηω∗)² = (3∆² − 8∆κd + 6κd²)/(16 κd² τ(τ + 1 − ∆)),   (4.13)
+ where we define κd = ηω∗/ωd. Note that the qualitative behavior with ∆ is similar to that
+ of Eqn. 4.6, namely the divergence as ∆ → 1 + τ, and unbounded growth as ∆ → −∞. The
+ key difference here arises in the linear term in the drive parameter κd; this is expected from
+ the theory of the toroidal ITG mode, since the sign of the drift frequency (associated with
+ so-called 'good' and 'bad' magnetic curvature) is important for the resonance.
+ We now find the value of ∆ that minimizes Λ:
+ ∆min = (1 + τ) [1 − √(2κ̄d² − 8κ̄d/3 + 1)],   (4.14)
+ where we define the parameter κ̄d = κd/(1 + τ). Substituting this into Eqn. 4.13 yields
+ Λ²min = (ηω∗)² [8κ̄d (ζ(κ̄d) − 1) + 3 (ζ(κ̄d) − 1)² + 6κ̄d²] / [16 τ(τ + 1) κ̄d² ζ(κ̄d)],   (4.15)
+ with ζ = √(2κ̄d² − 8κ̄d/3 + 1). This expression for Λmin is naturally separated into a
+ factor that depends only on the instability parameter κ̄d, from which we can derive
+ the asymptotic behavior. To show the behavior of this factor we plot the quantity
+ τ(1 + τ) Λ²min/(ηω∗)² in Fig. 1. The overall behavior of Λmin is captured by the following
+ limits:
+ Λ²min = |ηω∗ ωd| (3√2 + 4σ)/(8τ),   for |κ̄d| ≫ 1,
+ Λ²min = (ηω∗)²/(24τ(1 + τ)),   for κ̄d → 0,
+ Λ²min = 3(ηω∗)²/(8τ(1 + τ)),   for κ̄d = 4/3,   (4.16)
+ where we denote σ = ±1 as the sign of κd. At large drive (|κ̄d| ≫ 1; |ηω∗| ≫ |ωd|(1 + τ))
+ we recover the expected non-resonant ("fluid") behavior of the toroidal ITG mode with no
+ density gradient, namely γL ∼ √(ηω∗ωd). Note that this growth rate is much smaller than
+ the bound found by merely considering the Helmholtz free energy (Helander and Plunk
+ 2022). Although we do not see the complete stabilization (Λ = 0) at negative values of
+ κ̄d (opposite sign of ωd and ηω∗) expected from theory, there is a strong asymmetry,
+ with |Λ| having its larger values at positive κ̄d and being comparatively much smaller for
+ negative κ̄d.
+ The value κ̄d = 4/3 (i.e. ηω∗ = 4(1 + τ)ωd/3) achieves the maximal value of Λmin
+ at fixed ηω∗, and is therefore evocative of the resonance condition for the toroidal ITG
+ mode, ηω∗ ∼ ωd (Biglari et al. 1989). This value of κ̄d is obtained by solving for ∆min = 0,
+ explaining why it produces the worst bound, i.e. that given by optimal growth of Helmholtz
+ free energy.
+ Figure 1. Bound of the growth rate of the toroidal ITG mode, obtained from the optimal
+ growth of generalized free energy, plotted versus the instability parameter κ̄d = ηω∗/[ωd(1 + τ)].
+ It is noteworthy that for the limit κ̄d → 0 (ωd ≫ ηω∗/(1 + τ)) our method yields a
+ value of |Λ| that is reduced by a factor of 1/3 as compared to the resonant case, again at
+ least qualitatively reproducing the expected stabilization of the toroidal ITG mode in
+ this limit.
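+ The quantity plotted in Fig. 1 and the three limits of Eqn. 4.16 can be reproduced with a
+ short sketch (an illustration added here, not from the original paper):
+ import numpy as np
+
+ def fig1_factor(kbar):
+     """tau*(1+tau)*Lambda_min^2/(eta*omega_*)^2 versus kbar_d, from Eq. (4.15)."""
+     zeta = np.sqrt(2*kbar**2 - 8*kbar/3 + 1)
+     num = 8*kbar*(zeta - 1) + 3*(zeta - 1)**2 + 6*kbar**2
+     return num / (16*kbar**2*zeta)
+
+ print(fig1_factor(1e-4), 1/24)                       # kbar -> 0 limit of Eq. (4.16)
+ print(fig1_factor(4/3), 3/8)                         # resonant value kbar = 4/3
+ print(fig1_factor(1e4)*1e4, (3*np.sqrt(2) + 4)/8)    # |kbar| >> 1 with sigma = +1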
+ 5. Conclusion
+ We have demonstrated that the use of a generalized form of free energy H̃ introduces
+ some of the physics of wave-particle resonance that is missing from the theory of optimal
+ mode growth of Helmholtz free energy (Helander and Plunk 2021, 2022; Plunk and
+ Helander 2022). The growth rates of optimal modes of generalized free energy provide
+ a rigorous upper bound on the growth of conventional gyrokinetic instabilities ("normal
+ modes"), which is always below the Helmholtz bound, as it must be, given that the
+ Helmholtz free energy is a special case of the generalized measure. Moreover, optimal
+ modes of generalized free energy depend on the magnetic-field geometry to a greater
+ extent than those associated with Helmholtz free energy. The difference in growth rates
+ can be very large. For instance, in the important case of a strongly driven toroidal ITG
+ mode, the Helmholtz bound is larger by a factor of order ηω∗/ωd ≫ 1.
+ A single ordinary differential equation has been derived for optimal modes, allowing for
+ general magnetic geometry. We found solutions of this equation in some simple limits to
+ demonstrate that it indeed recovers, at least qualitatively, some of the physical effects
+ expected from the theory of linear ITG modes, including sensitivity to the ratio of the
+ frequencies associated with drive and resonance, and a transition of the instability when
+ this ratio is near one. The density-gradient dependence of the ITG mode is absent from
+ both the electrostatic and free energy input terms, assuming adiabatic electrons, so its
+ effect is not accounted for by the theory presented here.
+ The results of this work have possible implications for "turbulence optimization",
+ i.e. the endeavor to shape the equilibrium magnetic geometry for low turbulence in
+ stellarators. The general result, Eqn. 3.15, allows in principle for the inclusion of the
+ complete geometric information contained in gyrokinetics that is needed to run gyro-
+ kinetic simulations. However, the solution of this equation should be far simpler and more
+ efficient, due to the reduction of velocity space to a single moment. The results found for
+ the toroidal branch of the ITG mode hint at a possible optimization strategy. Consider
+ fixed plasma conditions, i.e. a given temperature gradient (ηω∗) and temperature ratio (τ):
+ at high drive (ηω∗/(1 + τ) > 4ωd/3), minimization of the optimal growth rate |Λ| is achieved
+ by minimization of the magnetic drift ωd (i.e. magnetic curvature), corresponding to
+ minimization of the strongly-driven (non-resonant) toroidal ITG mode. On the other
+ hand, at low drive (ηω∗/(1 + τ) < 4ωd/3) an increase of ωd is favored, corresponding to
+ a weakening of the marginally unstable ITG mode, i.e. an increase of the threshold of
+ instability. The latter case corresponds to "critical gradient" optimization, an idea which
+ has recently been developed (Roberg-Clark et al. 2022a,b).
+ More general solutions of the optimal mode equation, and their application to optimiza-
+ tion, will be pursued in future works. Other special limits can also be explored, including
+ adiabatic-ion limits, appropriate for studying trapped-electron and universal instabilities.
+ Electromagnetic generalizations are also possible: although it is not clear how to construct
+ an electromagnetic form of generalized free energy H̃ that is a nonlinear invariant, it is
+ certainly possible to consider related measures that focus on linear bounds.
+ Funding. This work has been carried out within the framework of the EUROfusion
+ Consortium, funded by the European Union via the Euratom Research and Training
+ Programme (Grant Agreement No 101052200 — EUROfusion). Views and opinions
+ expressed are however those of the author(s) only and do not necessarily reflect those of
+ the European Union or the European Commission. Neither the European Union nor the
+ European Commission can be held responsible for them. This work was partly supported
+ by a grant from the Simons Foundation (560651, PH).
+ Declaration of Interests. The authors report no conflict of interest.
+ Author ORCID. G. G. Plunk, https://orcid.org/0000-0002-4012-4038; P. Helander,
+ https://orcid.org/0000-0002-0460-590X.
+ Appendix A. Several kinetic species
+ For a plasma with an arbitrary number of particle species, we multiply each gyrokinetic
+ equation
+ ∂ga,k/∂t + v∥ ∂ga,k/∂l + i ωda ga,k + (1/B²) Σk′ B·(k × k′) δφ̄k′ ga,k−k′   (A 1)
+   = (ea Fa0/Ta) (∂/∂t + i ωT∗a) δφ̄k,   (A 2)
+ by ea δφ̄∗k, integrate over velocity space, take the real part and the average ⟨· · ·⟩ over the
+ flux tube, and sum over all species a and wave vectors k. In other words, we apply the
+ operator
+ Re Σa,k ea ⟨∫ δφ̄∗k (· · ·) d³v⟩.   (A 3)
+ Since the expression
+ Re (k × k′) δφ̄∗k′ δφ̄k ga,k−k′ = (1/2)(k × k′) [δφ̄∗k′ δφ̄k ga,k−k′ + δφ̄k′ δφ̄∗k g∗a,k−k′]   (A 4)
+   = (1/2)(k × k′) [δφ̄−k′ δφ̄k ga,k−k′ + δφ̄k′ δφ̄−k ga,−k+k′]   (A 5)
+ changes sign under an exchange of k and k′, the nonlinear terms cancel upon summation
+ over k and k′, and we obtain
+ Re Σa,k ea ⟨∫ δφ̄∗k [∂ga,k/∂t + v∥ ∂ga,k/∂l + i ωda ga,k − (ea Fa0/Ta) ∂δφ̄k/∂t] d³v⟩ = 0.   (A 6)
+ The quasineutrality equation (2.7) can be used to write the first term as
+ Re Σa,k ea ⟨∫ δφ̄∗k (∂ga,k/∂t) d³v⟩ = (d/dt) Σk Σa (na e²a/(2Ta)) ⟨|δφk|²⟩.   (A 7)
+ We thus arrive at the electrostatic energy balance equation
+ (d/dt) Σk Σa (na e²a/(2Ta)) ⟨[1 − Γ0(bak)] |δφk|²⟩   (A 8)
+   = −Re Σa,k ea ⟨∫ δφ̄∗k (v∥ ∂ga,k/∂l + i ωda ga,k) d³v⟩,   (A 9)
+ which is the generalization of Eqn. 2.11 to several species. The right-hand side can be
+ interpreted as minus the work done by the electric field on the various particle species.
+ The first term in this expression contains
+ ∫ J0a v∥ (∂ga,k/∂l) d³v = Σσ (2πσB/m²a) ∫₀^∞ dEa ∫₀^(Ea/B) J0a (∂ga,k/∂l) dµa   (A 10)
+   = B (∂/∂l) [(1/B) ∫ J0a v∥ ga,k d³v] − ∫ (∂J0a/∂l) v∥ ga,k d³v,   (A 11)
+ which we used in Eqn. 3.8, with σ = v∥/|v∥|, Ea = ma v²/2 and µa = ma v⊥²/(2B).
+ Appendix B. Derivation of operators D and K
+ We first write forms of D and K explicit in g̃, noting that the contribution to D
+ proportional to the density gradient is zero by use of quasi-neutrality with the adiabatic
+ electron approximation (the factor ηω∗ ∝ dTi/dψ appears in what follows, but never
+ ω∗ ∝ dni/dψ alone). For the same reason, the terms proportional to ν, involved in the
+ transformation from g to g̃, do not contribute, so expressing the energy input in terms of g̃
+ merely has the consequence of introducing the overall factor α. The expressions are
+ D = −Ti (iα/(2ni)) ⟨∫∫ g̃(v) g̃∗(v′) ηω∗ (v²/vth²) J0 J′0 d³v d³v′⟩ + c.c.,   (B 1)
+ K∥ = −Ti (α/(2ni)) ⟨∫∫ g̃∗(v′) [v∥ ∂g̃(v)/∂l] J0 J′0 d³v d³v′⟩ + c.c.,   (B 2)
+ Kd = −Ti (iα/(2ni)) ⟨∫∫ g̃∗(v′) ω̃d g̃(v) J0 J′0 d³v d³v′⟩ + c.c.,   (B 3)
+ where we separate K = K∥ + Kd and 'c.c.' denotes the complex conjugate. These can be
+ re-expressed in terms of linear operators by writing them in the form
+ D = (g̃, D g̃) = ⟨Ti ∫ d³v (g̃∗/F0) D g̃⟩,   (B 4)
+ K∥ = (g̃, K∥ g̃) = ⟨Ti ∫ d³v (g̃∗/F0) K∥ g̃⟩,   (B 5)
+ Kd = (g̃, Kd g̃) = ⟨Ti ∫ d³v (g̃∗/F0) Kd g̃⟩.   (B 6)
+ Identifying D and Kd is simply a matter of exchanging the labels of the dummy variables
+ of integration (v for v′, etc.). Manipulating the expression for K∥ to reveal K∥ is more
+ involved. We also need to integrate by parts in l, and will need to use the fact that ∂/∂l
+ is performed at fixed phase space variables Ei and µi = mi v⊥²/(2B). The velocity-space
+ volume element contains an important factor of 1/|v∥|, which generally depends on l and
+ does not itself commute with ∂/∂l:
+ d³v = 2π v⊥ dv⊥ dv∥ = Σσ 2πB dEi dµi/(m²i |v∥|),   (B 7)
+ where σ denotes the sign of v∥.
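+ As a short worked illustration of the label-exchange step (added here; it follows from
+ Eqns. B 1 and 3.6): writing A = ⟨∫∫ g̃∗(v) g̃(v′) (v²/vth²) J0 J′0 d³v d³v′⟩, Eqn. B 1 reads
+ D = −(iαTi ηω∗/(2ni)) A∗ + c.c. = (iαTi ηω∗/(2ni)) (A − A∗). On the other hand, inserting
+ Eqn. 3.6 into (g̃, D g̃) gives (iαTi ηω∗/(2ni)) ⟨∫∫ g̃∗(v) g̃(v′) J0 J′0 [(v/vth)² − (v′/vth)²] d³v d³v′⟩,
+ whose second term becomes −A∗ upon exchanging the dummy labels v ↔ v′, so the two
+ forms agree.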
+ Appendix C. Moment form of eigenproblem
+ The three terms of Eqn. 3.9 can be rewritten in terms of the moments of g̃ (Eqns. 3.10–3.14):
+ D g̃ = (iα/2) ηω∗ J0 F0 [(v²/vth²) κ1 − κ2],   (C 1)
+ K∥ g̃ = (α/2) F0 {vth J0 [−B (∂/∂l)(κ4/B) + κ5] + v∥ (∂/∂l)(J0 κ1)},   (C 2)
+ Kd g̃ = (iα/2) ωd J0 F0 {[v⊥²/(2vth²) + v∥²/vth²] κ1 − κ3}.   (C 3)
+ Then, taking moments of Eqn. 3.9 yields the following five equations:
+ (2Λ/α) κ1 = i ηω∗ (G1 κ1 − G0 κ2) − i ∆ωd (G3 κ1 − G0 κ3) − ∆ G0 vth [κ5 − B (∂/∂l)(κ4/B)],   (C 5)
+ (2Λ/α) κ2 = i ηω∗ (G2 κ1 − G1 κ2) − i ∆ωd (G4 κ1 − G1 κ3) − ∆ G1 vth [κ5 − B (∂/∂l)(κ4/B)],   (C 6)
+ (2Λ/α) κ3 = i ηω∗ (G4 κ1 − G3 κ2) − i ∆ωd (G5 κ1 − G3 κ3) − ∆ G3 vth [κ5 − B (∂/∂l)(κ4/B)],   (C 7)
+ (2Λ/α) κ4 = −∆ vth [G′0,2 κ1 + G0,2 ∂κ1/∂l],   (C 8)
+ (2Λ/α) κ5 = −∆ vth [G′′0,2 κ1 + G′0,2 ∂κ1/∂l].   (C 9)
+ See Appendix D, where the integrals Gm,n, etc., are defined and evaluated. Note that
+ the final two equations can be immediately used to eliminate κ4 and κ5, leaving a system
+ of three equations. The second and third equations are used together to find forms for κ2
+ and κ3 in terms of κ1, and these forms are substituted into the first equation to obtain
+ the final form, in terms of κ1 only, given by Eqn. 3.15.
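+ As a worked illustration of the elimination step (added here, consistent with the equations
+ above): Eqns. C 8 and C 9 give κ4 = −(α∆vth/(2Λ)) (G′0,2 κ1 + G0,2 ∂κ1/∂l) and
+ κ5 = −(α∆vth/(2Λ)) (G′′0,2 κ1 + G′0,2 ∂κ1/∂l), so the last term of Eqn. C 5 becomes
+ −∆ G0 vth [κ5 − B (∂/∂l)(κ4/B)]
+   = (α∆²vth² G0/(2Λ)) B [(G′′0,2/B) κ1 − (∂/∂l)(G′0,2/B) κ1 − (∂/∂l)((G0,2/B) ∂κ1/∂l)],
+ which, after multiplying Eqn. C 5 through by 2Λ/α, reproduces the right-hand side of
+ Eqn. 3.15.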
+ Appendix D. Bessel-type integrals
+ The following definitions, mostly copied from Plunk and Helander (2022), are needed to
+ perform the various integrals that appear in the moment equations of our eigenproblem.
+ First we need a general form of Weber's integral,
+ In(p, a1, a2) = ∫₀^∞ exp(−p t²) Jn(a1 t) Jn(a2 t) t dt = (1/(2p)) exp[(−a1² − a2²)/(4p)] In(a1 a2/(2p)),   (D 1)
+ where In on the right-hand side is the modified Bessel function of order n. The integrals
+ we need to evaluate can be conveniently found in terms of In. We define
+ G⊥m(b) = 2 ∫₀^∞ x⊥^(m+1) exp(−x⊥²) J0²(√(2b) x⊥) dx⊥,   (D 2a)
+ G(1)⊥m(b) = 2 ∫₀^∞ x⊥^(m+2) exp(−x⊥²) J0(√(2b) x⊥) J1(√(2b) x⊥) dx⊥,   (D 2b)
+ G(2)⊥m(b) = 2 ∫₀^∞ x⊥^(m+3) exp(−x⊥²) J1²(√(2b) x⊥) dx⊥,   (D 2c)
+ where m is assumed to be even. Now we note that these integrals can be evaluated in
+ terms of Weber's integral:
+ G⊥m(b) = 2 [(−d/dp)^(m/2) I0(p, √(2b), √(2b))] at p = 1,   (D 3a)
+ G(1)⊥m(b) = 2 [(−d/dp)^(m/2) (−d/dλ) I0(p, λ, √(2b))] at p = 1, λ = √(2b),   (D 3b)
+ G(2)⊥m(b) = 2 [(−d/dp)^(m/2) (−d/dλ1)(−d/dλ2) I0(p, λ1, λ2)] at p = 1, λ1 = λ2 = √(2b).   (D 3c)
+ The above relations allow us to evaluate the functions
+ Gm,n(b) = G⊥m(b) G∥n,   (D 4a)
+ G(1)m,n(b) = G(1)⊥m(b) G∥n,   (D 4b)
+ G(2)m,n(b) = G(2)⊥m(b) G∥n,   (D 4c)
+ where
+ G∥n = (1/√π) ∫_{−∞}^{∞} exp(−x∥²) x∥^n dx∥ = [1 + (−1)^n]/(2√π) ΓE((1 + n)/2),   (D 5)
+ and ΓE is the Euler gamma function. Finally, we can evaluate the integrals G′m,n and
+ G′′m,n. We define
+ G′⊥m(b) = 2 ∫₀^∞ x⊥^(m+1) exp(−x⊥²) J0 (∂J0/∂l) dx⊥,   (D 6)
+ G′′⊥m(b) = 2 ∫₀^∞ x⊥^(m+1) exp(−x⊥²) (∂J0/∂l)² dx⊥.   (D 7)
+ Relating x⊥ to the proper gyrokinetic phase-space variable µ, that is x⊥² = µB/Ti, allows
+ the derivatives to be evaluated:
+ ∂J0/∂l = −√(2/B) [(∂/∂l)√(Bb)] x⊥ J1,   (D 8)
+ so that we can write
+ G′⊥m(b) = −√(2/B) [(∂/∂l)√(Bb)] G(1)⊥m(b),   (D 9)
+ G′′⊥m(b) = (2/B) [(∂/∂l)√(Bb)]² G(2)⊥m(b).   (D 10)
+ These expressions allow us to evaluate
+ G′m,n = G′⊥m G∥n,   (D 11)
+ G′′m,n = G′′⊥m G∥n.   (D 12)
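+ A numerical sanity check of these identities (an illustrative sketch, not from the paper):
+ verify Weber's integral (D 1) and the evaluation of G⊥m via (D 3a) against direct
+ quadrature of the definition (D 2a), here for m = 2 and an assumed value of b:
+ import numpy as np
+ from scipy.integrate import quad
+ from scipy.special import jv, iv
+
+ p, b = 1.0, 0.7
+ a = np.sqrt(2*b)
+
+ # Weber's integral (D 1) with n = 0 and a1 = a2 = a
+ lhs = quad(lambda t: np.exp(-p*t**2)*jv(0, a*t)**2*t, 0, np.inf)[0]
+ rhs = np.exp(-a**2/(2*p))*iv(0, a**2/(2*p))/(2*p)
+ print(lhs, rhs)
+
+ # G_perp2(b): definition (D 2a) versus -2 dI0/dp at p = 1, from (D 3a)
+ G2_def = 2*quad(lambda x: x**3*np.exp(-x**2)*jv(0, a*x)**2, 0, np.inf)[0]
+ I0 = lambda pp: np.exp(-a**2/(2*pp))*iv(0, a**2/(2*pp))/(2*pp)
+ dp = 1e-6
+ G2_weber = -2*(I0(1 + dp) - I0(1 - dp))/(2*dp)
+ print(G2_def, G2_weber)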
+ D.1. Explicit expressions for some Bessel integrals
+ The b(l)-dependent factors in Eqn. 3.15 can be written as
+ G0 = G0,0,   (D 13)
+ G1 = G2,0 + G0,2,   (D 14)
+ G2 = G4,0 + 2G2,2 + G0,4,   (D 15)
+ G3 = (1/2) G2,0 + G0,2,   (D 16)
+ G4 = (1/2) G4,0 + (3/2) G2,2 + G0,4,   (D 17)
+ G5 = (1/4) G4,0 + G2,2 + G0,4,   (D 18)
+ which can be evaluated using the identities of the previous section in terms of the familiar
+ Γn(b) of gyrokinetic theory (suppressing its argument for succinctness):
+ G0 = Γ0,   (D 19)
+ G1 = (3/2 − b) Γ0 + b Γ1,   (D 20)
+ G2 = (1/4) [(6b² − 20b + 15) Γ0 + 2b ((10 − 4b) Γ1 + b Γ2)],   (D 21)
+ G3 = (1/2) [b Γ1 − (b − 2) Γ0],   (D 22)
+ G4 = (1/4) [(3b² − 11b + 10) Γ0 + b ((11 − 4b) Γ1 + b Γ2)],   (D 23)
+ G5 = (1/8) [(3b² − 12b + 14) Γ0 + b (b Γ2 − 4(b − 3) Γ1)],   (D 24)
+ where we recall
+ Γn(b) = exp(−b) In(b).   (D 25)
+ For completeness, we evaluate the few remaining factors that enter Eqn. 3.15:
+ G0,2 = Γ0/2,   (D 26)
+ G(1)0,2 = √b (Γ0 − Γ1)/(2√2),   (D 27)
+ G(2)0,2 = (1/8) [3b Γ0 + (2 − 4b) Γ1 + b Γ2],   (D 28)
+ and, using Eqns. D 9–D 12,
+ G′0,2 = −√(2/B) [(∂/∂l)√(Bb)] G(1)0,2,   (D 29)
+ G′′0,2 = (2/B) [(∂/∂l)√(Bb)]² G(2)0,2.   (D 30)
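+ The closed forms (D 19)–(D 24) can be cross-checked against direct quadrature of the
+ defining integrals (a sketch with an assumed value of b; the parallel factors G∥0 = 1,
+ G∥2 = 1/2, G∥4 = 3/4 follow from Eqn. D 5):
+ import numpy as np
+ from scipy.integrate import quad
+ from scipy.special import jv, ive
+
+ b = 0.8
+ a = np.sqrt(2*b)
+ Gperp = lambda m: 2*quad(lambda x: x**(m+1)*np.exp(-x**2)*jv(0, a*x)**2,
+                          0, np.inf)[0]
+ Gp = {m: Gperp(m) for m in (0, 2, 4)}
+ Gpar = {0: 1.0, 2: 0.5, 4: 0.75}
+ Gmn = lambda m, n: Gp[m]*Gpar[n]
+
+ G0, G1, G2 = ive(0, b), ive(1, b), ive(2, b)   # Gamma_0, Gamma_1, Gamma_2
+ checks = [
+     (Gmn(0, 0),                          G0),                               # D 19
+     (Gmn(2, 0) + Gmn(0, 2),              (1.5 - b)*G0 + b*G1),              # D 20
+     (Gmn(4, 0) + 2*Gmn(2, 2) + Gmn(0, 4),
+      ((6*b**2 - 20*b + 15)*G0 + 2*b*((10 - 4*b)*G1 + b*G2))/4),             # D 21
+     (0.5*Gmn(2, 0) + Gmn(0, 2),          (b*G1 - (b - 2)*G0)/2),            # D 22
+ ]
+ for numeric, closed in checks:
+     print(numeric, closed)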
+ D.2. Limit b → 0
+ In the limit b → 0 we obtain
+ G0 = 1,   (D 31)
+ G1 = 3/2,   (D 32)
+ G2 = 15/4,   (D 33)
+ G3 = 1,   (D 34)
+ G4 = 5/2,   (D 35)
+ G5 = 7/4,   (D 36)
+ and
+ G0,2 = 1/2,   (D 37)
+ G(1)0,2 = 0,   (D 38)
+ G(2)0,2 = 0.   (D 39)
+ REFERENCES
+ P. Helander and G. G. Plunk. Energetic bounds on gyrokinetic instabilities. Part 1.
+ Fundamentals. Journal of Plasma Physics, 88(2):905880207, 2022.
+ G. G. Plunk and P. Helander. Energetic bounds on gyrokinetic instabilities. Part 2. Modes of
+ optimal growth. Journal of Plasma Physics, 88(3):905880313, 2022.
+ Matt Landreman, Gabriel G. Plunk, and William Dorland. Generalized universal instability:
+ transient linear amplification and subcritical turbulence. Journal of Plasma Physics,
+ 81(5):905810501, 2015.
+ W. Dorland and G. W. Hammett. Gyrofluid turbulence models with kinetic effects. Physics
+ of Fluids B: Plasma Physics, 5(3):812–835, 1993. URL https://doi.org/10.1063/1.860934.
+ G. G. Plunk, P. Helander, P. Xanthopoulos, and J. W. Connor. Collisionless microinstabilities
+ in stellarators. III. The ion-temperature-gradient mode. Physics of Plasmas, 21(3):032112,
+ 2014. URL https://doi.org/10.1063/1.4868412.
+ A. A. Schekochihin, S. C. Cowley, W. Dorland, G. W. Hammett, G. G. Howes, E. Quataert, and
+ T. Tatsuno. Astrophysical gyrokinetics: kinetic and fluid turbulent cascades in magnetized
+ weakly collisional plasmas. The Astrophysical Journal Supplement Series, 182(1):310, 2009.
+ URL https://dx.doi.org/10.1088/0067-0049/182/1/310.
+ G. G. Plunk, S. C. Cowley, A. A. Schekochihin, and T. Tatsuno. Two-dimensional
+ gyrokinetic turbulence. Journal of Fluid Mechanics, 664:407–435, 2010.
+ P. Helander, J. H. E. Proll, and G. G. Plunk. Collisionless microinstabilities in stellarators. I.
+ Analytical theory of trapped-particle modes. Physics of Plasmas, 20(12):122505, 2013.
+ URL https://doi.org/10.1063/1.4846818.
+ Bogdan Teaca. Private communication.
+ H. Biglari, P. H. Diamond, and M. N. Rosenbluth. Toroidal ion-pressure-gradient-driven drift
+ instabilities and transport revisited. Physics of Fluids B: Plasma Physics, 1(1):109–118,
+ 1989. URL http://link.aip.org/link/?PFB/1/109/1.
+ P. Helander and G. G. Plunk. Upper bounds on gyrokinetic instabilities in magnetized plasmas.
+ Phys. Rev. Lett., 127:155001, 2021. URL https://link.aps.org/doi/10.1103/PhysRevLett.127.155001.
+ G. T. Roberg-Clark, G. G. Plunk, and P. Xanthopoulos. Coarse-grained gyrokinetics for the
+ critical ion temperature gradient in stellarators. Phys. Rev. Research, 4:L032028, 2022a.
+ URL https://link.aps.org/doi/10.1103/PhysRevResearch.4.L032028.
+ G. T. Roberg-Clark, P. Xanthopoulos, and G. G. Plunk. Reduction of electrostatic turbulence
+ in a quasi-helically symmetric stellarator via critical gradient optimization, 2022b. URL
+ https://arxiv.org/abs/2210.16030.
4dAzT4oBgHgl3EQfEPoC/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
6tE4T4oBgHgl3EQf1w38/content/2301.05294v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c2bc885fa1e4c2cec4d3a681f5fed767583e9b9188185952ebe919cf9f75b680
+ size 16705656
6tFAT4oBgHgl3EQfnx0W/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1bbdb67b97383da409fe9f0d74a4effdfa3897ecc29a73632ca2334d062be87e
+ size 3276845
79E2T4oBgHgl3EQfPgaW/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c0651cdcf1524478b760e9a540727965e0349da6a6a18e7d42203535bd4dcf6
+ size 4128813
7tFLT4oBgHgl3EQfsi8k/content/tmp_files/2301.12147v1.pdf.txt ADDED
@@ -0,0 +1,2034 @@
+ arXiv:2301.12147v1 [math.AP] 28 Jan 2023
+ LOGISTIC ELLIPTIC EQUATION WITH A NONLINEAR BOUNDARY
+ CONDITION ARISING FROM COASTAL FISHERY HARVESTING II
+ KENICHIRO UMEZU
+ Abstract. We study the positive solutions of the logistic elliptic equation with a nonlinear
+ Neumann boundary condition that models coastal fishery harvesting ([18]). An essential role is
+ played by the smallest eigenvalue of the Dirichlet eigenvalue problem, with respect to which a
+ noncritical case is studied in [32]. In this paper, we extend our analysis to the critical case and
+ further study the noncritical case for a more precise description of the positive solution set. Our
+ approach relies on the energy method, sub- and supersolutions, and implicit function analysis.
+ 1. Introduction
+ This paper is devoted to the study of the positive solutions of the following logistic elliptic
+ equation with a nonlinear boundary condition arising from coastal fishery harvesting ([18]):
+ −∆u = u − u^p in Ω,
+ u ≥ 0 in Ω,   (1.1)
+ ∂u/∂ν = −λu^q on ∂Ω.
+ Here, Ω ⊂ R^N, N ≥ 1, is a bounded domain with smooth boundary ∂Ω, ∆ = Σ_{i=1}^{N} ∂²/∂x_i² is the
+ usual Laplacian in R^N, 0 < q < 1 < p, λ ≥ 0 is a parameter, and ν is the unit outer normal to
+ ∂Ω. Unless stated otherwise, throughout this paper we assume the subcritical condition
+ p < (N + 2)/(N − 2) for N > 2.   (1.2)
+ In the case p = 2, the unknown function u ≥ 0 ecologically represents the biomass of fish that
+ inhabit a lake Ω, obeying the logistic law ([8]), and the nonlinear boundary condition models
+ fishery harvesting with harvesting effort λ on the lake coast ∂Ω, obeying the Cobb–Douglas
+ production function ([18, Subsection 2.1]).
+ A nonnegative function u ∈ H¹(Ω) is called a nonnegative (weak) solution of (1.1) if u satisfies
+ ∫_Ω (∇u∇ϕ − uϕ + u^p ϕ) + λ ∫_{∂Ω} u^q ϕ = 0,   ϕ ∈ H¹(Ω)   (1.3)
+ (we may regard (λ, u) as a nonnegative solution of (1.1)). It is seen that problem (1.1) has
+ a solution (λ, 0) for every λ > 0, called a trivial solution. The sets {(λ, 0) : λ ≥ 0} and
+ {(λ, 0) : λ > 0} are said to be the trivial lines. We know ([30]) that a nonnegative solution u of
+ (1.1) belongs to the space W^{1,r}(Ω) for r > N (consequently, to C^θ(Ω) for θ ∈ (0, 1)). Moreover, a
+ nontrivial nonnegative solution u of (1.1) satisfies u ∈ C^{2+θ}(Ω) for θ ∈ (0, 1) and u > 0
+ in Ω ([17], [27]), and is called a positive solution. Indeed, if u > 0 in Ω, then u ∈ C^{2+θ}(Ω)
+ by a bootstrap argument using elliptic regularity, and u satisfies (1.1) pointwise in Ω in the
+ classical sense. However, we do not know whether u > 0 on the entirety of ∂Ω for a positive
+ solution u of (1.1). As a matter of fact, Hopf's boundary point lemma ([27]) does not work,
+ because of the lack of the one-sided Lipschitz condition [26, (4.1.19)] for the mapping
+ 0 ≤ u ↦ (−u^q) for u close to 0.
+ 2020 Mathematics Subject Classification. 35J65, 35B32, 35J25, 92D40.
+ Key words and phrases. logistic elliptic equation, concave–convex nonlinearity, positive solution, uniqueness,
+ stability, sub- and supersolutions, energy method, boundary harvesting.
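+ As a concrete numerical illustration (a sketch added here, not from the paper; the choice
+ N = 1, Ω = (0, 5), p = 2, q = 1/2, λ = 0.5 is hypothetical), a positive solution of (1.1)
+ can be computed with scipy's collocation BVP solver; for Ω = (0, L) the boundary
+ condition ∂u/∂ν = −λu^q reads u′(0) = λu(0)^q and u′(L) = −λu(L)^q:
+ import numpy as np
+ from scipy.integrate import solve_bvp
+
+ p, q, lam, L = 2.0, 0.5, 0.5, 5.0   # L > pi, so beta_Omega = (pi/L)^2 < 1
+
+ def rhs(x, y):
+     u, du = y
+     return np.vstack([du, -(u - np.abs(u)**(p - 1)*u)])   # -u'' = u - u^p
+
+ def bc(ya, yb):
+     return np.array([ya[1] - lam*np.abs(ya[0])**q,
+                      yb[1] + lam*np.abs(yb[0])**q])
+
+ x = np.linspace(0, L, 200)
+ y0 = np.vstack([np.ones_like(x), np.zeros_like(x)])   # start from u = 1
+ sol = solve_bvp(rhs, bc, x, y0, tol=1e-8)
+ print(sol.status, sol.y[0].min(), sol.y[0].max())     # expect 0 < u < 1, cf. Theorem 0(I)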
+ For a positive solution (λ, u) of (1.1) satisfying u > 0 in Ω, we call γ1 = γ1(λ, u) ∈ R the
+ smallest eigenvalue of the linearized eigenvalue problem at (λ, u):
+ −∆ϕ = ϕ − p u^{p−1} ϕ + γϕ in Ω,
+ ∂ϕ/∂ν = −λq u^{q−1} ϕ + γϕ on ∂Ω.   (1.4)
+ It is well known that γ1 is simple, with a positive eigenfunction ϕ1 ∈ C^{2+θ}(Ω) satisfying ϕ1 > 0
+ in Ω. Indeed, γ1 is characterized by the variational formula
+ γ1 = inf { ∫_Ω (|∇ϕ|² − ϕ² + p u^{p−1} ϕ²) + λ ∫_{∂Ω} q u^{q−1} ϕ² : ϕ ∈ H¹(Ω), ∫_Ω ϕ² + ∫_{∂Ω} ϕ² = 1 }.
+ A positive solution u > 0 in Ω of (1.1) is said to be asymptotically stable, weakly stable, or
+ unstable if γ1 > 0, γ1 ≥ 0, or γ1 < 0, respectively.
+ Problem (1.1) possesses a sublinear nonlinearity at infinity and also a concave–convex nature.
+ Thus, the global uniqueness of a positive solution of (1.1) for every λ > 0 would not be so easy
+ to deduce. For nonlinear elliptic problems with a concave–convex nature, we refer to [4, 31, 1,
+ 5, 11, 12, 19]. The sublinear nonlinearity (−u^q) that appears in (1.1) induces an absorption
+ effect on ∂Ω. Sublinear boundary conditions of the u^q type were explored in [16, 14, 15, 28, 29].
+ The case of an incoming flux on ∂Ω was studied in [16, 15]. The mixed case of absorption and
+ an incoming flux on ∂Ω was studied in [14]. The absorption case was also studied in [28, 29],
+ where a similar type of logistic elliptic equation with an indefinite weight was analyzed for
+ the existence and multiplicity of nontrivial nonnegative solutions.
+ An important role is played by the smallest eigenvalue βΩ > 0 of the Dirichlet eigenvalue
+ problem
+ −∆φ = βφ in Ω,   φ = 0 on ∂Ω.
+ It is well known that βΩ is simple, with a positive eigenfunction φΩ ∈ H¹₀(Ω) (implying
+ φΩ ∈ C^{2+θ}(Ω) by elliptic regularity). Indeed, φΩ > 0 in Ω, and
+ c1 ≤ −∂φΩ/∂ν ≤ c2 on ∂Ω   (1.5)
+ for some 0 < c1 < c2. Moreover, βΩ is characterized by the variational formula
+ βΩ = inf { ∫_Ω |∇φ|² : φ ∈ H¹₀(Ω), ∫_Ω φ² = 1 }.
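+ (An elementary illustration, added here and not part of the original: for N = 1 and
+ Ω = (0, L), the Dirichlet eigenvalue problem −φ″ = βφ, φ(0) = φ(L) = 0 has smallest
+ eigenvalue βΩ = (π/L)² with eigenfunction φΩ(x) = sin(πx/L). The noncritical cases
+ βΩ < 1 and βΩ > 1 thus correspond to L > π and L < π, while the critical case βΩ = 1
+ studied in this paper corresponds exactly to L = π.)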
+ If βΩ < 1, then uD ∈ H¹₀(Ω) ∩ C^{2+θ}(Ω) denotes the unique positive solution of the Dirichlet
+ logistic problem ([7])
+ −∆u = u − u^p in Ω,   u = 0 on ∂Ω.   (1.6)
+ The existence, nonexistence, and multiplicity of positive solutions for (1.1) in the case where
+ βΩ ̸= 1 were studied in the author's previous work [32, Theorems 1.1, 1.2, 1.4, 1.5], which
+ Theorem 0 summarizes.
+ Theorem 0.
149
+ (I) A positive solution u of (1.1) satisfies that u < 1 in Ω and u > 0 on Γ ⊂ ∂Ω with the
150
+ condition |Γ| > 0.
151
+ 2
152
+
153
+ (II) There exists λ∗ > 0 such that problem (1.1) has a positive solution curve
154
+ C0 = {(λ, uλ) : 0 ≤ λ ≤ λ∗},
155
+ (1.7)
156
+ emanating from (λ, u) = (0, 1), that satisfies the following three conditions:
157
+ • λ �→ uλ ∈ C2+θ(Ω) is C∞,
158
+ • uλ > 0 in Ω,
159
+ • uλ is asymptotically stable.
160
+ Moreover, the positive solutions of (1.1) near (λ, u) = (0, 1) in R × C2+θ(Ω) form C0.
161
+ Let λ be the positive value defined as
162
+ λ = sup{λ > 0 : (1.1) has a positive solution for λ}.
163
+ (1.8)
164
+ Then, the following assertions hold.
165
+ (III) Assume that βΩ < 1. Then, we have the following (as in Figure 1).
166
+ (i) λ = ∞, and more precisely, problem (1.1) possesses a positive solution u for every
167
+ λ > 0 such that u > 0 in Ω.
168
+ (ii) (λ, uλ) ∈ C0 is a unique positive solution of (1.1) for 0 < λ ≤ λ∗ (by making λ∗ in
169
+ (1.7) smaller if necessary).
170
+ (iii) un → uD in H1(Ω) for a positive solution (λn, un) of (1.1) with λn → ∞.
171
+ (iv) The positive solution set {(λ, u)} does not meet the trivial line {(λ, 0) : λ ≥ 0} in
172
+ the topology of H1(Ω) (nor C(Ω)).
173
+ (IV) Assume that βΩ > 1. Then, we have the following (as in Figure 2).
174
+ (i) λ < ∞.
175
+ (ii) There exists a bounded subcontinuum (closed connected subset) �C0 = {(λ, u)} of
176
+ nonnegative solutions of (1.1) in [0, ∞) × C(Ω) joining (λ, u) = (0, 1) and (0, 0)
177
+ such that �C0 \ {(0, 0)} includes C0 and consists of positive solutions of (1.1). Par-
178
+ ticularly, problem (1.1) has at least two positive solutions for λ > 0 small.
179
+ (iii) The positive solution set {(λ, u)} does not meet the trivial line {(λ, 0) : λ > 0} in
180
+ the topology of H1(Ω) (nor C(Ω)).
181
+ (iv) γ1(λn, un) < 0 for a positive solution (λn, un) of (1.1) such that (λn, un) → (0, 0)
182
+ in R × H1(Ω), provided that un > 0 in Ω, i.e., un is unstable.
183
+ Remark 0.
184
+ (i) Assertions (I) and (II) hold for every case of βΩ > 0.
185
+ (ii) Assertions (II) and (III-i) hold for any p > 1.
186
+ (iii) Problem (1.1) with λ = 0 has exactly two nonnegative solutions (λ, u) = (0, 0), (0, 1).
187
+ Thus, Theorem 0(I) is used to show easily that in every case of βΩ > 0, the positive
188
+ solution set {(λ, u)} of (1.1) meets at most (0, 0) and (0, 1) on {(0, u) : u ≥ 0}, i.e., if
189
+ (λn, un) is a positive solution of (1.1) such that (λn, un) → (0, u) in H1(Ω) (equivalently
190
+ C(Ω) by elliptic regularity), then either u = 0 or 1.
191
+ In this paper, we extend our consideration to the case where βΩ = 1 and further study the
192
+ positive solution set in the case where βΩ < 1. Our first main result concerns the case where
193
+ βΩ < 1. On the basis of Theorem 0(III), we present the uniqueness and stability of a positive
194
+ solution of (1.1) for λ > 0 large and also the strong positivity of the positive solutions for every
195
+ λ > 0.
196
+ Theorem 1.1. Assume that βΩ < 1. Then, the following assertions hold (see Figure 1):
+ (i) There exists λ^* ≥ λ∗ such that the positive solution of (1.1) ensured by Theorem 0(III-i)
+ is unique for every λ > λ^* (say uλ); more precisely, the positive solutions of (1.1) for
+ λ > λ^* form a C∞ curve C∞ = {(λ, uλ) : λ^* < λ} (i.e., λ ↦ uλ ∈ C^{2+θ}(Ω̄) is C∞),
+ which satisfies the following conditions:
+ (a) uλ is asymptotically stable,
+ (b) uλ → uD in H^1(Ω) as λ → ∞,
+ (c) uλ is decreasing, i.e., uλ1 > uλ2 in Ω if λ1 < λ2. Furthermore, if 0 < λ1 < λ2 with
+ λ1 ≤ λ^* < λ2, then u > uλ2 in Ω for any positive solution u of (1.1) with λ = λ1.
+ (ii) u > 0 in Ω̄ for every positive solution u of (1.1) and every λ > 0 (strong positivity).
209
+ Figure 1. Possible positive solution sets in the case where βΩ < 1.
+ Remark 1.2.
+ (i) Assertions (i-a) and (i-b) hold for any p > 1 (see Remark 3.3).
+ (ii) For (λ, uλ) ∈ C0 with 0 ≤ λ ≤ λ∗ in (1.7), we obtain results similar to those in assertion
+ (i-c). Indeed, λ ↦ uλ is decreasing for 0 < λ ≤ λ∗; and if 0 < λ1 < λ2 with
+ λ1 ≤ λ∗ < λ2, then uλ1 > u in Ω for any positive solution u of (1.1) with λ = λ2.
+ (iii) It is an open question whether the uniqueness of a positive solution of (1.1) holds globally,
+ for all λ > 0, i.e., whether one can take λ^* = λ∗. In that case, [0, ∞) ∋ λ ↦ uλ would be C∞
+ and decreasing.
+ (iv) For uniqueness and stability analysis of positive solutions for large parameters in non-
+ linear elliptic problems, we refer to [9, 34, 33, 24, 10, 25, 13, 20, 21, 22].
+ Our second main result is the counterpart of Theorem 0(III-i) and (III-iii) for the case where
+ βΩ = 1 and pq > 1.
+ Theorem 1.3. Assume that βΩ = 1 and pq > 1. Then, problem (1.1) possesses a positive
+ solution uλ for every λ > 0 such that uλ > 0 in Ω̄, which satisfies
+ uλ → 0 and uλ/∥uλ∥ → φΩ in H^1(Ω) as λ → ∞.    (1.9)
+ Remark 1.4.
+ (i) The existence assertion holds for any p > 1; thus, so does assertion (1.9) (see Remark 3.3).
+ (ii) As in Theorem 0(III-iii), assertion (1.9) remains valid for any positive solutions
+ (λ, uλ) of (1.1) (which may vanish somewhere on ∂Ω) with λ → ∞.
+ Our third main result is the counterpart of Theorem 0(III-iv), (IV-i), and (IV-iii) for the case
+ where βΩ = 1.
+ Theorem 1.5. Assume that βΩ = 1. Then, the following three assertions hold.
+ (i) If pq ≤ 1, then λ̄ < ∞, where λ̄ > 0 is defined by (1.8).
+ (ii) If pq ≠ 1, then the positive solution set {(λ, u)} of (1.1) does not meet the trivial line
+ {(λ, 0) : λ > 0} in the topology of H^1(Ω) (nor C(Ω̄)).
+ (iii) If pq ≥ 1, then it does not meet {(0, 0)} in the topology of H^1(Ω) (nor C(Ω̄)).
+ Theorem 1.5 suggests a conjecture (Remark 1.6) for the global extension of the C∞ positive solution
+ curve C0 given by (1.7) in the case where βΩ = 1 and pq ≤ 1.
+ Remark 1.6. Assume that βΩ = 1 and pq ≤ 1. Let C̃0 = {(λ, u)} ⊂ [0, ∞) × C(Ω̄) be
+ the component (maximal closed connected subset) of nonnegative solutions of (1.1) that
+ includes C0. From Theorem 0(I) and Theorem 1.5(i), C̃0 \ {(0, 1)} ⊂ {(λ, u) ∈ [0, ∞) × C(Ω̄) :
+ λ ≤ λ̄, u < 1 in Ω}. If we suppose that Γ0 := ( C̃0 \ {(0, 1)} ) ∩ {(λ, 0), (0, u) : λ ≥ 0, u ≥ 0} ≠ ∅,
+ then Theorem 1.5(ii),(iii) show that Γ0 = {(0, 0)} when pq < 1, and that Γ0 ⊂ {(λ, 0) : λ ≥ Λ0}
+ for some Λ0 > 0 when pq = 1. The existence of C̃0 is still an open question. Suggested
+ positive solution sets are illustrated in Figures 2 and 3.
273
+ Figure 2. Suggested positive solution set in the case where βΩ = 1 and pq < 1.
274
+ Figure 3. Suggested positive solution set in the case where βΩ = 1 and pq = 1,
275
+ and λc ∈ Γ0.
+ We conclude the Introduction by mentioning the stability of the trivial solution u = 0. A
+ linearized stability analysis does not work for u = 0 because u ↦ u^q is not differentiable at
+ u = 0. Instead, by constructing suitable sub- and supersolutions of (1.1), we attempt to
+ employ the Lyapunov stability criterion [26, Chapter 5] on the basis of the monotone iteration
+ method; this is developed in Section 5.
+ Notation: ∥ · ∥ denotes the usual norm of H^1(Ω); un ⇀ u∞ means that un converges weakly
+ to u∞ in H^1(Ω); and H^1_0(Ω) = {u ∈ H^1(Ω) : u = 0 on ∂Ω}. The integrals ∫_Ω f dx for
+ f ∈ L^1(Ω) and ∫_∂Ω g dσ for g ∈ L^1(∂Ω) are simply written as ∫_Ω f and ∫_∂Ω g, respectively.
+ | · | represents both the Lebesgue measure in Ω and the surface measure on ∂Ω.
302
+ The remainder of this paper is organized as follows. Sections 2 and 3 are devoted to the
303
+ preparation for the proofs of Theorems 1.1, 1.3 and 1.5. In Section 2, we develop the energy
304
+ method for the energy functional associated with (1.1).
305
+ In Section 3, we use the sub- and
306
+ supersolution method to prove existence and positivity results for positive solutions of (1.1).
307
+ We give proofs for Theorems 1.1 and 1.3 in Section 3. In Section 4, we prove Theorem 1.5.
308
+ Section 5 is devoted to a stability analysis of the trivial solution u = 0, which is based on
309
+ Lemma 3.1 and Theorem 5.1.
+ 2. Energy method
+ Let
+ E(u) = ∫_Ω (|∇u|^2 − u^2),    u ∈ H^1(Ω);
+ then, the next lemma is used several times in the following arguments.
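+ As a quick orientation (a one-line computation supplied here as a reader aid; it is not part of
+ the original text), the sign of E on the eigenline ⟨φΩ⟩ is governed by βΩ: integrating
+ −∆φΩ = βΩφΩ against φΩ over Ω gives
+ \[
+   E(s\varphi_\Omega) = s^2\Bigl(\int_\Omega |\nabla\varphi_\Omega|^2 - \int_\Omega \varphi_\Omega^2\Bigr)
+   = s^2(\beta_\Omega - 1)\int_\Omega \varphi_\Omega^2 ,
+ \]
+ which vanishes identically precisely in the borderline case βΩ = 1 considered below.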
+ Lemma 2.1. Let {un} ⊂ H^1(Ω) satisfy E(un) ≤ 0, un ⇀ u∞, and un → u∞ in L^2(Ω). Then,
+ u∞ ≠ 0 if ∥un∥ ≥ C for some C > 0.
+ Proof. If u∞ = 0, then un → 0 in L^2(Ω), and since E(un) ≤ 0 means ∫_Ω |∇un|^2 ≤ ∫_Ω u_n^2,
+ we deduce ∥un∥ → 0, contradicting ∥un∥ ≥ C.
323
+ We start by proving the following two propositions, which provide the asymptotic profile of
324
+ a positive solution of (1.1) as λ → ∞. It is understood that uD = 0 if βΩ = 1.
325
+ Proposition 2.2. Assume that βΩ ≤ 1. Let (λn, un) be a positive solution of (1.1) with λn → ∞.
326
+ Then, un → uD in H1(Ω).
+ Proof. We first assume that βΩ < 1. Because un < 1 in Ω, we substitute u = ϕ = un into (1.3)
+ to deduce that
+ ∫_Ω |∇un|^2 = ∫_Ω (u_n^2 − u_n^{p+1}) − λn ∫_∂Ω u_n^{q+1} ≤ ∫_Ω u_n^2 ≤ |Ω|;    (2.1)
+ thus, ∥un∥ is bounded. Immediately, up to a subsequence, un ⇀ u∞ ≥ 0, un → u∞ in L^2(Ω)
+ and L^2(∂Ω), and un → u∞ a.e. in Ω for some u∞ ∈ H^1(Ω). We then infer that
+ ∫_∂Ω u_n^{q+1} = (1/λn) [ − ∫_Ω |∇un|^2 + ∫_Ω (u_n^2 − u_n^{p+1}) ] ≤ (1/λn) ∫_Ω u_n^2 → 0,    (2.2)
+ which implies that ∫_∂Ω u_∞^{q+1} = 0; thus, u∞ ∈ H^1_0(Ω). From (1.3) with (λ, u) = (λn, un),
+ it follows that
+ ∫_Ω (∇un∇ϕ − unϕ + u_n^p ϕ) = 0,    ϕ ∈ H^1_0(Ω).
+ Taking the limit, u∞ is a nonnegative solution of (1.6), where we have used the Lebesgue
+ dominated convergence theorem to deduce that ∫_Ω u_n^p ϕ → ∫_Ω u_∞^p ϕ.
+ Then, we verify that u∞ ≠ 0. Since E(un) ≤ 0, the weak lower semicontinuity means that
+ E(u∞) ≤ liminf_{n→∞} E(un) ≤ limsup_{n→∞} E(un) ≤ 0.
+ If u∞ = 0, then it follows that ∥un∥ → 0. Here, we may assume that un → 0 a.e. in Ω. Say that
+ wn = un/∥un∥; then, up to a subsequence, wn ⇀ w∞ ≥ 0, wn → w∞ in L^2(Ω) and L^2(∂Ω), and
+ wn → w∞ a.e. in Ω. Since ∥wn∥ = 1, we deduce that w∞ ≠ 0 using Lemma 2.1. However, we
+ observe from (2.2) that
+ ∫_∂Ω w_n^{q+1} ≤ (1/λn) ∫_Ω w_n^2 ∥un∥^{1−q} → 0.
+ This implies that w∞ ∈ H^1_0(Ω). From (1.3) with (λ, u) = (λn, un), we see that
+ ∫_Ω (∇wn∇ϕ − wnϕ + wnϕ u_n^{p−1}) = 0,    ϕ ∈ H^1_0(Ω).
+ Taking the limit, ∫_Ω (∇w∞∇ϕ − w∞ϕ) = 0, where we have used the Lebesgue dominated
+ convergence theorem to obtain that
+ | ∫_Ω wnϕ u_n^{p−1} | ≤ ( ∫_Ω w_n^2 )^{1/2} ( ∫_Ω ϕ^2 u_n^{2(p−1)} )^{1/2} ≤ C ( ∫_Ω ϕ^2 u_n^{2(p−1)} )^{1/2} → 0.
+ This implies that w∞ is a nontrivial nonnegative solution of the problem
+ −∆w = w in Ω,    w = 0 on ∂Ω.
+ Thus, we deduce that βΩ = 1, which contradicts the assumption. The assertion that u∞ ≥ 0
+ and u∞ ≠ 0 means that u∞ is the unique positive solution uD of (1.6) by the strong maximum
+ principle.
+ It remains to show that un → u∞ in H^1(Ω). Observing that E(u∞) + ∫_Ω u_∞^{p+1} = 0 and
+ E(un) ≤ − ∫_Ω u_n^{p+1}, we deduce that
+ E(u∞) ≤ liminf_{n→∞} E(un) ≤ limsup_{n→∞} E(un) ≤ − lim_{n→∞} ∫_Ω u_n^{p+1} = − ∫_Ω u_∞^{p+1} = E(u∞),
+ where we have used the Lebesgue dominated convergence theorem again. Thus, E(un) → E(u∞),
+ i.e., ∥un∥ → ∥u∞∥. Since un ⇀ u∞, the desired assertion follows.
+ Next, we assume that βΩ = 1. Then, u∞ is a nonnegative solution of (1.6), and indeed u∞ = 0
+ because βΩ = 1 ([7]). Thus, ∥un∥ → 0, as desired.
+ In the case where βΩ = 1, we observe that E(u) ≥ 0 for u ∈ H^1_0(Ω). Indeed, we note that
+ {u ∈ H^1_0(Ω) : E(u) = 0} = ⟨φΩ⟩ := {sφΩ : s ∈ R}.
+ We then investigate the asymptotic profile of a positive solution (λn, un) of (1.1) with ∥un∥ → 0.
+ Proposition 2.3. Assume that βΩ = 1. Let (λn, un) be a positive solution of (1.1) such that
+ λn ≥ λ for some λ > 0 and ∥un∥ → 0. Then, we obtain that un/∥un∥ → φΩ in H^1(Ω).
+ Proof. Say that wn = un/∥un∥; then, up to a subsequence, wn ⇀ w∞ ≥ 0, and wn → w∞ in L^2(Ω)
+ and L^2(∂Ω) for some w∞ ∈ H^1(Ω). From (2.1) it follows that
+ λ ∫_∂Ω u_n^{q+1} ≤ ∫_Ω u_n^2.
+ We use the condition ∥un∥ → 0 to infer that
+ ∫_∂Ω w_n^{q+1} ≤ (∥un∥^{1−q}/λ) ∫_Ω w_n^2 → 0;
+ thus, ∫_∂Ω w_∞^{q+1} = 0, i.e., w∞ ∈ H^1_0(Ω).
545
+ 7
546
+
+ Since βΩ = 1 and E(wn) ≤ 0, the weak lower semicontinuity means that
+ 0 ≤ E(w∞) ≤ liminf_{n→∞} E(wn) ≤ limsup_{n→∞} E(wn) ≤ 0,
+ implying that E(wn) → E(w∞) = 0, i.e., ∥wn∥ → ∥w∞∥. Since wn ⇀ w∞, we deduce that
+ wn → w∞ in H^1(Ω) and w∞ = φΩ with ∥φΩ∥ = 1. Finally, because φΩ is unique, the desired
+ conclusion follows.
555
+
556
+ Remark 2.4. If we construct a positive solution (λn, un) of (1.1) without using (1.2), then
557
+ Propositions 2.2 and 2.3 remain valid for all p > 1.
+ For further analysis of the asymptotic behavior of a positive solution (λn, un) of (1.1) with
+ the condition that λn ≥ λ and ∥un∥ → 0, the orthogonal decomposition H^1(Ω) = ⟨φΩ⟩ ⊕ V
+ induced by ⟨φΩ⟩ is useful, where V denotes the orthogonal complement of ⟨φΩ⟩, given explicitly as
+ V = { v ∈ H^1(Ω) : ∫_Ω (∇v∇φΩ + vφΩ) = 0 }.
+ Note that ⟨φΩ⟩ and V are both closed subspaces of H^1(Ω) and ∥u∥ is equivalent to |s| + ∥v∥ for
+ u = sφΩ + v ∈ H^1(Ω) = ⟨φΩ⟩ ⊕ V.
+ Using the orthogonal decomposition, we write
+ un = snφΩ + vn ∈ ⟨φΩ⟩ ⊕ V    (2.3)
+ for a positive solution (λn, un) of (1.1) such that λn ≥ λ for some λ > 0 and ∥un∥ → 0 (under
+ the assumption of Proposition 2.3). Since un/∥un∥ → φΩ in H^1(Ω), it follows that
+ sn/∥un∥ → 1,    (2.4)
+ ∥vn∥/∥un∥ → 0,    (2.5)
+ ∥vn∥/sn → 0.    (2.6)
+ Because of (2.4), we may assume that sn > 0. Note that vn ≥ 0 on ∂Ω because φΩ = 0 on ∂Ω.
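+ For completeness (a standard Hilbert-space projection computation, added here as a reader aid
+ and using the normalization ∥φΩ∥ = 1 from Proposition 2.3), the coefficient sn and the component
+ vn in (2.3) are given explicitly by
+ \[
+   s_n = \int_\Omega \bigl(\nabla u_n\cdot\nabla\varphi_\Omega + u_n\,\varphi_\Omega\bigr),
+   \qquad v_n = u_n - s_n\,\varphi_\Omega ,
+ \]
+ and vn ∈ V follows directly from the definition of V.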
+ We then deduce the following result, which plays a crucial role in the proof of Theorem 1.5.
+ Lemma 2.5. Assume that βΩ = 1. Let {vn} be as introduced in (2.3). Then, there exists c > 0
+ such that
+ E(vn) + c ∫_∂Ω v_n^{q+1} ≤ 0 for sufficiently large n,    (2.7)
+ provided that one of the following conditions is satisfied:
+ (a) pq < 1;
+ (b) pq = 1 and λn → ∞;
+ (c) pq > 1 and λn is bounded above.
+ Proof. Substituting un = snφΩ + vn into (2.1), we deduce that
+ 2sn ∫_Ω (∇φΩ∇vn − φΩvn) + E(vn) + ∫_Ω (snφΩ + vn)^{p+1} + λn ∫_∂Ω v_n^{q+1} = 0.    (2.8)
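+ To see where (2.8) comes from (a short expansion supplied here for the reader; it is implicit in
+ the proof), expand E(un) and use E(φΩ) = 0 together with un = vn on ∂Ω:
+ \[
+   E(u_n) = s_n^2\,E(\varphi_\Omega)
+   + 2 s_n \int_\Omega \bigl(\nabla\varphi_\Omega\cdot\nabla v_n - \varphi_\Omega v_n\bigr) + E(v_n),
+   \qquad E(\varphi_\Omega) = (\beta_\Omega - 1)\int_\Omega \varphi_\Omega^2 = 0 ,
+ \]
+ so (2.1) with u = ϕ = un reduces to (2.8); the boundary term survives only through vn since
+ φΩ vanishes on ∂Ω.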
+ Using the divergence theorem,
+ ∫_Ω φΩvn = ∫_Ω (−∆φΩ) vn = ∫_Ω ∇φΩ∇vn + ∫_∂Ω (−∂φΩ/∂ν) vn;    (2.9)
+ thus, (2.8) implies that
+ −2sn ∫_∂Ω (−∂φΩ/∂ν) vn + E(vn) + ∫_Ω (snφΩ + vn)^{p+1} + λn ∫_∂Ω v_n^{q+1} = 0.
+ It follows that
+ E(vn) + (λn/2) ∫_∂Ω v_n^{q+1} + In ≤ 0    (2.10)
+ with
+ In = (λn/2) ∫_∂Ω v_n^{q+1} − 2sn ∫_∂Ω (−∂φΩ/∂ν) vn.    (2.11)
+ Once we verify that
+ In ≥ 0 for sufficiently large n,    (2.12)
+ we obtain (2.7) and complete the proof. To verify (2.12), we use the test function ϕ = φΩ to
+ deduce that
+ ∫_Ω (∇un∇φΩ − unφΩ + u_n^p φΩ) = 0.    (2.13)
+ Substituting un = snφΩ + vn into (2.13) and combining (2.9) with (2.13) provide
+ ∫_∂Ω (−∂φΩ/∂ν) (vn/s_n^p) = ∫_Ω (φΩ + vn/sn)^p φΩ.    (2.14)
+ We then consider either case (a) or (b). From (2.6), we deduce that
+ ∫_Ω (φΩ + vn/sn)^p φΩ → ∫_Ω φΩ^{p+1} > 0.
+ Taking into account (1.5), we may derive from (2.14) that
+ c s_n^p ≤ ∫_∂Ω vn
+ for some c > 0. By Hölder's inequality, it follows that
+ c s_n^p ≤ ∫_∂Ω vn ≤ |∂Ω|^{q/(q+1)} ( ∫_∂Ω v_n^{q+1} )^{1/(q+1)}.    (2.15)
+ Combining (2.11) with (2.15) and using Hölder's inequality, there exist c, c̃ > 0 such that
+ In ≥ (λn/2) ∫_∂Ω v_n^{q+1} − c sn ( ∫_∂Ω v_n^{q+1} )^{1/(q+1)}
+    = [ (λn/2) ( ∫_∂Ω v_n^{q+1} )^{q/(q+1)} − c sn ] ( ∫_∂Ω v_n^{q+1} )^{1/(q+1)}
+    ≥ { c̃ λn s_n^{pq} − c sn } ( ∫_∂Ω v_n^{q+1} )^{1/(q+1)}
+    = s_n^{pq} ( c̃ λn − c s_n^{1−pq} ) ( ∫_∂Ω v_n^{q+1} )^{1/(q+1)}.
+ Since sn → 0 by (2.4) and ∥un∥ → 0, assertion (2.12) follows.
+ We next consider case (c) and verify (2.12). Combining (2.11) with (2.14), it follows that
+ In = s_n^{p+1} [ (λn/2) ∫_∂Ω v_n^{q+1} / s_n^{p+1} − 2 ∫_Ω (φΩ + vn/sn)^p φΩ ].    (2.16)
+ Furthermore, we use the test function ϕ = 1 in (1.3) to infer that
+ − ∫_Ω un + ∫_Ω u_n^p + λn ∫_∂Ω u_n^q = 0.
+ Substituting un = snφΩ + vn,
+ − ∫_Ω (φΩ + vn/sn) + s_n^{p−1} ∫_Ω (φΩ + vn/sn)^p + ∫_∂Ω (λn v_n^q / sn) = 0,
+ which implies that
+ ∫_∂Ω (λn v_n^q / sn) → ∫_Ω φΩ > 0,
+ where we have used condition (2.6). Thus, we may deduce that
+ c (sn/λn) ≤ ∫_∂Ω v_n^q
+ for some c > 0. Using Hölder's inequality, we deduce that
+ c (sn/λn)^{(q+1)/q} ≤ ∫_∂Ω v_n^{q+1}    (2.17)
+ for some c > 0. Combining (2.16) with (2.17),
+ In ≥ s_n^{p+1} [ c s_n^{1/q − p} λ_n^{−1/q} − 2 ∫_Ω (φΩ + vn/sn)^p φΩ ]
+ for some c > 0. We observe that
+ s_n^{1/q − p} → ∞,    λ_n^{−1/q} ≥ c for some constant c > 0,    ∫_Ω (φΩ + vn/sn)^p φΩ → ∫_Ω φΩ^{p+1} > 0.
+ Thus, assertion (2.12) follows.
947
+
+ We conclude this section by establishing the uniqueness and stability results for a
+ positive solution of (1.1) in the case where βΩ < 1.
+ Proposition 2.6. Assume that βΩ < 1. Then, there exists Λ > 0 such that if λ > Λ, then the
+ following two assertions hold:
+ (i) Problem (1.1) has at most one positive solution.
+ (ii) A positive solution u of (1.1) satisfying u > 0 in Ω̄ is asymptotically stable.
+ Proof. We recall that the unique positive solution uD of (1.6) is asymptotically stable, i.e.,
+ γ1,D = inf { ∫_Ω (|∇ϕ|^2 − ϕ^2 + p u_D^{p−1} ϕ^2) : ϕ ∈ H^1_0(Ω), ∫_Ω ϕ^2 = 1 } > 0.    (2.18)
+ (i) Assume to the contrary that problem (1.1) has two distinct positive solutions (λn, un) and
+ (λn, vn) with λn → ∞. Note that un, vn < 1 in Ω. We may assume that un, vn → uD in H^1(Ω)
+ and a.e. in Ω. The difference wn = un − vn (which may change sign) satisfies
+ ∫_Ω (∇wn∇ϕ − wnϕ) + ∫_Ω ((vn + wn)^p − v_n^p) ϕ + λn ∫_{Γn} [((vn + wn)^q − v_n^q)/wn] wnϕ = 0    (2.19)
+ for ϕ ∈ H^1(Ω), where Γn = {x ∈ ∂Ω : wn(x) ≠ 0}. Note that ∥wn∥ → 0 and wn → 0 a.e. in Ω.
+ Substituting ϕ = wn into (2.19), the mean value theorem shows the existence of θn ∈ (0, 1)
+ such that
+ E(wn) + ∫_Ω p(vn + θnwn)^{p−1} w_n^2 + λn ∫_{Γn} [((vn + wn)^q − v_n^q)/wn] w_n^2 = 0;
+ thus, ψn = wn/∥wn∥ satisfies
+ E(ψn) + ∫_Ω p(vn + θnwn)^{p−1} ψ_n^2 + λn ∫_{Γn} [((vn + wn)^q − v_n^q)/wn] ψ_n^2 = 0.    (2.20)
+ From ∥ψn∥ = 1, we infer that up to a subsequence, ψn ⇀ ψ∞, and ψn → ψ∞ in L^2(Ω) and L^2(∂Ω)
+ for some ψ∞ ∈ H^1(Ω). Then, we claim that ψ∞ ∈ H^1_0(Ω) and ψ∞ ≠ 0. From (2.20), we deduce
+ that
+ λn ∫_{Γn} [((vn + wn)^q − v_n^q)/wn] ψ_n^2 ≤ ∫_Ω ψ_n^2 ≤ 1.
+ We observe that
+ ((vn + wn)^q − v_n^q)/wn ≥ q on Γn
+ because un, vn < 1 in Ω. It follows that λn q ∫_∂Ω ψ_n^2 ≤ 1. Passing to the limit, we deduce that
+ ψ∞ ∈ H^1_0(Ω). Indeed, ψ∞ ≠ 0 by Lemma 2.1.
+ Then, we assert in (2.20) that
+ ∫_Ω p(vn + θnwn)^{p−1} ψ_n^2 → ∫_Ω p u_D^{p−1} ψ_∞^2.
+ Indeed, we use
+ ∫_Ω p(vn + θnwn)^{p−1} ψ_n^2 = ∫_Ω p(vn + θnwn)^{p−1} ψ_∞^2 + ∫_Ω p(vn + θnwn)^{p−1} (ψ_n^2 − ψ_∞^2).
+ Since vn → uD and wn → 0 a.e. in Ω, and un, vn < 1 in Ω, the Lebesgue dominated convergence
+ theorem shows that
+ ∫_Ω p(vn + θnwn)^{p−1} ψ_∞^2 → ∫_Ω p u_D^{p−1} ψ_∞^2.
+ Using the fact that ∫_Ω |ψ_n^2 − ψ_∞^2| → 0 yields
+ ∫_Ω p(vn + θnwn)^{p−1} (ψ_n^2 − ψ_∞^2) → 0,
+ as desired.
+ Then, the weak lower semicontinuity allows us to deduce from (2.20) that
+ ∫_Ω (|∇ψ∞|^2 − ψ_∞^2 + p u_D^{p−1} ψ_∞^2) ≤ liminf_{n→∞} [ E(ψn) + ∫_Ω p(vn + θnwn)^{p−1} ψ_n^2 ] ≤ 0,
+ which contradicts ψ∞ ∈ H^1_0(Ω) and ψ∞ ≠ 0 in view of (2.18).
+ (ii) On the basis of (1.4), we claim that γ1 > 0 for sufficiently large λ > 0. Assume by
+ contradiction that a positive solution (λn, un) of (1.1) with the condition that λn → ∞ and
+ un > 0 in Ω̄ satisfies γn := γ1(λn, un) ≤ 0. This means that
+ ∫_Ω (|∇ϕn|^2 − ϕ_n^2 + p u_n^{p−1} ϕ_n^2) + λn q ∫_∂Ω u_n^{q−1} ϕ_n^2 = γn ≤ 0,    (2.21)
+ where ϕn := ϕ1,n is normalized as
+ ∫_Ω ϕ_n^2 + ∫_∂Ω ϕ_n^2 = 1.    (2.22)
+ Because ∫_Ω |∇ϕn|^2 ≤ ∫_Ω ϕ_n^2 ≤ 1 from (2.21) and (2.22), ∥ϕn∥ is bounded, which implies that up
+ to a subsequence, ϕn ⇀ ϕ∞, ϕn → ϕ∞ in L^2(Ω) and L^2(∂Ω), and ϕn → ϕ∞ a.e. in Ω for some
+ ϕ∞ ∈ H^1(Ω). Since u_n^{q−1} ≥ 1 from Theorem 0(I), assertion (2.21) gives us
+ λn q ∫_∂Ω ϕ_n^2 ≤ ∫_Ω ϕ_n^2 ≤ 1.
+ Passing to the limit, ϕ∞ = 0 on ∂Ω; thus, ϕ∞ ∈ H^1_0(Ω). From (2.22), ∫_Ω ϕ_∞^2 = 1 is also
+ deduced. By the weak lower semicontinuity, we derive from (2.21) that
+ ∫_Ω (|∇ϕ∞|^2 − ϕ_∞^2 + p u_D^{p−1} ϕ_∞^2) ≤ liminf_{n→∞} ∫_Ω (|∇ϕn|^2 − ϕ_n^2 + p u_n^{p−1} ϕ_n^2) ≤ 0.    (2.23)
+ Indeed, on the basis of the facts that un → uD in H^1(Ω) and un < 1 in Ω (see Theorem 0(I)
+ and Proposition 2.2), the Lebesgue dominated convergence theorem shows that
+ ∫_Ω u_n^{p−1} ϕ_n^2 = ∫_Ω u_n^{p−1} ϕ_∞^2 + ∫_Ω u_n^{p−1} (ϕ_n^2 − ϕ_∞^2) → ∫_Ω u_D^{p−1} ϕ_∞^2,
+ where we have used that un → uD a.e. in Ω. Assertion (2.23) contradicts ϕ∞ ∈ H^1_0(Ω) and
+ ∫_Ω ϕ_∞^2 = 1 in view of (2.18).
+ Remark 2.7. If we construct a positive solution u of (1.1) such that u > 0 in Ω̄ without using
+ (1.2), then assertion (ii) of Proposition 2.6 remains valid for all p > 1.
+ 3. Sub- and supersolutions
+ Consider the case where βΩ < 1, or where βΩ = 1 and pq > 1. Then, we first construct small
+ positive subsolutions of (1.1) and use them to establish an a priori lower bound for positive
+ solutions (λ, u) of (1.1) satisfying u > 0 in Ω̄. For a fixed τ > 0 and with a parameter ε > 0, we
+ set
+ φε(x) = ε(φΩ(x) + ε^τ),    x ∈ Ω̄,
+ which implies that φε ∈ C^{2+θ}(Ω̄) and φε > 0 in Ω̄.
+ We then use φε to formulate the following a priori lower bound for positive solutions u > 0
+ in Ω̄ of (1.1).
+ Lemma 3.1. Assume that βΩ < 1, or that βΩ = 1 and pq > 1. Let τ > (1−q)/q when βΩ < 1,
+ and let (1−q)/q < τ < p − 1 when βΩ = 1 and pq > 1. Then, for each Λ > 0 there exists
+ ε̄ = ε̄(τ, Λ) > 0 such that φε is a subsolution of (1.1), provided that λ ∈ [0, Λ] and ε ∈ (0, ε̄].
+ Furthermore, u ≥ φε̄ in Ω̄ for any positive solution u > 0 in Ω̄ of (1.1) with λ ∈ [0, Λ]. Here,
+ ε̄ does not depend on λ ∈ [0, Λ].
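+ For instance (a worked numerical check of the admissible range, supplied here as illustration):
+ if q = 1/2 and p = 3, then pq = 3/2 > 1 and
+ \[
+   \tfrac{1-q}{q} = 1 < \tau < 2 = p-1 ;
+ \]
+ for the choice τ = 3/2, the two quantities used in the proof below are
+ p − τ − 1 = 1/2 > 0 and q + τq − 1 = 1/4 > 0.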
+ Proof. We only consider the case where βΩ = 1 and pq > 1. The case where βΩ < 1 is proved
+ similarly. First, we verify the former assertion. We take 0 < ε ≤ 1 and then use the condition
+ p − τ − 1 > 0 to deduce that
+ −∆φε − φε + φ_ε^p ≤ ε^{1+τ} [ −1 + ε^{p−τ−1} (1 + max_Ω̄ φΩ)^p ] ≤ 0 in Ω
+ if ε > 0 is small. For Λ > 0 we use (1.5) and the condition τ > (1−q)/q to deduce that
+ ∂φε/∂ν + λ φ_ε^q ≤ −c1 ε + λ ε^{(1+τ)q} ≤ ε ( −c1 + Λ ε^{q+τq−1} ) ≤ 0 on ∂Ω
+ if 0 < ε ≤ (c1/Λ)^{1/(q+τq−1)}. The desired assertion follows.
+ Next, we argue by contradiction to verify the latter assertion. Assume by contradiction that
+ u ≱ φε̄ in Ω̄ for some positive solution u > 0 in Ω̄ with λ ∈ [0, Λ]. Because ε ↦ φε is increasing
+ and φε → 0 uniformly in Ω̄ as ε → 0, we can take ε1 ∈ (0, ε̄) such that
+ u ≥ φε1 in Ω̄,    u(x1) = φε1(x1) for some x1 ∈ Ω̄.    (3.1)
+ Take c > 0 such that u, φε1 ≥ c in Ω̄; then, choose K > 0 sufficiently large so that
+ fK(t) = Kt + t − t^p is increasing for t ∈ [0, max_Ω̄ u], and M > 0 sufficiently large so that
+ M − Λq c^{q−1} > 0. We use the subsolution φε1 (which is not a positive solution of (1.1)) to
+ deduce that
+ (−∆ + K)(u − φε1) ≥ fK(u) − fK(φε1) ≥ 0 (and ≢ 0) in Ω,    (3.2)
+ and, for x ∈ ∂Ω satisfying u > φε1,
+ (∂/∂ν + M)(u − φε1) ≥ −λu^q + λφ_{ε1}^q + M(u − φε1)
+    = [ M − λ ((u^q − φ_{ε1}^q)/(u − φε1)) ] (u − φε1)
+    ≥ (M − Λq c^{q−1}) (u − φε1) > 0.    (3.3)
+ Thus, the strong maximum principle and the boundary point lemma are applicable to infer that
+ u − φε1 > 0 in Ω̄, which contradicts (3.1).
+ On the basis of Lemma 3.1, we construct minimal and maximal positive solutions of (1.1) as
+ follows.
+ Proposition 3.2. Assume that βΩ < 1, or that βΩ = 1 and pq > 1. Then, problem (1.1) has
+ a minimal positive solution u̲λ ∈ C^{2+θ}(Ω̄) and a maximal positive solution ūλ ∈ C^{2+θ}(Ω̄) for
+ each λ > 0 such that 0 < u̲λ ≤ ūλ in Ω̄, meaning that any positive solution u of (1.1) with
+ u > 0 in Ω̄ satisfies u̲λ ≤ u ≤ ūλ in Ω̄. Moreover, both u̲λ and ūλ are weakly
+ stable, i.e., γ1(λ, u̲λ), γ1(λ, ūλ) ≥ 0.
+ Proof. It is clear that (λ, 1) is a supersolution of (1.1). Choose ε0 > 0 such that φε0 ≤ 1 in Ω̄;
+ then, Lemma 3.1 states that (λ, φε0) is a subsolution of (1.1). By Theorem 0(I) and Lemma 3.1,
+ a positive solution u > 0 in Ω̄ of (1.1) satisfies φε0 ≤ u ≤ 1 in Ω̄. Thus, the proposition is
+ a direct consequence of [3, (2.1) Theorem] and [2, Proposition 7.8].
+ Remark 3.3. In view of the construction, Lemma 3.1 and Proposition 3.2 remain valid for
+ any p > 1; therefore, Propositions 2.2, 2.3, and 2.6(ii) hold for any p > 1 for the positive solutions
+ (λn, un) of (1.1) with λn → ∞ constructed by Proposition 3.2 (see Remarks 2.4 and 2.7).
+ In the case where βΩ < 1, Propositions 2.6 and 3.2 ensure the existence of a unique positive
+ solution u(λ) of (1.1) for λ > Λ, which is asymptotically stable. The implicit function
+ theorem provides us with the following result.
+ Corollary 3.4. Assume that βΩ < 1. Then, {(λ, u(λ)) : λ > Λ} is a C∞ curve, i.e.,
+ λ ↦ u(λ) ∈ C^{2+θ}(Ω̄) is C∞. Moreover, it is decreasing, i.e., u(λ1) > u(λ2) in Ω̄ for λ2 > λ1 > Λ.
+ Proof. We verify the first assertion. Let (λ0, u(λ0)) be the unique positive solution of (1.1)
+ for λ0 > Λ. Since γ1(λ0, u(λ0)) > 0, the implicit function theorem applies at (λ0, u(λ0)); then,
+ we deduce, thanks to the uniqueness, that {(λ, u(λ)) : λ1 < λ ≤ λ2} is a C∞ curve for
+ λ1 < λ0 < λ2 such that u(λ) is asymptotically stable. The implicit function theorem applies again
+ at (λ2, u(λ2)); then, the curve is continued until λ = λ3 > λ2. Repeating the same procedure, the
+ curve is continued to λ = ∞ thanks to the a priori upper and lower bounds (Theorem 0(I) and
+ Lemma 3.1), as desired.
+ We next verify the second assertion. If λ1 < λ2, then u(λ1) is a supersolution of (1.1) for
+ λ = λ2. By Lemma 3.1, it is possible to construct a subsolution φε of (1.1) for λ = λ2 such
+ that 0 < φε ≤ u(λ1) in Ω̄. The sub- and supersolution method applies, and problem (1.1) has
+ a positive solution u for λ = λ2 such that φε ≤ u ≤ u(λ1) in Ω̄, where u ≢ u(λ1). Thanks
+ to Proposition 2.6(i), we obtain u = u(λ2). The desired assertion follows by using the strong
+ maximum principle and the boundary point lemma (as developed in (3.2) and (3.3)).
+ We conclude this section by employing the weak sub- and supersolution method [23] to show
+ global strong positivity for positive solutions of (1.1) in the case where βΩ < 1.
+ Proposition 3.5. Assume that βΩ < 1. Then, every positive solution (λ, u) of (1.1) satisfies
+ u > 0 in Ω̄.
+ Proof. Assume by contradiction that problem (1.1) possesses a positive solution (λ0, u0) for some
+ λ0 > 0 such that u0 = 0 somewhere on ∂Ω. Let λ = λ1 > max(λ0, Λ), for which problem (1.1)
+ has at most one positive solution by Proposition 2.6(i). Then, u0 is a weak supersolution of
+ (1.1) for λ = λ1. Indeed,
+ 0 = ∫_Ω (∇u0∇ϕ − u0ϕ + u_0^p ϕ) + λ0 ∫_∂Ω u_0^q ϕ ≤ ∫_Ω (∇u0∇ϕ − u0ϕ + u_0^p ϕ) + λ1 ∫_∂Ω u_0^q ϕ,
+ for ϕ ∈ H^1(Ω) with ϕ ≥ 0.
+ We next construct a weak subsolution of (1.1) for λ = λ1 that is smaller than or equal to
+ u0. Note that u0 ∈ C^θ(Ω̄) and u0 > 0 in Ω. Since βΩ < 1, it follows from the continuity and
+ monotonicity of βΩ with respect to Ω that we can choose a subdomain Ω1 ⋐ Ω with smooth
+ boundary ∂Ω1 such that βΩ1 < 1. Then, we deduce that
+ u0 ≥ c in Ω̄1    (3.4)
+ for some c > 0. We also deduce that if ε > 0 is sufficiently small, then
+ −∆(εφΩ1) ≤ εφΩ1 − (εφΩ1)^p in Ω1,
+ where φΩ1 is a positive eigenfunction associated with βΩ1. Consequently, applying the divergence
+ theorem to ∫_{Ω1} (−∆(εφΩ1))ϕ for ϕ ∈ H^1(Ω) with ϕ ≥ 0 yields
+ ∫_{Ω1} (∇(εφΩ1)∇ϕ − (εφΩ1)ϕ + (εφΩ1)^p ϕ) ≤ 0.    (3.5)
+ Define
+ Φε = εφΩ1 in Ω1,    Φε = 0 in Ω \ Ω1;
+ then Φε ∈ H^1(Ω) ∩ C(Ω̄). By virtue of (3.5), the linking technique [6, Lemma I.1] yields
+ ∫_Ω (∇Φε∇ϕ − Φεϕ + Φ_ε^p ϕ) + λ1 ∫_∂Ω Φ_ε^q ϕ = ∫_{Ω1} (∇Φε∇ϕ − Φεϕ + Φ_ε^p ϕ) ≤ 0.
+ Thanks to (3.4), we can take ε > 0 such that Φε ≤ u0 in Ω̄, as desired.
+ The weak sub- and supersolution method [23, Subsection 2.2] is now applicable and yields
+ a positive solution (λ1, u1) of (1.1) such that Φε ≤ u1 ≤ u0 in Ω̄. In particular, u1 = 0
+ somewhere on ∂Ω because u0 vanishes there. However, this contradicts Proposition 3.2 in view
+ of the uniqueness.
1443
+
+ Proof of Theorem 1.1. The uniqueness assertion follows from Proposition 2.6(i). Assertions
+ (i-a) and (i-b) follow from Propositions 2.6(ii) and 2.2, respectively. Assertion (i-c) is verified
+ by Corollary 3.4 together with an argument analogous to its proof. Assertion (ii) follows
+ from Proposition 3.5.
+ Proof of Theorem 1.3. The existence part follows from Proposition 3.2. Assertion (1.9) follows
+ from Propositions 2.2 and 2.3.
+ 4. Proof of Theorem 1.5
+ This section is devoted to the proof of Theorem 1.5.
+ (i) We prove assertion (i). Assume by contradiction that problem (1.1) has a positive solution
+ (λn, un) with λn → ∞. Then, Proposition 2.2 shows that ∥un∥ → 0; thus, Proposition 2.3 shows
+ that un/∥un∥ → φΩ in H^1(Ω). We apply Lemma 2.5(a) and (b); then, for un = snφΩ + vn ∈ ⟨φΩ⟩ ⊕ V
+ as in (2.3), we have (2.7) with (2.4)–(2.6).
+ Observe from (2.5) that ∥vn∥ → 0. Say that ψn = vn/∥vn∥; then, up to a subsequence, ψn ⇀
+ ψ∞ ≥ 0, and ψn → ψ∞ in L^2(Ω) and L^2(∂Ω) for some ψ∞ ∈ H^1(Ω). From (2.7), it follows that
+ c ∫_∂Ω ψ_n^{q+1} ≤ −E(ψn) ∥vn∥^{1−q} → 0,
+ so that ∫_∂Ω ψ_∞^{q+1} = 0, i.e., ψ∞ ∈ H^1_0(Ω). Lastly, we use the condition E(ψn) ≤ 0 derived from
+ (2.7) to follow the argument in the last paragraph of the proof of Proposition 2.3; then, we arrive
+ at the contradiction ψ∞ = φΩ ∈ ⟨φΩ⟩ ∩ V = {0}.
1478
+ (ii) We verify assertion (ii). We remark that the convergences (λn, un) → (λ∞, 0) with λ∞ ≥ 0
1479
+ in R × H1(Ω) and R × C(Ω) are equivalent for a positive solution (λn, un) of (1.1) with λn > 0.
1480
+ This is verified by the bootstrap argument [32, Lemma 3.3]. In fact, the proof of assertion (ii)
1481
+ is similar to that for assertion (i). Assume by contradiction that problem (1.1) has a positive
1482
+ solution (λn, un) with the condition that λn → λ∞ > 0 and ∥un∥ → 0. Lemma 2.5(a) and (c)
1483
+ apply; then, we arrive at a contradiction.
+ (iii) To verify assertion (iii), we prove the following three auxiliary lemmas. Say that
+ Un = λ_n^{−1/(1−q)} un.
+ Lemma 4.1. There exists C > 0 such that ∥Un∥ ≤ C for every positive solution (λn, un) of (1.1)
+ with λn > 0 satisfying (λn, un) → (0, 0) in R × H^1(Ω).
+ Proof. Assume by contradiction that ∥Un∥ → ∞. Say that wn = Un/∥Un∥; then, up to a subse-
+ quence, wn ⇀ w∞ ≥ 0, and wn → w∞ in L^2(Ω) and L^2(∂Ω) for some w∞ ∈ H^1(Ω). Since
+ E(wn) ≤ 0, Lemma 2.1 provides w∞ ≠ 0.
+ Recall that (λ, U) = (λn, Un) satisfies
+ ∫_Ω (∇U∇ϕ − Uϕ + λ^{(p−1)/(1−q)} U^p ϕ) + ∫_∂Ω U^q ϕ = 0,    ϕ ∈ H^1(Ω).    (4.1)
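+ As a check on the rescaling (a short verification added for the reader, using only the weak form
+ (1.3)): writing un = λ_n^{1/(1−q)} Un and dividing (1.3) by λ_n^{1/(1−q)} gives
+ \[
+   \int_\Omega\bigl(\nabla U\nabla\phi - U\phi\bigr)
+   + \lambda^{\frac{p}{1-q}-\frac{1}{1-q}}\int_\Omega U^p\phi
+   + \lambda^{\,1+\frac{q}{1-q}-\frac{1}{1-q}}\int_{\partial\Omega} U^q\phi = 0 ,
+ \]
+ and the exponents simplify to (p−1)/(1−q) and 0, respectively, which is exactly (4.1); the
+ normalization is chosen precisely so that the boundary term becomes parameter-free.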
+ Using the test function ϕ = 1 in (4.1), we deduce that
+ ∫_Ω Un = λ_n^{(p−1)/(1−q)} ∫_Ω U_n^p + ∫_∂Ω U_n^q = ∫_Ω u_n^{p−1} Un + ∫_∂Ω U_n^q,
+ implying
+ ∫_Ω wn = ∫_Ω u_n^{p−1} wn + ∫_∂Ω w_n^q ∥Un∥^{q−1}.    (4.2)
+ We may assume that un → 0 a.e. in Ω, and since un < 1 in Ω, we deduce that
+ ∫_Ω u_n^{p−1} wn = ∫_Ω u_n^{p−1} w∞ + ∫_Ω u_n^{p−1} (wn − w∞) → 0,
+ by applying the Lebesgue dominated convergence theorem and using the condition wn → w∞
+ in L^2(Ω). Then, passing to the limit in (4.2) yields ∫_Ω w∞ = 0, i.e., w∞ = 0, which is a
+ contradiction.
+ Lemma 4.2. Assume that βΩ = 1. Then, there is no positive solution U of (4.1) for λ = 0.
+ Proof. If one exists, then from (4.1) with λ = 0 and ϕ = 1, it follows that U > 0 on some
+ Γ ⊂ ∂Ω with |Γ| > 0, implying ∫_∂Ω (∂φΩ/∂ν) U < 0. We use the test function ϕ = φΩ to
+ deduce that
+ ∫_Ω (∇U∇φΩ − UφΩ) = 0.
+ However, the divergence theorem leads us to the contradiction
+ ∫_Ω φΩ U = ∫_Ω (−∆φΩ) U = ∫_Ω ∇φΩ∇U − ∫_∂Ω (∂φΩ/∂ν) U > ∫_Ω ∇φΩ∇U.
+ Lemma 4.3. Assume that βΩ = 1 and pq ≥ 1. Then, there exists C > 0 such that ∥Un∥ ≥ C
+ for every positive solution (λn, Un) of (4.1) with λn → 0+.
+ Proof. Assume by contradiction that (λn, Un) → (0, 0) in R × H^1(Ω) for a positive solution
+ (λn, Un) of (4.1). Say that wn = Un/∥Un∥; then, up to a subsequence, wn ⇀ w∞ ≥ 0, wn → w∞ in
+ L^{p+1}(Ω) and L^2(∂Ω) for some w∞ ∈ H^1(Ω). From (4.1) with (λ, U) = (λn, Un) and ϕ = Un,
+ it follows that
+ ∫_Ω (|∇Un|^2 − U_n^2 + λ_n^{(p−1)/(1−q)} U_n^{p+1}) + ∫_∂Ω U_n^{q+1} = 0.    (4.3)
+ We then deduce that
+ ∫_∂Ω w_n^{q+1} ≤ ∫_Ω w_n^2 ∥Un∥^{1−q} → 0;
+ thus, ∫_∂Ω w_∞^{q+1} = 0, i.e., w∞ ∈ H^1_0(Ω). We also deduce from (4.3) that
+ E(wn) = ∫_Ω (|∇wn|^2 − w_n^2) ≤ 0. Thus, we derive that wn → φΩ in H^1(Ω), using an
+ argument similar to that in the last paragraph of the proof of Proposition 2.3.
+ For a contradiction, we use the same strategy developed in the proof of assertion (i). To this
+ end, we consider the orthogonal decomposition Un = snφΩ + vn ∈ ⟨φΩ⟩ ⊕ V as in (2.3); then,
+ we obtain (2.4) to (2.6) with un replaced by Un. As in the proof of Lemma 2.5, we deduce the
+ following counterpart of (2.10) and (2.11) for (4.3):
+ E(vn) + (1/2) ∫_∂Ω v_n^{q+1} + Jn ≤ 0,
+ with
+ Jn = (1/2) ∫_∂Ω v_n^{q+1} − 2sn ∫_∂Ω (−∂φΩ/∂ν) vn.    (4.4)
+ In the same spirit as Lemma 2.5 (see (2.12)), we establish
+ E(vn) + (1/2) ∫_∂Ω v_n^{q+1} ≤ 0 for sufficiently large n,    (4.5)
+ by verifying that
+ Jn ≥ 0 for sufficiently large n.    (4.6)
+ Analogously to (2.14), we obtain
+ ∫_∂Ω (−∂φΩ/∂ν) vn = λ_n^{(p−1)/(1−q)} s_n^p ∫_Ω (φΩ + vn/sn)^p φΩ.
+ Using this identity, we deduce from (4.4) that
+ Jn = s_n^{p+1} [ (1/2) ∫_∂Ω v_n^{q+1} / s_n^{p+1} − 2 λ_n^{(p−1)/(1−q)} ∫_Ω (φΩ + vn/sn)^p φΩ ].    (4.7)
+ Furthermore, we use the test function ϕ = 1 in (4.1) to obtain
+ − ∫_Ω Un + λ_n^{(p−1)/(1−q)} ∫_Ω U_n^p + ∫_∂Ω U_n^q = 0.
+ Substituting Un = snφΩ + vn,
+ − ∫_Ω (φΩ + vn/sn) + λ_n^{(p−1)/(1−q)} s_n^{p−1} ∫_Ω (φΩ + vn/sn)^p + ∫_∂Ω (v_n^q / sn) = 0,
+ from which we use (2.6) with Un to infer that
+ ∫_∂Ω (v_n^q / sn) → ∫_Ω φΩ > 0.
+ Then, we may deduce that
+ c sn ≤ ∫_∂Ω v_n^q
+ for some c > 0. By Hölder's inequality, we deduce that
+ c s_n^{(q+1)/q} ≤ ∫_∂Ω v_n^{q+1}
+ for some c > 0. We use this inequality to derive from (4.7) that
+ Jn ≥ s_n^{p+1} [ c s_n^{1/q − p} − 2 λ_n^{(p−1)/(1−q)} ∫_Ω (φΩ + vn/sn)^p φΩ ]
+ for some c > 0; thus, (4.6) follows, since pq ≥ 1 gives 1/q − p ≤ 0 (so that s_n^{1/q−p} ≥ 1 for
+ large n), while λ_n^{(p−1)/(1−q)} → 0. Assertion (4.5) has now been established.
+ We end the proof of this lemma. Observe from (2.5) with Un that ∥vn∥ → 0. Then, we
+ develop the same argument as in the second paragraph of the proof of assertion (i) to arrive at
+ the same contradiction.
+ Employing the above lemmas, we now verify assertion (iii). Assume by contradiction that
+ (λn, un) → (0, 0) in R × H^1(Ω) for a positive solution (λn, un) of (1.1) with λn > 0. Then,
+ (λn, Un) with Un = λ_n^{−1/(1−q)} un is a positive solution of (4.1). Since Un is bounded in H^1(Ω)
+ by Lemma 4.1, we deduce that up to a subsequence, Un ⇀ U∞ ≥ 0, and Un → U∞ in L^{p+1}(Ω)
+ and L^2(∂Ω) for some U∞ ∈ H^1(Ω). Thanks to Lemma 4.3, we apply Lemma 2.1 to obtain
+ U∞ ≠ 0.
+ Furthermore, substituting (λ, U) = (λn, Un) into (4.1) and then taking the limit, we deduce
+ that
+ ∫_Ω (∇U∞∇ϕ − U∞ϕ) + ∫_∂Ω U_∞^q ϕ = 0.
+ This implies that U∞ is a nonnegative solution of (4.1) for λ = 0. Finally, Lemma 4.2 provides
+ U∞ = 0, which is a contradiction.
+ The proof of Theorem 1.5 is complete.
1854
+
1855
+ Remark 4.4. Assertions (ii) and (iii) of Theorem 1.5 are also derived from Lemma 3.1 when
1856
+ βΩ = 1 and pq > 1.
+ 5. Stability analysis of the trivial solution
+ In this last section, we consider the stability of the trivial solution u = 0. It is worthwhile
+ mentioning that a linearized stability analysis does not work for u = 0 because u ↦ u^q is not
+ differentiable at u = 0. The corresponding initial-boundary value problem is formulated as
+ follows:
+ ∂u/∂t (t, x) = ∆u + u − u^p in (0, ∞) × Ω,
+ ∂u/∂ν = −λu^q on (0, ∞) × ∂Ω,
+ u(0, x) = u0(x) ≥ 0 in Ω.    (5.1)
+ We use the method of monotone iterations to determine the Lyapunov stability of the trivial
+ solution u = 0 (see [26, Definition 5.1.1]).
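+ Although no numerics appear in this paper, the dynamics of (5.1) are easy to visualize in one
+ dimension. The following minimal sketch (an illustration of ours: the interval, grid, parameter
+ values, and the explicit Euler scheme are all assumptions, not taken from the paper) discretizes
+ (5.1) on Ω = (0, L) with the nonlinear flux condition imposed through ghost nodes. For L = 2
+ one has βΩ = (π/L)^2 > 1, so, in view of the discussion following Theorem 5.1 below, one
+ expects small nonnegative initial data to decay toward u = 0.
+ import numpy as np
+ 
+ # Grid for Omega = (0, L); all parameter values are illustrative choices.
+ L_len, N = 2.0, 200
+ dx = L_len / N
+ p, q, lam = 2.0, 0.5, 0.3          # exponents p > 1 > q > 0, harvesting effort lambda
+ dt = 0.4 * dx**2                   # explicit Euler step (CFL-type restriction)
+ 
+ u = 0.05 * np.ones(N + 1)          # small positive initial datum u0
+ 
+ for _ in range(250000):
+     # Ghost values enforcing du/dnu = -lam * u^q at x = 0 and x = L
+     # (outward normal derivative, central differences at the endpoints).
+     ghost_l = u[1] - 2.0 * dx * lam * u[0] ** q
+     ghost_r = u[-2] - 2.0 * dx * lam * u[-1] ** q
+     lap = np.empty_like(u)
+     lap[1:-1] = (u[2:] - 2.0 * u[1:-1] + u[:-2]) / dx**2
+     lap[0] = (ghost_l - 2.0 * u[0] + u[1]) / dx**2
+     lap[-1] = (u[-2] - 2.0 * u[-1] + ghost_r) / dx**2
+     u = np.maximum(u + dt * (lap + u - u ** p), 0.0)  # keep the iterate nonnegative
+ 
+ print("max u after time %.1f: %.3e" % (250000 * dt, u.max()))
+ Replacing L by a value larger than π (so that βΩ < 1) makes the same small datum grow
+ toward a positive steady state instead, in line with the instability statement below.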
+ When βΩ < 1, or when βΩ = 1 and pq > 1, we observe from Lemma 3.1 that u = 0 is unstable
+ in the following sense: for u0 ∈ C^2(Ω̄) sufficiently small such that u0 > 0 in Ω̄, the positive
+ solution u(t, x) of (5.1) corresponding to the initial value u0 moves away from 0 as t → ∞.
+ When βΩ > 1, for ε, δ, τ > 0, we set
+ ψδ,ε,τ(x) = δ(φΩ(x) + ε)^τ,    x ∈ Ω̄.
+ Let Ωρ := {x ∈ Ω : dist(x, ∂Ω) < ρ} for ρ > 0 be a tubular neighborhood of ∂Ω. Then, by (1.5),
+ for ρ0 > 0 small, we can choose a constant c3 = c3(ρ0) > 0 such that |∇φΩ|^2 ≥ c3 in Ω̄ρ for
+ 0 < ρ ≤ ρ0. If 0 < ρ ≤ ρ0, then there exists c4 = c4(ρ) > 0 such that φΩ ≥ c4 in Ēρ := Ω̄ \ Ωρ.
+ The following result then provides useful information about the stability of the trivial
+ solution u = 0.
+ Theorem 5.1. Assume that βΩ > 1. Then, for 1/βΩ < τ < 1 and ε > 0 small, there exists δ1 > 0
+ such that ψδ,ε,τ is a supersolution of (1.1) whenever 0 < δ ≤ δ1.
+ Proof. We write ψδ,ε,τ simply as ψδ,ε. By direct computation, we obtain
+ ∇ψδ,ε = δτ(φΩ + ε)^{τ−1} ∇φΩ,    (5.2)
+ ∆ψδ,ε = δτ(τ − 1)(φΩ + ε)^{τ−2} |∇φΩ|^2 + δτ(φΩ + ε)^{τ−1} ∆φΩ.    (5.3)
+ We see from (5.3) that for x ∈ Ωρ,
+ ∆ψδ,ε + ψδ,ε − ψ_{δ,ε}^p ≤ δτ(τ − 1)(φΩ + ε)^{τ−2} |∇φΩ|^2 + δ(φΩ + ε)^τ
+    = δ(φΩ + ε)^{τ−2} [ −τ(1 − τ)c3 + (ε + max_{Ω̄ρ} φΩ)^2 ].
+ We then find 0 < ρ1 ≤ ρ0 and ε1 > 0 such that
+ (ε + max_{Ω̄ρ1} φΩ)^2 ≤ τ(1 − τ)c3 for 0 < ε ≤ ε1,
+ and then,
+ ∆ψδ,ε + ψδ,ε − ψ_{δ,ε}^p ≤ 0 in Ωρ1.
+ Let us fix c4 = c4(ρ1), and let 0 < ε ≤ ε1. We also see from (5.3) that for x ∈ Eρ1,
+ ∆ψδ,ε + ψδ,ε − ψ_{δ,ε}^p ≤ δτ(φΩ + ε)^{τ−1} (−βΩ)φΩ + δ(φΩ + ε)^τ
+    ≤ δ(φΩ + ε)^{τ−1} { (1 − τβΩ)c4 + ε }.
+ We then determine 0 < ε2 ≤ ε1 such that (1 − τβΩ)c4 + ε2 ≤ 0, and then,
+ ∆ψδ,ε2 + ψδ,ε2 − ψ_{δ,ε2}^p ≤ 0 in Eρ1.
+ Finally, using (1.5), we see from (5.2) that
+ ∂ψδ,ε2/∂ν + λψ_{δ,ε2}^q ≥ δ^q ( −δ^{1−q} τ ε_2^{τ−1} c2 + λ ε_2^{τq} ) ≥ 0 on ∂Ω
+ if 0 < δ ≤ δ1 for some δ1 > 0.
+ In summary, ψδ,ε2 with 0 < δ ≤ δ1 is as desired.
+ From Theorem 5.1, one might expect that u = 0 is asymptotically stable in the case where
+ βΩ > 1, meaning that for u0 in the order interval [0, ψδ1,ε2,τ], the positive solution u(t, x) of (5.1)
+ associated with u0 tends to 0 as t → ∞. If this occurs, then Theorem 0(II) means that problem
+ (5.1) is bistable, with two nonnegative stable equilibria for 0 < λ ≤ λ∗ (one is uλ, and the other
+ is u = 0), which ecologically represents a conditional persistence strategy for the harvesting
+ effort λ. However, a difficulty arises from the fact that the monotone iteration scheme does not
+ work for (5.1) in the order interval [0, ψδ1,ε2,τ], because u ↦ (−u^q) does not satisfy the one-sided
+ Lipschitz condition [26, (4.1.19)] for u close to 0. Rigorous verification of the claim is an open
+ question.
+ References
+ [1] S. Alama, Semilinear elliptic equations with sublinear indefinite nonlinearities, Adv. Differential Equations 4 (1999), 813–842.
+ [2] H. Amann, Fixed point equations and nonlinear eigenvalue problems in ordered Banach spaces, SIAM Rev. 18 (1976), 620–709.
+ [3] H. Amann, Nonlinear elliptic equations with nonlinear boundary conditions, New developments in differential equations (Proc. 2nd Scheveningen Conf., Scheveningen, 1975), pp. 43–63, North-Holland Math. Studies, Vol. 21, North-Holland, Amsterdam, 1976.
+ [4] A. Ambrosetti, H. Brezis, G. Cerami, Combined effects of concave and convex nonlinearities in some elliptic problems, J. Funct. Anal. 122 (1994), 519–543.
+ [5] D. Arcoya, J. Carmona, B. Pellacci, Bifurcation for some quasilinear operators, Proc. Roy. Soc. Edinburgh Sect. A 131 (2001), 733–765.
+ [6] H. Berestycki, P.-L. Lions, Some applications of the method of super and subsolutions, Bifurcation and nonlinear eigenvalue problems (Proc., Session, Univ. Paris XIII, Villetaneuse, 1978), pp. 16–41, Lecture Notes in Math., 782, Springer, Berlin, 1980.
+ [7] H. Brezis, L. Oswald, Remarks on sublinear elliptic equations, Nonlinear Anal. 10 (1986), 55–64.
+ [8] R. S. Cantrell, C. Cosner, Spatial ecology via reaction-diffusion equations, Wiley Series in Mathematical and Computational Biology, John Wiley & Sons, Ltd., Chichester, 2003.
+ [9] E. N. Dancer, Uniqueness for elliptic equations when a parameter is large, Nonlinear Anal., Theory Methods Appl. 8 (1984), 835–836.
+ [10] E. N. Dancer, On the number of positive solutions of weakly nonlinear elliptic equations when a parameter is large, Proc. London Math. Soc. (3) 53 (1986), 429–452.
+ [11] D. G. de Figueiredo, J.-P. Gossez, P. Ubilla, Local superlinearity and sublinearity for indefinite semilinear elliptic problems, J. Funct. Anal. 199 (2003), 452–467.
+ [12] D. G. de Figueiredo, J.-P. Gossez, P. Ubilla, Multiplicity results for a family of semilinear elliptic problems under local superlinearity and sublinearity, J. Eur. Math. Soc. (JEMS) 8 (2006), 269–286.
+ [13] J. García-Melián, Uniqueness for degenerate elliptic sublinear problems in the absence of dead cores, Electron. J. Differential Equations (2004), No. 110, 16 pp.
+ [14] J. García-Melián, J. D. Rossi, A. Suárez, The competition between incoming and outgoing fluxes in an elliptic problem, Commun. Contemp. Math. 9 (2007), 781–810.
+ [15] J. García-Melián, C. Morales-Rodrigo, J. D. Rossi, A. Suárez, Nonnegative solutions to an elliptic problem with nonlinear absorption and a nonlinear incoming flux on the boundary, Ann. Mat. Pura Appl. (4) 187 (2008), 459–486.
+ [16] J. García-Azorero, I. Peral, J. D. Rossi, A convex–concave problem with a nonlinear boundary condition, J. Differential Equations 198 (2004), 91–128.
+ [17] D. Gilbarg, N. S. Trudinger, Elliptic partial differential equations of second order, Second edition, Springer-Verlag, Berlin, 1983.
+ [18] D. Grass, H. Uecker, T. Upmann, Optimal fishery with coastal catch, Nat. Resour. Model. 32 (2019), e12235, 32 pp.
+ [19] P. Korman, Exact multiplicity and numerical computation of solutions for two classes of non-autonomous problems with concave–convex nonlinearities, Nonlinear Anal. 93 (2013), 226–235.
+ [20] D. D. Hai, R. C. Smith, On uniqueness for a class of nonlinear boundary-value problems, Proc. Roy. Soc. Edinburgh Sect. A 136 (2006), 779–784.
+ [21] D. D. Hai, R. C. Smith, Uniqueness for singular semilinear elliptic boundary value problems, Glasg. Math. J. 55 (2013), 399–409.
+ [22] D. D. Hai, R. C. Smith, Uniqueness for singular semilinear elliptic boundary value problems II, Glasg. Math. J. 58 (2016), 461–469.
+ [23] V. K. Le, K. Schmitt, Some general concepts of sub- and supersolutions for nonlinear elliptic problems, Topol. Methods Nonlinear Anal. 28 (2006), 87–103.
+ [24] S. S. Lin, Some uniqueness results for positone problems when a parameter is large, Chinese J. Math. 13 (1985), 67–81.
+ [25] N. Mizoguchi, T. Suzuki, Equations of gas combustion: S-shaped bifurcation and mushrooms, J. Differential Equations 134 (1997), 183–215.
+ [26] C. V. Pao, Nonlinear parabolic and elliptic equations, Plenum Press, New York, 1992.
+ [27] M. H. Protter, H. F. Weinberger, Maximum principles in differential equations, Prentice-Hall, Inc., Englewood Cliffs, N.J., 1967.
+ [28] H. Ramos Quoirin, K. Umezu, The effects of indefinite nonlinear boundary conditions on the structure of the positive solutions set of a logistic equation, J. Differential Equations 257 (2014), 3935–3977.
+ [29] H. Ramos Quoirin, K. Umezu, Bifurcation for a logistic elliptic equation with nonlinear boundary conditions: a limiting case, J. Math. Anal. Appl. 428 (2015), 1265–1285.
+ [30] J. D. Rossi, Elliptic problems with nonlinear boundary conditions and the Sobolev trace theorem, Stationary partial differential equations, Vol. II, 311–406, Handb. Differ. Equ., Elsevier/North-Holland, Amsterdam, 2005.
+ [31] N. Tarfulea, Positive solution of some nonlinear elliptic equation with Neumann boundary conditions, Proc. Japan Acad. Ser. A Math. Sci. 71 (1995), 161–163.
+ [32] K. Umezu, Logistic elliptic equation with a nonlinear boundary condition arising from coastal fishery harvesting, Nonlinear Anal. Real World Appl. 70 (2023), Paper No. 103788, 29 pp.
+ [33] H. Wiebers, S-shaped bifurcation curves of nonlinear elliptic boundary value problems, Math. Ann. 270 (1985), 555–570.
+ [34] M. Wiegner, A uniqueness theorem for some nonlinear boundary value problems with a large parameter, Math. Ann. 270 (1985), 401–402.
+ Department of Mathematics, Faculty of Education, Ibaraki University, Mito 310-8512, Japan
+ Email address: kenichiro.umezu.math@vc.ibaraki.ac.jp
7tFLT4oBgHgl3EQfsi8k/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
89AzT4oBgHgl3EQfSfsp/content/tmp_files/2301.01232v1.pdf.txt ADDED
@@ -0,0 +1,1779 @@
+ Sample efficient graph classification using binary Gaussian boson sampling
+ Amanuel Anteneh∗
+ Department of Computer Science, University of Virginia, Charlottesville, Virginia 22903, USA†
+ Olivier Pfister‡
+ Department of Physics, University of Virginia, Charlottesville, Virginia 22903, USA
+ (Dated: January 4, 2023)
+ We present a variation of a quantum algorithm for the machine learning task of classification with
+ graph-structured data. The algorithm implements a feature extraction strategy that is based on
+ Gaussian boson sampling (GBS), a near-term model of quantum computing. However, unlike the
+ currently proposed algorithms for this problem, our GBS setup requires only binary (light/no-light)
+ detectors, as opposed to photon-number-resolving detectors. These detectors are technologically
+ simpler and can operate at room temperature, making our algorithm less complex and less costly
+ to implement on physical hardware. We also investigate the connection between graph theory
+ and the matrix function called the Torontonian, which characterizes the probabilities of binary
+ GBS detection events.
+ I. INTRODUCTION
+ Graphs are among the most versatile data structures used in computing, and developing
+ machine learning methods for working with graph-structured data is a growing subfield of
+ machine learning research. Graph classification, in particular, has useful applications in fields
+ such as bioinformatics, network science, and computer vision, as many of the objects studied
+ in these fields can easily be represented as graphs. However, using graph-structured data with
+ machine learning models is not a straightforward task. This is because one of the most common
+ ways of representing a graph for computational applications, i.e., as an adjacency matrix, cannot
+ easily be used as an input to machine learning classifiers, which primarily take vector-valued
+ data as their inputs. Therefore, a common way of working with graph-structured data is to
+ define a feature map φ that maps a graph G to a vector in a Hilbert space called a feature
+ space. From there, a function κ, called a kernel, is defined that measures the similarity of two
+ graphs in the feature space. An example of a feature map from R^2 → R^3 is shown in Fig. 1.
+ Kernel methods refer to machine learning algorithms that learn by comparing pairs of data
+ points using this similarity measure. In our context we have a set of graphs G, and we call a
+ kernel κ a graph kernel if it is a function of the form κ : G × G → R [1, 2]. The most common
+ example of a kernel function is the feature space's inner product κ(x, x′) = ⟨φ(x), φ(x′)⟩. The
+ goal of such methods is to construct mappings to feature vectors whose entries (the features)
+ relate to relevant information about the graphs. Using a Gaussian boson sampling (GBS)
+ device to construct graph kernels was an idea first proposed by Schuld et al. in Ref. 3.
+ ∗ asa2rc@virginia.edu
+ † Current address: Booz Allen Hamilton, Arlington, Virginia 22202, USA
+ ‡ olivier.pfister@gmail.com
+ FIG. 1: In the original input space R^2 the data points, which belong either to the class 'red'
+ or 'blue', are not separable by a linear function (the decision boundary), but after mapping the
+ points to feature vectors in a higher-dimensional space R^3 a linear function is able to separate
+ the two classes. This linear decision boundary can be calculated by supervised machine learning
+ models such as a support vector machine. In our case the input space is the set of all undirected
+ graphs, which we denote as G.
68
+ Boson sampling was first proposed by Aaronson and
69
+ Arkhipov [4] as a task—generating the photon-counting
70
+ outcomes of the “quantum Galton board” constituted by
71
+ an M ×M optical interferometer fed with single photons
72
+ into some of its input ports—that is strongly believed
73
+ to be intractable to classical computers. The reason for
74
+ this intractability is that calculating the probability dis-
75
+ tribution for generating random outcomes using Monte
76
+ Carlo simulations requires calculating the permanent of
77
+ an M × M matrix. Calculating the permanent of a gen-
78
+ eral matrix is known to be #P-complete [5] which is a
79
+ class of problems comparable to the class of NP-complete
80
+ problems in their difficulty. Gaussian boson sampling [6]
81
+ is a variant of boson sampling in which the single-photon
82
+ inputs are replaced with single-mode squeezed states, as
83
+ produced, for example, by two-photon-emitting optical
84
+ arXiv:2301.01232v1 [quant-ph] 3 Jan 2023
85
+
86
+ [Figure 1 graphic: data points in the input space and the feature space, with the linear decision boundary drawn in the feature space.]
92
+ parametric amplifiers [7]. The GBS probability distribu-
93
+ tion is governed by the Hafnian of an M × M matrix.
94
+ Calculating the Hafnian of a general square matrix can
95
+ be reduced to the task of calculating permanents there-
96
+ fore calculating the Hafnian is also #P-complete. In both
97
+ cases, a quantum machine implementing boson sampling
98
+ or GBS can easily sample from these hard-to-calculate
99
+ probability distributions, just because they are “wired-
100
+ in,” and this constitutes the “quantum advantage” that
101
+ was recently demonstrated in optical experiments [8, 9].
102
+ Note also that the initial “quantum supremacy” result
103
+ obtained by Google on a superconducting qubit array [10]
104
+ was a quantum (circuit) sampling result as well.
105
+ Beyond these necessary initial steps of demonstrating
106
+ that quantum hardware can indeed reach regions inacces-
107
+ sible to classical hardware, a subsequent question is that
108
+ of the utility of a sampling task. Whereas the usefulness
109
+ of sampling in and of itself is far from established, we
110
+ know that the histograms produced by statistically sig-
111
+ nificant sampling constitute empirical probability distri-
112
+ butions that tend toward the true, classically intractable
113
+ probability distributions for sample numbers linear in
114
+ the number of possible outcomes [11]. The problem is
115
+ that this very number of possible outcomes grows expo-
116
+ nentially with M in a M-qubit quantum circuit in gen-
117
+ eral [12], and exponentially or super-exponentially with
118
+ M in an M-optical-mode boson or Gaussian boson sam-
119
+ pler, which dispels any notion of quantum advantage for
120
+ calculating the corresponding quantum probability dis-
121
+ tributions.
122
+ One direction that has been explored out of this co-
123
+ nundrum is the binning of GBS measurements results
124
+ into outcome classes whose cardinality scales favorably
125
+ (e.g. polynomially) with the problem size (the GBS mode
126
+ number). The immediate downside of such an approach
127
+ is loss of information it entails, which impacts usefulness.
128
+ However, graph classification using feature vectors and
129
+ coarse-graining might provide advantageous GBS appli-
130
+ cations. This was first pointed out by Schuld et al. [3].
131
+ In this paper, we show that a technologically simpler
132
+ version of GBS, which we term binary GBS, can achieve
133
+ comparable or better performance. The paper is struc-
134
+ tured as follows. In Sec.II we give broad reminders about
135
+ GBS and graph theory (with details in Appendix A) and
136
+ the current GBS graph kernel from Ref. 3.
137
+ We then
138
+ present our graph kernel in Sec.III along with results
139
+ from numerical experiments and analyses of its complex-
140
+ ity, features and advantages.
141
+ II.
142
+ REMINDERS ABOUT GAUSSIAN BOSON
143
+ SAMPLING (GBS) AND GRAPH THEORY
144
+ A.
145
+ Gaussian Boson Sampling
146
+ As mentioned above, an M-mode GBS device com-
147
+ prises M single-mode-squeezing (SMS) inputs, an M ×M
148
+ optical interferometer, and M photon-number-resolving
149
+ FIG. 2: Example of a 3-mode Gaussian boson sampler.
150
+ Mode i ∈ {1, 2, 3} starts in the vacuum state |0⟩, is then
151
+ squeezed by ˆS(ri) and passes through the network of
152
+ two beamsplitters (the interferometer) before the
153
+ number of photons in each mode is measured by the
154
+ detectors Di∈{1,2,3}.
155
+ (PNR) detectors, see Fig.2 for an example. The latter
156
+ have come of age in superconducting devices such as tran-
157
+ sition edge sensors [13] and superconducting nanowire
158
+ single-photon detectors [14].
159
+ Both the former and the
160
+ latter have recently been used to make PNR measure-
161
+ ments of as many as 100 photons [15, 16].
162
+ An M-mode Gaussian boson sampler prepares a Gaus-
163
+ sian (Wigner function) quantum state by the M squeez-
164
+ ers and the interferometer.
165
+ The squeezers output
166
+ squeezed light into the interferometer and the photons
167
+ are then passed through the interferometer after which
168
+ the M detectors detect what modes the photons end up
169
+ in resulting in a detection event.
170
+ We denote a detec-
171
+ tion event as n = (n1, ..., nM), where ni is the photon
172
+ count in the ith mode and the total number of photons
173
+ is n = �M
174
+ i=1 ni.
175
+ We now consider binary detectors, such as single-
176
+ photon avalanche photodiodes, which are single-photon
177
+ sensitive but aren’t PNR and give the same signal how-
178
+ ever many photons were absorbed. In this case, we have
179
+ ni ∈ {0, 1} where ni = 0 indicates zero photons were
180
+ detected in that mode and ni = 1 indicates that at least
181
+ one photon was detected. When using binary detectors
182
+ we no longer know the total photon number n so we use
183
+ N to denote the number of detectors that detect photons
184
+ leading to �M
185
+ i=1 ni = N ≤ M.
186
+ An M-mode Gaussian state is fully described by a co-
187
+ variance matrix Σ ∈ R2M×2M and a displacement vector
188
+ d ∈ R2M [17].
189
+ B.
190
+ Graph theory
191
+ In this paper we define a graph G = (V, E) as a
192
+ set of vertices V
193
+ = {v1, v2, ...} and a set of edges
194
+ E = {(v1, v1), (v1, v2), ...(vi, vj), ...} that connect vertices
195
+ if the edge value is not zero. A graph can be unweighted,
196
+ with all nonzero edge weights equal to 1, or weighted, for
197
+ example with real edge weights in GBS. For undirected
198
+ graphs, which is what we will exclusively work with in
199
+
200
+ [Figure 2 graphic: three vacuum inputs |0⟩ pass through squeezers S(r1), S(r2), S(r3) and the beamsplitters B(p1, T1), B(p2, T2) before being measured by detectors D1, D2, D3.]
211
+ this paper, (vi, vj) = (vj, vi), ∀i, j. The size of a graph is
212
+ equal to the cardinality |V | of its vertex set. The degree
213
+ of a vertex v is the number of edges that are connected to
214
+ it. The maximum degree of a graph is the largest degree
215
+ of a vertex in its vertex set.
216
+ Graphs can be represented in a number of ways such as
217
+ a diagram, Fig.3a, or a more computationally useful way
218
+ as an adjacency matrix, Fig.3b. The adjacency matrix
219
+ of an undirected graph G with |V | vertices is a |V | × |V |
220
+ symmetric matrix A with entries aij where aij is the
221
+ weight of the edge connecting vertices i and j.
222
+ [Figure 3a graphic: an undirected weighted graph on vertices 1-4, with edge weights 4, 3, 6 and 9 as read off the adjacency matrix below.]
+ (a) Undirected weighted 4-vertex graph with 4 edges
+
+     ( 0 4 3 0 )
+ A = ( 4 0 6 9 )
+     ( 3 6 0 0 )
+     ( 0 9 0 0 )
+
+ (b) Adjacency matrix of graph
241
+ FIG. 3: 4-vertex graph and its corresponding adjacency
242
+ matrix
243
+ C.
244
+ Sample complexity of GBS
245
+ 1.
246
+ The problem with using GBS beyond sampling
247
+ The sample complexity of a machine learning algo-
248
+ rithm refers to the number of samples or amount of data
249
+ required to learn some target function. In the case of
250
+ GBS applications it refers to the number of samples we
251
+ need to generate from the GBS device to learn or approx-
252
+ imate a probability distribution over some set of the pho-
253
+ ton detection events. This complexity type is extremely
254
+ important to examine for any applications of GBS as it
255
+ could potentially render certain applications of GBS in-
256
+ tractable for larger problem sizes.
257
+ For example it was shown that the GBS device uti-
258
+ lizing PNR detectors can encode the graph isomorphism
259
+ problem [18]. This is done by encoding two graphs into
260
+ two GBS devices and sampling each S times. The S sam-
261
+ ples could then be used, in principle, to reconstruct the
262
+ probability distribution over all possible detection events
263
+ n for a given M and n. However, this cannot be done effi-
264
+ ciently enough to provide a quantum advantage. Indeed,
265
+ we know from Refs. 11, 19 that reconstructing a proba-
266
+ bility distribution D over a discrete finite set Ω of cardi-
267
+ nality |Ω| from an empirical distribution ˆD constructed
268
+ from samples from D we require
269
+ S = ⌈ 2( ln(2)|Ω| + ln(1/δ) ) / ϵ² ⌉                                  (1)
+ samples to guarantee that
+ p( ||D − ˆD||1 ≥ ϵ ) ≤ δ                                              (2)
+ where ||D − ˆD||1 denotes the L1 distance between D and
+ ˆD. In other words we require
+ O( ( |Ω| + ln(1/δ) ) / ϵ² )                                           (3)
286
+ samples to ensure with probability at most δ that the
287
+ sum of the absolute values of the errors on the empirical
288
+ probability distribution is ϵ or greater.
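+ As a concrete check of Eq. (1), the short Python sketch below (our own illustration; the function name is ours) evaluates the bound for the outcome counts used later in Sec. III C:
+
+   import math
+
+   def required_samples(num_outcomes: int, eps: float, delta: float) -> int:
+       """Sample-count bound of Eq. (1) for |Omega| outcomes."""
+       return math.ceil(2 * (math.log(2) * num_outcomes
+                             + math.log(1 / delta)) / eps ** 2)
+
+   print(required_samples(32, 0.06, 0.01))  # ~14900, the "about S = 15000" for nu
+   print(required_samples(7, 0.06, 0.01))   # ~5300, the "about S = 6000" for mu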
289
+ This means the number of samples we need to approx-
290
+ imate a probability distribution scales linearly with the
291
+ number of elements in it’s sample space i.e. the num-
292
+ ber of outcomes. In the case where D is the probability
293
+ distribution over the set of all possible PNR detection
294
+ events the number of such events for a given number of
295
+ modes M and maximum number of photons n is
296
+ |Ω| = C(n + M − 1, M − 1) = (n + M − 1)! / ( n! (M − 1)! )            (4)
303
+ which in number theory is also known as the formula for
304
+ the number of weak compositions of an integer n into M
305
+ parts. As shown in appendices B and C under the as-
306
+ sumption that the number of modes scales quadratically
307
+ with the number of photons, M ∈ O(n2), this quantity
308
+ grows super-exponentially with M and in general scales
309
+ as O((n + M − 1)M−1) meaning that as the size of the
310
+ graphs increase, and therefore as the number of modes
311
+ of the GBS device increase, we require an exponential
312
+ number of samples to ensure the algorithm can give us
313
+ the correct result within a certain probability. Therefore
314
+ while the algorithm may in principle be able to decide
315
+ graph isomorphism, it is sample inefficient to an expo-
316
+ nential degree, making it intractable to implement even
317
+ with a fault tolerant quantum computer.
318
+ 2.
319
+ Coarse graining of sample distributions
320
+ However, a method was suggested in [18] to coarse-grain
321
+ the probability distribution by combining outcomes into
322
+ groups called orbits. Coarse-graining in this sense means
323
+ to construct a new probability distribution over the set
324
+ of these groups, the cardinality of which is less than the
325
+ original set of all possible detection events. An orbit On
326
+ consists of a detection event n and all of its permutations.
327
+ For example the orbit that contains the detection event
328
+ n = (1, 2, 2) also contains the detection events (2, 1, 2)
329
+ and (2, 2, 1). The number of orbits for a 4-mode GBS
330
+ device is equal to the number of ways one can write n1 +
331
+ n2 + n3 + n4 = n, where the order of the summands does
332
+ not matter. This is called the number of partitions of
333
+ the integer n into M parts and from the number theory
334
+ literature [20] it is known to behave asymptotically as
335
+ |Ω| ≈ e^( π√( 2(n−M)/3 ) ) / ( 4√3 (n − M) ),   M ≤ n ≤ 2M.           (5)
344
+
345
346
+ If we assume the number of photons grows linearly with
347
+ the number of modes, n ∈ Θ(M) → n = 2M, we have
348
+ the following asymptotic bound on the number of orbits
349
+ ( 1/(4√3 M) ) e^( π√(2M/3) ) ∈ O( e^( π√(2M/3) ) / M ).               (6)
360
+ This means the number of orbits, which is now the num-
361
+ ber of outcomes |Ω| from Eq. 1, grows like M⁻¹ e^(π√(2M/3))
363
+ meaning we would have a sample complexity of
364
+ O( ( M⁻¹ e^(π√(2M/3)) + ln(1/δ) ) / ϵ² )                              (7)
372
+ which is subexponential but still intractable for large M.
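+ The orbit counts behind Eqs. (5)-(7) can be checked numerically. The sketch below is our own helper, not part of the paper: it counts partitions of n with parts of size at most max_part, which by a standard conjugation argument equals the number of partitions of n into at most max_part parts, i.e., the orbits of an M-mode device at total photon number n:
+
+   from functools import lru_cache
+
+   @lru_cache(maxsize=None)
+   def num_orbits(n: int, max_part: int) -> int:
+       """Partitions of n with parts <= max_part; with max_part = M this
+       counts GBS orbits of an M-mode device at total photon number n."""
+       if n == 0:
+           return 1
+       if n < 0 or max_part == 0:
+           return 0
+       return num_orbits(n - max_part, max_part) + num_orbits(n, max_part - 1)
+
+   # e.g. a 4-mode device at n = 5 photons has num_orbits(5, 4) = 6 orbits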
373
+ 3.
374
+ Sample complexity of previously proposed GBS graph
375
+ kernels
376
+ The first GBS-based graph kernel proposed in [3]
+ maps a graph G to feature vectors in a feature space
+ φ : G → f = (f1, f2, ..., fD) ∈ R^D, where fi = p(O^i_n)
+ is the probability of detecting a detection event from the
+ orbit O^i_n. This kernel was shown to perform well against
+ three of the four classical kernels we use as benchmarks
+ in this paper. However, a shortcoming of this method is
+ that the sample complexity is O(M⁻¹ e^(π√(2M/3))).
+ The second GBS kernel is of the form φ : G →
+ f = (f1, f2, ..., fD) ∈ R^D, with fi = p(M^i_{n,∆s}), where
+ p(M^i_{n,∆s}) is the probability of detecting a detection event
+ that belongs to the “meta-orbit” M^i_{n,∆s}. A meta-orbit
+ M_{n,∆s} is uniquely defined by a total photon number n
+ and ∆s, which is defined as
+ ∆s = { n : Σ_i ni = n ∧ ∀i : ni ≤ s }.                                (8)
+ Therefore a meta-orbit consists of all detection events
+ whose total photon number is equal to n and in which no detec-
+ tor counts more than s photons. It is claimed that this
+ strategy partitions the set of all PNR detection events
+ into a polynomial number of subsets in n [21].
406
+ III.
407
+ THE ALGORITHM
408
+ A.
409
+ GBS with binary detectors and its relation to
410
+ graph theory
411
+ While the relationship between GBS with PNR de-
412
+ tectors and graph theory has been thoroughly explored,
413
+ there has been little exploration of how GBS with bi-
414
+ nary detectors fits into the picture. In this section we
415
+ shed some light on the relationship between the two.
416
+ As stated before when using binary detectors the detec-
417
+ tion outcomes are of the form nbin = (n1, ..., nM) where
418
+ ni ∈ {0, 1} ∀i and ni = 1 indicates the ith detector de-
419
+ tected one or more photons. The probability of detect-
420
+ ing a detection outcome with binary detectors is charac-
421
+ terized by a matrix function called the Torontonian, to
422
+ which the same arguments for classical intractability as
423
+ for the Hafnian can be extended [22]. The probability of
424
+ a given binary detection event nbin is given by
425
+ p(nbin) = Tor(O_nbin) / √det(Q) = Tor(X Ã_nbin) / √det(Q)             (9)
+ where Q is defined in Eq. (A2), O = I − Q⁻¹, and Tor()
+ is the Torontonian of a 2N × 2N matrix A defined as
+ Tor(A) = Σ_{Z∈P([N])} (−1)^|Z| ( 1 / √det(I − A_Z) ).                 (10)
+ Here P([N]) is the power set, the set of all possible
444
+ subsets, of the set [N] = {1, 2, ..., N}. The probability of
445
+ a PNR detection event n can be written in terms of the
446
+ matrix O as
447
+ p(n) = (1/√det(Q)) Haf(Ãn)/n! = (1/√det(Q)) Haf((XO)n)/n!             (11)
460
+ The probability of a binary GBS detection event is
461
+ simply the sum of all probabilities of the corresponding
462
+ PNR detection events.
463
+ A useful example to illustrate
464
+ this is a 4-mode Gaussian boson sampler programmed
465
+ according to some adjacency matrix A of a graph G. Sup-
466
+ pose we use binary detectors and measure the detection
467
+ event nbin = (1, 0, 1, 0).
468
+ The corresponding detection
469
+ events when using PNR detectors would be of the form
470
+ n = (n1, 0, n3, 0) where n1, n3 > 0. We will define N to
471
+ be the set of all possible 4-mode PNR detection events
472
+ with 0’s in the 2nd and 4th index, i.e. only the 2nd and
473
+ 4th detectors detect no photons. From this we have
474
+ p((1, 0, 1, 0)) = Tor(X Ã_(1,0,1,0)) / √det(Q) = Σ_{n∈N} p(n) = Σ_{n∈N} Haf²(An) / ( n! √det(Q) ).          (12)
489
+ This means the Torontonian of X ˜A is proportional to an
490
+ infinite sum of Hafnians as there are an infinite number
491
+ of integer lists of the form (n1, 0, n3, 0) where n1, n3 > 0.
492
+ In a real GBS experiment, however, the energy is finite
493
+ and therefore the measured probabilities of these events
494
+ would be equal to a finite version of this sum where all
495
+ detection events with total photon number greater than
496
+ some cutoff photon number vanish from the series.
497
+ In terms of graph theory this means the probability of
498
+ detecting nbin = (1, 0, 1, 0) is proportional to the sum of
499
+ the squared Hafnians of all possible subgraphs of G of
500
+ unbounded size with their 2nd and 4th vertices removed.
501
+ But again in practice the maximum size of the subgraphs
502
+
503
504
+ will always be bounded by some maximum photon num-
505
+ ber for a real GBS experiment. More generally we have
506
+ p(nbin) = Tor(O_nbin) / √det(Q) = Tor(X Ã_nbin) / √det(Q) = Σ_{n∈N} Haf²(An) / ( n! √det(Q) )               (13)
520
+ where N is the set of all PNR events that correspond to
521
+ the binary detection event nbin [23].
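+ For completeness, Eq. (10) can be transcribed directly into code. The numpy sketch below is our own (the function name is ours), is exponential-time in N, and assumes the index convention in which mode i of a 2N × 2N matrix pairs with index i + N; it is meant only as an illustration for very small N:
+
+   import itertools
+   import numpy as np
+
+   def torontonian(A: np.ndarray) -> float:
+       """Direct transcription of Eq. (10): sum over all subsets Z of the
+       N modes of (-1)^|Z| / sqrt(det(I - A_Z)), where A_Z keeps the rows
+       and columns of each mode i in Z together with its partner i + N."""
+       N = A.shape[0] // 2
+       total = 0.0
+       for r in range(N + 1):
+           for Z in itertools.combinations(range(N), r):
+               idx = list(Z) + [i + N for i in Z]
+               AZ = A[np.ix_(idx, idx)]
+               # numpy returns det = 1 for the empty (0 x 0) submatrix
+               total += (-1) ** r / np.sqrt(np.linalg.det(np.eye(2 * r) - AZ))
+       return total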
522
+ B.
523
+ Constructing the feature vectors
524
+ Once the GBS device is programmed we generate S
525
+ samples from the device. For our algorithm we use bi-
526
+ nary detectors so each sample is a list of length M with
527
+ entries either 0 or 1. Once we have these samples we use
528
+ them to construct the feature vector of which we have
529
+ two definitions based on two coarse-graining strategies.
530
+ The first is based on what we call the µ coarse-graining
531
+ strategy where we group together detection events that
532
+ contain exactly i detector ‘clicks’ or ones.
533
+ For exam-
534
+ ple the detection events (1, 0, 0) and (0, 0, 1) would be
535
+ grouped together since they both contain exactly 1 detec-
536
+ tor click. These groups can also be thought of as ‘binary
537
+ orbits’ since they contain a detection event and all its per-
538
+ mutations. This strategy partitions the set of all binary
539
+ detection events into a linear number of disjoint subsets
540
+ in N. Using this strategy we can define the feature map
541
+ as φ : G → f = (f0, f1, ..., fN) ∈ R^N, where N is the
+ maximum number of detector clicks and fi = Si/S, with
545
+ Si being the number of samples which contain exactly i
546
+ ones. Equivalently this is the probability of detecting an
547
+ event where exactly i detectors detect a photon.
548
+ The second feature map is based on what we call the
549
+ ν coarse-graining strategy. For a 5-mode boson sampler
550
+ utilizing binary detectors with maximum click number
551
+ 5 there are |Ω| = 32 possible detection outcomes. This
552
+ coarse-graining strategy groups together detection events
553
+ whose first 5 modes are one of these 32 outcomes. For
554
+ example the detection event nbin = (0, 1, 0, 0, 1, 0, 1) be-
555
+ longs in the group associated with the detection event
556
+ (0, 1, 0, 0, 1) since they are equal if one is only concerned
557
+ with the first 5 modes.
558
+ This strategy partitions the
559
+ set of all detection events of 5 or more modes into a
560
+ constant number of subsets, i.e. 32. The feature map
561
+ based on this strategy is defined as φ : G → f =
562
+ (f[0,0,0,0,0], f[1,0,0,0,0], ..., fn) ∈ R^32, where fn is the prob-
563
+ ability of detecting an event where the first 5 modes cor-
564
+ respond to one of the 32 possible detection outcomes. For
565
+ example f[1,0,0,0,0] is the probability that the first detec-
566
+ tor detects photons and the following 4 detectors detect
567
+ vacuum.
568
+ Once we construct the feature vector for each graph in
569
+ the data set we input them to a machine learning classi-
570
+ fier such as a support vector machine.
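+ Both maps are straightforward to realize once the S binary samples are in hand. The numpy sketch below is our own (names and the ordering of the 32 ν patterns are bookkeeping choices, not from the paper); it assumes samples is an integer 0/1 array of shape (S, M) with M ≥ 5:
+
+   import numpy as np
+
+   def mu_features(samples: np.ndarray) -> np.ndarray:
+       """mu coarse-graining: feature i is the fraction of samples
+       containing exactly i detector clicks."""
+       S, M = samples.shape
+       clicks = samples.sum(axis=1).astype(int)
+       return np.bincount(clicks, minlength=M + 1) / S
+
+   def nu_features(samples: np.ndarray) -> np.ndarray:
+       """nu coarse-graining: feature j is the fraction of samples whose
+       first 5 modes match the j-th of the 32 binary patterns (here
+       indexed by the little-endian binary value of those 5 entries)."""
+       S = samples.shape[0]
+       idx = samples[:, :5].dot(1 << np.arange(5)).astype(int)
+       return np.bincount(idx, minlength=32) / S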
571
+ C.
572
+ Numerical experiments
573
+ We used The Walrus python library to classically sam-
574
+ ple from the GBS output distribution when running our
575
+ experiments and the GraKel python library to fetch the
576
+ data sets and simulate the classical graph kernels [24, 25].
577
+ Classically sampling from a GBS output distribution is
578
+ very time intensive even when using binary detectors so
579
+ we choose to follow the choice made in [3] and discard
580
+ graphs with greater than 25 and less than 6 vertices for
581
+ each data set. Before sampling from the GBS device we
582
+ have four parameters we can set: the maximum number
583
+ of detector clicks allowed N, the average photon number
584
+ ¯n, the displacement on each mode of the GBS device d
585
+ and lastly the number of samples generated by the GBS
586
+ device S. We set N = 6, ¯n = 5 and d = 0 for our re-
587
+ sults reported here, leading to a probability distribution of
588
+ 32 outcomes using the ν coarse-graining strategy and 7
589
+ outcomes using the µ coarse-graining strategy. Using Eq.
590
+ 1 with δ = 0.01 and ϵ = 0.06 we require about S = 15000
591
+ samples for the ν feature vectors and about S = 6000
592
+ samples for the µ feature vectors.
593
+ For the machine learning classifier we use a support
594
+ vector machine with an RBF kernel κrbf. We obtain the
595
+ accuracies in Table II by running a double 10-fold cross-
596
+ validation 10 times. The inner fold performs a grid search
597
+ through the discrete set of values [10−4, 10−3, ..., 102, 103]
598
+ on the C hyper-parameter of the SVM which controls
599
+ the penalty on misclassifications. We tested our graph
600
+ kernel on the same data sets used in [3]. We also ignored
601
+ vertex labels, vertex attributes and edge attributes and
602
+ converted all adjacency matrices to be unweighted.
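+ Schematically, the double cross-validation described above can be set up with scikit-learn as follows; this sketch is ours (the sklearn names are real, the stand-in data f, y replaces feature vectors built as in Sec. III B):
+
+   import numpy as np
+   from sklearn.model_selection import GridSearchCV, cross_val_score
+   from sklearn.svm import SVC
+
+   rng = np.random.default_rng(0)
+   f = rng.random((100, 32))           # stand-in feature vectors (n_graphs x 32)
+   y = rng.integers(0, 2, size=100)    # stand-in class labels
+
+   param_grid = {"C": [10.0 ** k for k in range(-4, 4)]}       # 1e-4 ... 1e3
+   inner = GridSearchCV(SVC(kernel="rbf"), param_grid, cv=10)  # inner 10-fold
+   scores = cross_val_score(inner, f, y, cv=10)                # outer 10-fold
+   print(scores.mean(), scores.std())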
603
+ Four classical graph kernels were used as a bench-
604
+ mark for our algorithm's classification accuracy.
605
+ The
606
+ subgraph matching kernel (SM) with time complexity
607
+ O(kM k+1) where M is the number of vertices and k the
608
+ size of the subgraphs being considered [26], the graphlet
609
+ sampling kernel (GS) with worst case time complexity
610
+ O(M k) which can be optimized to O(Mdk−1) for graphs
611
+ of bounded degree with the restriction that k ∈ {3, 4, 5},
612
+ where k is the graphlet size and d is the maximum de-
613
+ gree of the graph [27], the random walk kernel (RW) with
614
+ time complexity O(M 3) [28] and the shortest path kernel
615
+ (SP) with time complexity O(M 4) [29]. For the graphlet
616
+ sampling kernel we set maximum graphlet size to k = 5
617
+ and draw 5174 samples, for the random walk kernel we
618
+ use fast computation and a geometric kernel type with
619
+ the decay factor set to λ = 10−3, for the subgraph match-
620
+ ing kernel we set maximum subgraph size to k = 5 and
621
+ for the shortest path kernel we used the Floyd–Warshall
622
+ algorithm to calculate shortest paths. The accuracies of
623
+ all four classical kernels, the original GBS graph kernels
624
+ from [3] with n = 6 and our kernel are shown in Table II.
625
+
626
627
+ TABLE I: Graph data set statistics after preprocessing. A more detailed description of these data sets can be found
+ in appendix B of [3].
+ Data set        # of graphs   # of classes   avg. # of vertices   avg. # of edges
+ AIDS                   1723              2                11.11             11.29
+ BZR MD                  257              2                20.10            197.69
+ COX2 MD                 118              2                23.90            274.40
+ ENZYMES                 204              6                18.56             36.30
+ ER MD                   357              2                19.27            185.15
+ FINGERPRINT            1080              3                10.58              9.10
+ IMDB-BINARY             806              2                15.98             63.32
+ MUTAG                   179              2                17.48             19.23
+ NCI1                   1853              2                19.77             21.27
+ PROTEINS                515              2                15.77             29.37
+ PTC FM                  284              2                13.64             13.99
686
+ TABLE II: Average test accuracies of the support vector machine with different data sets and graph kernels. The
687
+ error reported is the standard deviation between 10 repeats of double cross validation. GS, RW, SM and SP refer to
688
+ the graphlet sampling, random walk, subgraph matching and shortest path kernels respectively. GBS^bin_ν
+ and GBS^bin_µ denote our GBS kernels with binary detectors that use the ν and µ coarse-graining strategies to construct the
+ feature vectors respectively. GBS^bin+_ν denotes that the feature associated with detecting vacuum [0, 0, 0, 0, 0] in the
+ first 5 modes was dropped from all feature vectors. GBS^PNR and GBS^PNR+ refer to the original GBS kernels with
+ PNR detectors that use orbit and meta-orbit probabilities as features respectively, with a displacement of d on each
+ mode. *Runtime > 7 days
+ Data set      GBS^bin+_ν    GBS^bin_ν     GBS^bin_µ     GS            RW            SM           SP
+ AIDS          98.47(0.10)   98.74(0.20)   99.53(0.05)   99.30(0.07)   53.11(11.90)  77.85(2.44)  99.34(0.09)
+ BZR MD        60.14(1.28)   61.73(0.89)   58.79(1.17)   51.42(3.51)   64.54(0.36)   time out*    50.82(1.76)
+ COX2 MD       51.62(2.76)   50.18(2.96)   51.30(3.86)   49.01(3.18)   48.98(4.78)   time out*    48.11(4.30)
+ ENZYMES       48.10(1.18)   41.75(2.35)   19.83(1.43)   34.59(2.54)   19.50(2.29)   37.38(1.60)  22.15(1.88)
+ ER MD         67.74(0.94)   69.19(0.33)   68.84(0.50)   48.88(4.53)   70.32(0.02)   time out*    45.23(4.35)
+ FINGERPRINT   64.45(0.78)   65.53(0.86)   63.56(0.67)   65.25(1.30)   33.63(3.57)   46.89(0.56)  46.22(1.02)
+ IMDB-BINARY   60.69(0.84)   61.35(0.98)   67.34(0.38)   68.49(0.63)   67.78(0.38)   time out*    65.50(0.27)
+ MUTAG         84.63(0.91)   85.94(0.98)   81.37(0.90)   80.80(0.91)   83.22(0.04)   83.24(1.27)  82.74(1.65)
+ NCI1          63.45(0.57)   56.99(1.69)   59.09(1.02)   50.34(3.22)   50.96(3.58)   time out*    53.40(2.25)
+ PROTEINS      65.95(1.03)   63.38(0.73)   63.11(0.55)   65.75(0.94)   56.91(1.39)   62.93(0.83)  63.63(0.41)
+ PTC FM        52.63(3.95)   57.47(2.72)   59.17(1.58)   60.74(1.48)   50.95(3.68)   56.36(2.66)  55.38(4.04)
+ Data set      GBS^PNR (d = 0)   GBS^PNR (d = 0.25)   GBS^PNR+ (d = 0)   GBS^PNR+ (d = 0.25)
+ AIDS          99.60(0.05)       99.62(0.03)          99.58(0.06)        99.61(0.05)
+ BZR MD        62.73(0.71)       62.13(1.44)          62.01(1.43)        63.16(2.11)
+ COX2 MD       44.98(1.80)       50.11(0.97)          57.84(4.04)        57.89(2.62)
+ ENZYMES       22.29(1.60)       28.01(1.83)          25.72(2.60)        40.42(2.02)
+ ER MD         70.36(0.78)       70.41(0.47)          71.01(1.26)        71.05(0.83)
+ FINGERPRINT   65.42(0.49)       65.85(0.36)          66.19(0.84)        66.26(4.29)
+ IMDB-BINARY   64.09(0.34)       68.71(0.59)          68.14(0.71)        67.60(0.75)
+ MUTAG         86.41(0.33)       85.58(0.59)          85.64(0.78)        84.46(0.44)
+ NCI1          63.61(0.00)       62.79(0.00)          63.59(0.17)        63.11(0.93)
+ PROTEINS      66.88(0.22)       66.14(0.48)          65.73(0.69)        66.16(0.76)
+ PTC FM        53.84(0.96)       52.45(1.78)          59.14(1.72)        56.25(2.04)
858
+ D.
859
+ Complexity analysis
860
+ In this section we discuss, in addition to the time and
861
+ space complexity, the sample complexity of our algo-
862
+ rithm.
863
+ 1.
864
+ Sample Complexity
865
+ Since the ni’s for binary detection events can be either
866
+ 0 or 1 we can think of the detection outcomes as binary
867
+ strings of length M with at most M ones. The number
868
+ of binary strings of length M with exactly i ones is
869
+ C(M, i).
873
+ So the number of possible binary detection events, the
874
+ number of binary strings of length M with at most M
875
+
876
877
+ ones, is given by
878
+ |Ω| = Σ_{i=0}^{M} C(M, i).                                            (14)
887
+ We can show this function grows like 2M using the bino-
888
+ mial expansion
889
+ 2^M = (1 + 1)^M = Σ_{i=0}^{M} C(M, i) 1^(M−i) 1^i = Σ_{i=0}^{M} C(M, i).        (15)
905
+ Therefore we could not simply use the probability of the
906
+ individual detection events as features without coarse-
907
+ graining even when using binary detectors as we would
908
+ still need a prohibitively large number of samples to ap-
909
+ proximate their probabilities to within a constant error.
910
+ This was the reason for introducing the ν and µ coarse-
911
+ graining strategies.
912
+ Since the number of outcomes of the µ distribution
913
+ scales linearly with N which is ≤ M the sample com-
914
+ plexity of approximating the µ coarse-grained probability
915
+ distribution is
916
+ O( ( M + ln(1/δ) ) / ϵ² )                                             (16)
922
+ which reduces to O(M) for constant ϵ and δ. The sample
923
+ complexity of approximating the ν coarse-grained prob-
924
+ ability distribution is
925
+ O( ( 32 + ln(1/δ) ) / ϵ² )                                            (17)
931
+ which reduces to O(1) for constant ϵ and δ.
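+ To make the contrast concrete, this small numerical aside (ours, using the largest graph size kept in Sec. III C) compares the outcome counts of Eqs. (14), (16) and (17):
+
+   M = 25
+   print(2 ** M)     # raw binary outcomes: 33,554,432
+   print(M + 1)      # mu coarse-graining: 26 outcomes
+   print(32)         # nu coarse-graining: constant, 2^5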
932
+ 2.
933
+ Space Complexity
934
+ The size of the ν feature vectors is constant with re-
935
+ spect to the graph size so the space required is O(1) and
936
+ for the µ feature vectors the size grows linearly with N
937
+ which is ≤ M so the space required is O(M).
938
+ How-
939
+ ever storing the adjacency matrix of the graphs requires
940
+ O(M 2) space complexity.
941
+ 3.
942
+ Time Complexity
943
+ The time complexity is determined by the most com-
944
+ putationally time intensive step of the algorithm which
945
+ is encoding the adjacency matrix into the GBS device.
946
+ This is the case because the encoding process requires
947
+ taking the Takagi decomposition of the matrix A which
948
+ for a M × M matrix has time complexity O(M 3) as it
949
+ is a special case of the singular value decomposition [30].
950
+ However there do exist quantum algorithms for comput-
951
+ ing the singular value decomposition of a matrix with
952
+ complexity that is polylogarithmic in the size of the ma-
953
+ trix [31]. In particular the quantum singular value esti-
954
+ mation algorithm for a m × n matrix presented in [32]
955
+ has complexity O(polylog(mn)/ϵ) where ϵ is an additive
956
+ error.
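+ For a real symmetric adjacency matrix, the Takagi decomposition mentioned above can be obtained from an ordinary eigendecomposition. The following numpy sketch is our own O(M³) illustration, not the routine of Ref. [30]:
+
+   import numpy as np
+
+   def takagi_real_symmetric(A):
+       """Takagi decomposition A = U diag(s) U^T for real symmetric A:
+       negative eigenvalues are absorbed by multiplying the corresponding
+       eigenvector column by 1j, so that s = |eigenvalues| >= 0."""
+       w, Q = np.linalg.eigh(A)
+       phases = np.where(w >= 0, 1.0 + 0j, 1j)
+       U = Q * phases            # scales column i by phases[i]
+       return U, np.abs(w)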
957
+ E.
958
+ Feature analysis & comparison to classical
959
+ kernels
960
+ Fig. 4 shows the results of performing a principal com-
961
+ ponent analysis on the feature vectors generated using
962
+ the ν coarse-graining strategy for various datasets. The
963
+ analysis shows that the feature associated with vacuum
964
+ [0, 0, 0, 0, 0] contributes by far the most in the support of
965
+ the first principal component. The analysis also suggests
966
+ that in some cases the first 10 or so features contribute
967
+ the most to the support of all of the first four principal
968
+ components but in other cases, such as with FINGER-
969
+ PRINT, most features contribute more or less equally.
970
+ Our graph kernel has a time complexity that is equiva-
971
+ lent to the random walk kernel and better than the short-
972
+ est path kernel by a factor of M while outperforming both
973
+ on most data sets. Furthermore the time complexity of
974
+ our kernel is not exponential in the size of the subgraphs
975
+ we are probing like the subgraph matching kernel. The
976
+ graphlet sampling kernel does have a more favorable com-
977
+ plexity of O(Mdk−1) for graphs with maximum degree
978
+ d. However it’s important to note that many real world
979
+ graphs are what are called ‘scale-free networks’ and from
980
+ the network science literature [33] the maximum degree
981
+ of these graphs grows polynomially with the graph size.
982
+ Therefore it is possible that the maximum degree
+ of these graphs grows linearly with the graph size, e.g.
984
+ d ∈ O(M) which would lead to a complexity of O(M k)
985
+ for the graphlet sampling kernel. What is also interesting
986
+ is that GBS kernels seem to provide more distinguish-
987
+ ing power than some classical kernels for graphs with no
988
+ vertex and edge labels like those used in our simulations.
989
+ Take for example the ENZYMES dataset for which the
990
+ binary GBS kernel achieves a classification accuracy of
991
+ ≈ 48% while the shortest path kernel reaches about 23%.
992
+ If we instead choose to not ignore vertex labels we found
993
+ the shortest path kernel gives a classification accuracy of
994
+ about 50%. Since the GBS features are related to Haf-
995
+ nians this suggests that features related to the number
996
+ of perfect matchings of a graph could be more useful for
997
+ distinguishing graphs of different classes when one has no
998
+ information about the attributes of the graph nodes.
999
+ IV.
1000
+ CONCLUSION
1001
+ We proposed a variation of an algorithm for the ma-
1002
+ chine learning task of classification with graph-structured
1003
+ data that uses a Gaussian boson sampler utilizing only
1004
+ binary detectors. We show that our algorithm outper-
1005
+ forms four classical graph kernels for the task of graph
1006
+
1007
1008
+ FIG. 4: Results of the principal component analysis (PCA) on the ν feature vector entries for the ENZYMES,
1009
+ MUTAG, IMDB BINARY and FINGERPRINT datasets. The heatmaps show the weight/coefficient associated with
1010
+ each feature with regard to the first four principal components.
1011
+ classification on many data sets. This is most evident
1012
+ with regard to the ENZYMES data set where the ν fea-
1013
+ ture map outperforms all methods. The feature corre-
1014
+ sponding to detecting vacuum in the first 5 modes plays
1015
+ a particularly important role as shown by the princi-
1016
+ pal component analysis as it is related to the Hafnian of
1017
+ all possible subgraphs of G with their first 5 vertices re-
1018
+ moved. We also show that it is sample efficient, a major
1019
+ issue for applications of GBS, and has a time complexity
1020
+ that is comparable with the classical strategies.
1021
+ The fact that a GBS kernel using only binary detec-
1022
+ tors produces such accuracies suggests that technologi-
1023
+ cally more feasible—binary detectors such as SPADs do
1024
+ not operate at cryogenic temperatures such as supercon-
1025
+ ducting PNR ones—GBS devices could have useful appli-
1026
+ cations for machine learning with graph-structured data.
1027
+ We believe that GBS with PNR detectors should also be
1028
+ explored more for this application with particular atten-
1029
+ tion given to coarse-graining strategies that both reduce
1030
+ the sample complexity as well as provide features that
1031
+ capture useful information about the graphs.
1032
+ A number of questions remain open for investigation
1033
+ such as how vertex and edge labels can be encoded into
1034
+ the GBS device. Also as stated earlier it is known that
1035
+ the existence of a polynomial-time classical algorithm for
1036
+ exact sampling from the output probability distribution
1037
+ of a boson sampling or Gaussian boson sampling device
1038
+ would imply the collapse of the polynomial hierarchy to
1039
+ the third level and thus the existence of such an algorithm
1040
+ is believed to be very unlikely [34]. This result can also
1041
+ be extended to GBS with binary detectors [22]. However
1042
+ it is not known, although some work has been done in
1043
+ this area [21], if such arguments exist for algorithms that
1044
+ sample from coarse-grained versions of these probability
1045
+ distributions such as those defined in [3] or our work. It is
1046
+ vital to know if such arguments exist as they would imply
1047
+ these quantum kernels are also likely hard to simulate
1048
+ classically.
1049
+ ACKNOWLEDGMENTS
1050
+ We thank Maria Schuld, Kamil Br´adler, Scott Aaron-
1051
+ son, Ignacio Cirac, Miller Eaton, Nicol´as Quesada, An-
1052
+ drew Blance, Shreyas Murthy and Sefonias Maereg for
1053
+ useful advice and discussions. We thank Research Com-
1054
+ puting at the University of Virginia for providing access
1055
+ to, and support with, the Rivanna computing cluster.
1056
+
1057
+ [Figure 4 graphics: four PCA heatmaps (ENZYMES, FINGERPRINT, MUTAG, IMDB-BINARY). Rows list the 32 ν features, from 01 - [0, 0, 0, 0, 0] through 32 - [1, 1, 1, 1, 1]; columns are the principal components PC1-PC4; the color scale runs from −1.00 to 1.00.]
1153
+ This work was supported by NSF grant PHY-2112867.
1154
+ Appendix A: Reminders about standard GBS
1155
+ 1.
1156
+ GBS with PNR detectors
1157
+ There has been substantial work done already on the
1158
+ connection between graph theory and Gaussian boson
1159
+ sampling with PNR detectors [18, 35, 36].
1160
+ Here we
1161
+ present the important concepts. Any undirected graph
1162
+ G with no self-loops and |V | = M vertices can be en-
1163
+ coded into a M-mode GBS setup consisting of a set of
1164
+ M squeezers followed by an interferometer of beamsplit-
1165
+ ters according to its adjacency matrix A. Once the graph
1166
+ is encoded into the GBS device the probability of detect-
1167
+ ing a specific detection event n = (n1, ..., nM) is equal
1168
+ to
1169
+ p(n) = (1/√det(Q)) Haf(Ãn)/n! = (1/√det(Q)) Haf²(An)/n!              (A1)
+ with
+ Q = (I_2M − XÃ)⁻¹,   X = ( 0 I ; I 0 ),                               (A2)
1191
+ n! = n1!×...×nM!, ˜A = (A⊕A) and Haf() denoting the
1192
+ Hafnian of a 2M × 2M matrix. The Hafnian is a matrix
1193
+ function defined mathematically as
1194
+ Haf(A) = Σ_{π∈S_M} Π_{(u,v)∈π} A_{u,v},                               (A3)
1201
+ where SM is the partition of the set {1, 2, ..., 2M} into
1202
+ unordered disjoint pairs.
1203
+ For example if M = 2 then
1204
+ SM = ({(1, 2), (3, 4)}, {(1, 4), (2, 3)}, {(1, 3), (2, 4)}).
1205
+ If
1206
+ A is the adjacency matrix of an unweighted graph then
1207
+ the Hafnian is equal to the number of perfect matchings
1208
+ of the vertices of the graph.
1209
+ A perfect matching is a
1210
+ partition of the vertex set of a graph into pairs such that
1211
+ each vertex is connected to exactly one edge from the
1212
+ edge set. All perfect matchings of a complete 4-vertex
1213
+ graph are shown in Fig.5.
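+ Since the Hafnian of an unweighted graph counts its perfect matchings, Eq. (A3) can be evaluated for small examples by direct recursion. This exponential-time sketch is our own illustration:
+
+   def hafnian(A):
+       """Hafnian by summing over all perfect matchings, as in Eq. (A3).
+       For small symmetric matrices only."""
+       def rec(vertices):
+           if not vertices:
+               return 1
+           u, rest = vertices[0], vertices[1:]
+           # pair u with each remaining vertex v and recurse on the rest
+           return sum(A[u][v] * rec([w for w in rest if w != v]) for v in rest)
+       n = len(A)
+       return 0 if n % 2 else rec(list(range(n)))
+
+   K4 = [[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]]
+   print(hafnian(K4))   # 3, the perfect matchings shown in Fig. 5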
1214
+ An is the n×n submatrix of A induced according to the
1215
+ photon detection event n. An is obtained by repeating
1216
+ the ith row and column according to the measurement
1217
+ pattern n. If ni = 0 then the ith row and column are
1218
+ deleted from A but if ni > 0 then the ith row and col-
1219
+ umn are repeated ni times. For example the probability
1220
+ of detecting the event where each mode has exactly one
1221
+ photon n = (1, 1, ..., 1) would be proportional to the Haf-
1222
+ nian of the original matrix A since An = A. What this
1223
+ means in terms of the graph is that vertex i and all its
1224
+ edges are either deleted if ni = 0 or duplicated ni times
1225
+ if ni > 0. Therefore the probability of a detection event
1226
+ n is proportional to the squared Hafnian of the subgraph
1227
+ Gn corresponding to the induced adjacency matrix An.
1228
+ [Figure 5a graphic: the complete graph on vertices 1, 2, 3, 4.]
+ (a) Complete graph of 4 vertices
+ [Figure 5b graphic: the three perfect matchings, each pairing up the four vertices.]
1246
+ (b) The three perfect matchings of
1247
+ the complete 4-vertex graph
1248
+ FIG. 5: The complete graph of 4 vertices and its
1249
+ corresponding perfect matchings
1250
+ FIG. 6: Table of different photon detection events n
1251
+ and the corresponding subgraphs Gn they induce and
1252
+ the value of the squared Hafnians of those subgraphs.
1253
+ The probability of the detection event where each
1254
+ detector detects one photon corresponds to the Hafnian
1255
+ of the graph encoded into the GBS. We can see in the
1256
+ third graph from the top when a detector detects 2
1257
+ photons the corresponding vertex is duplicated.
1258
+ Examples of different detection events and their corre-
1259
+ sponding induced subgraphs are shown in Fig.6.
1260
+ These induced subgraphs are of even size since the
1261
+ number of photons detected is always even due to the
1262
+ fact that the inputs are squeezed states. However when
1263
+ displacement is applied to the modes of the GBS the
1264
+ probability of detecting an odd number of photons is in
1265
+ general not zero anymore and the probability of individ-
1266
+ ual detection events is characterized by the loop Hafnian
1267
+ lHaf() as opposed to the Hafnian [37, 38]. We don’t apply
1268
+ displacement for the experiments done in this paper.
1269
+
1270
+ [Figure 6 graphic: a table whose columns are the detection event n, the induced subgraph Gn, and Haf(An), for the events (1,1,1,1), (1,0,0,1) and (1,1,1,2) on the 4-vertex graph.]
1287
+ 2.
1288
+ Encoding a graph into a GBS device
1289
+ To map a graph to a feature vector we must first pro-
1290
+ gram the GBS device, by setting the squeezing parame-
1291
+ ters and beamsplitter angles of the device, according to
1292
+ the adjacency matrix A of the graph. Any adjacency ma-
1293
+ trix A ∈ RM×M of an undirected graph of M vertices can
1294
+ be mapped to a symmetric, positive definite 2M ×2M co-
1295
+ variance matrix Σ of a pure Gaussian state of M modes
1296
+ via the following procedure. First a doubled adjacency
1297
+ matrix ˜A is constructed,
1298
+ Ã = c ( A 0 ; 0 A ) = c(A ⊕ A),                                       (A4)
1305
+ where c is a rescaling constant chosen such that 0 < c <
1306
+ 1/λmax where λmax is the maximum singular value of
1307
+ A [3]. We use ˜A as, unlike A, it is guaranteed to map
1308
+ to a covariance matrix of a pure Gaussian state which
1309
+ is easier to prepare than a mixed one [35].
1310
+ This also
1311
+ has the advantage of allowing us to utilize the identity
1312
+ Haf(A ⊕ A) = Haf2(A) to relate ˜A to A.
1313
+ To map ˜A
1314
+ to a covariance matrix Σ we use the following matrix
1315
+ equations
1316
+ Σ = Q − I_2M/2,  with  Q = (I_2M − XÃ)⁻¹,  X = ( 0 I ; I 0 ).         (A5)
1324
+ To program the GBS device to sample from the probabil-
1325
+ ity distribution corresponding to the covariance matrix Σ
1326
+ of the pure Gaussian state we need the unitary matrix
1327
+ U that characterizes the interferometer of the device as
1328
+ well as the squeezing parameters r1, ..., rM of each of the
1329
+ M squeezers. We can obtain these values by taking the
1330
+ Takagi decomposition of A which is of the form
1331
+ A = U diag(λ1, ..., λM) Uᵀ.                                           (A6)
1333
+ The squeezing parameters are determined by the sin-
1334
+ gular values λ1, ..., λM and c via the relationship ri =
1335
+ tanh−1(cλi).
1336
+ The singular values and c also uniquely
1337
+ determine the mean photon number ¯n of the device ac-
1338
+ cording to
1339
+ n̄ = Σ_{i=1}^{M} (cλi)² / ( 1 − (cλi)² ) = Σ_{i=1}^{M} sinh²(ri).      (A7)
1350
+ The rescaling constant c can be used to adjust ¯n as multi-
1351
+ plying A by c scales it’s singular values without changing
1352
+ the structure of the graph other than scaling all edge
1353
+ weights by c. The matrix U can be decomposed to give
1354
+ the parameters of the beamsplitter gates of the interfer-
1355
+ ometer [39].
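+ Putting Eq. (A7) and ri = tanh⁻¹(cλi) together, choosing c for a target mean photon number and reading off the squeezing parameters takes only a few lines of numpy. This sketch is ours; bisection works because n̄ increases monotonically with c on (0, 1/λmax):
+
+   import numpy as np
+
+   def encoding_parameters(A: np.ndarray, n_mean: float):
+       """Find the rescaling c giving mean photon number n_mean (Eq. A7)
+       and the squeezing parameters r_i = arctanh(c * lambda_i)."""
+       lam = np.linalg.svd(A, compute_uv=False)   # singular values of A
+       lo, hi = 0.0, 1.0 / lam.max()
+       for _ in range(100):                       # bisection on c
+           c = 0.5 * (lo + hi)
+           n_bar = np.sum((c * lam) ** 2 / (1 - (c * lam) ** 2))
+           lo, hi = (c, hi) if n_bar < n_mean else (lo, c)
+       return c, np.arctanh(c * lam)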
1356
+ The GBS device, if using PNR detectors, now samples
1357
+ from the probability distribution
1358
+ p(n) = (1/√det(Q)) Haf(Ãn)/n! = (1/√det(Q)) Haf²(An)/n!.             (A8)
1371
+ Appendix B: Super Exponential Growth of GBS
1372
+ Detection Events for M ∈ O(n2)
1373
+ Lemma 1. (n + M − 1)!/( n!(M − 1)! ) ∈ ω( (√M)^(√M) ) for n = ⌊√M⌋
+ Proof. Setting n = ⌊√M⌋,
+ (n + M − 1)!/( n!(M − 1)! ) = (⌊√M⌋ + M − 1)!/( ⌊√M⌋! (M − 1)! )
+ = [ Π_{i=1}^{⌊√M⌋} (M − 1 + i) ] (M − 1)! / ( [ Π_{i=1}^{⌊√M⌋} i ] (M − 1)! )
+ = Π_{i=1}^{⌊√M⌋} [ (M − 1)/i + 1 ]
+ > Π_{i=1}^{⌊√M⌋} [ (M − 1)/⌊√M⌋ + 1 ]
+ = ( (M − 1)/⌊√M⌋ + 1 )^(⌊√M⌋)
+ = ( M/⌊√M⌋ + 1 − 1/⌊√M⌋ )^(⌊√M⌋)
+ ≥ ⌊√M⌋^(⌊√M⌋)
+ Therefore (n + M − 1)!/( n!(M − 1)! ) ∈ ω( (√M)^(√M) ) for n = ⌊√M⌋. We
+ drop the floor function for simplicity as ⌊x⌋ ∈ Θ(x).
1475
+ Appendix C: Induction Proof for C(n, k) ∈ Θ(n^k)
+ Lemma 2. C(n, k) ∈ Θ(n^k)
+ Proof. Base Case: k = 2
+ C(n, 2) = n(n − 1)/2!
+ lim_{n→∞} [ n(n − 1)/2! ] / n² = 1/2!,   0 < 1/2! < ∞   ⇒   C(n, 2) ∈ Θ(n²)
+ Assume the result holds up to k = ℓ:
+ C(n, ℓ) = n(n − 1)(n − 2) · · · (n − (ℓ − 1)) / ℓ! ∈ Θ(n^ℓ)
+ Inductive Step: k = ℓ + 1
+ C(n, ℓ + 1) = n(n − 1)(n − 2) · · · (n − ℓ) / (ℓ + 1)!
+ lim_{n→∞} [ n(n − 1) · · · (n − ℓ) / (ℓ + 1)! ] / n^(ℓ+1)
+ = lim_{n→∞} [ n(n − 1) · · · (n − (ℓ − 1)) / ℓ! ] / n^ℓ · lim_{n→∞} (n − ℓ)/( (ℓ + 1) n )
+ = (1/ℓ!) · ( 1/(ℓ + 1) ) = 1/(ℓ + 1)!
+ 0 < 1/(ℓ + 1)! < ∞   ⇒   C(n, ℓ + 1) ∈ Θ(n^(ℓ+1))
1558
+ [1] G. Nikolentzos, G. Siglidis, and M. Vazirgiannis, Graph
1559
+ kernels: A survey, Journal of Artificial Intelligence Re-
1560
+ search 72, 943 (2021).
1561
+ [2] N. M. Kriege, F. D. Johansson, and C. Morris, A survey
1562
+ on graph kernels, Applied Network Science 5, 1 (2020).
1563
+ [3] M. Schuld, K. Br´adler, R. Israel, D. Su, and B. Gupt,
1564
+ Measuring the similarity of graphs with a gaussian boson
1565
+ sampler, Physical Review A 101, 032314 (2020).
1566
+ [4] S. Aaronson and A. Arkhipov, The computational com-
1567
+ plexity of linear optics, Electronic Colloquium on Com-
1568
+ putational Complexity Report No. 170, 1 (2010).
1569
+ [5] L. G. Valiant, The complexity of computing the perma-
1570
+ nent, Theoretical computer science 8, 189 (1979).
1571
+ [6] C. S. Hamilton, R. Kruse, L. Sansoni, S. Barkhofen,
1572
+ C. Silberhorn, and I. Jex, Gaussian boson sampling,
1573
+ Phys. Rev. Lett. 119, 170501 (2017).
1574
+ [7] H.-A. Bachor and T. C. Ralph, A Guide to Experiments in
+ Quantum Optics, 3rd ed. (Wiley-VCH, 2019).
1583
+ [8] H.-S. Zhong, H. Wang, Y.-H. Deng, M.-C. Chen, L.-C.
1584
+ Peng, Y.-H. Luo, J. Qin, D. Wu, X. Ding, Y. Hu,
1585
+ P. Hu, X.-Y. Yang, W.-J. Zhang, H. Li, Y. Li, X. Jiang,
1586
+ L. Gan, G. Yang, L. You, Z. Wang, L. Li, N.-L. Liu,
1587
+ C.-Y. Lu, and J.-W. Pan, Quantum computational
1588
+ advantage using photons, Science 370, 1460 (2020),
1589
+ https://science.sciencemag.org/content/370/6523/1460.full.pdf.
1590
+ [9] L. S. Madsen, F. Laudenbach, M. F. Askarani, F. Rortais,
1591
+ T. Vincent, J. F. F. Bulmer, F. M. Miatto, L. Neuhaus,
1592
+ L. G. Helt, M. J. Collins, A. E. Lita, T. Gerrits, S. W.
1593
+ Nam, V. D. Vaidya, M. Menotti, I. Dhand, Z. Vernon,
1594
+ N. Quesada, and J. Lavoie, Quantum computational ad-
1595
+ vantage with a programmable photonic processor, Nature
1596
+ 606, 75 (2022).
1597
+ [10] F. Arute, K. Arya, R. Babbush, D. Bacon, J. C.
1598
+ Bardin, R. Barends, R. Biswas, S. Boixo, F. G. S. L.
1599
+ Brandao, D. A. Buell, B. Burkett, Y. Chen, Z. Chen,
1600
+ B. Chiaro, R. Collins, W. Courtney, A. Dunsworth,
1601
+ E. Farhi, B. Foxen, A. Fowler, C. Gidney, M. Giustina,
1602
+ R. Graff, K. Guerin, S. Habegger, M. P. Harrigan,
1603
+ M. J. Hartmann, A. Ho, M. Hoffmann, T. Huang,
1604
+ T. S. Humble,
1605
+ S. V. Isakov,
1606
+ E. Jeffrey,
1607
+ Z. Jiang,
1608
+ D. Kafri, K. Kechedzhi, J. Kelly, P. V. Klimov, S. Knysh,
1609
+ A. Korotkov,
1610
+ F. Kostritsa,
1611
+ D. Landhuis,
1612
+ M. Lind-
1613
+ mark, E. Lucero, D. Lyakh, S. Mandr`a, J. R. Mc-
1614
+ Clean, M. McEwen, A. Megrant, X. Mi, K. Michielsen,
1615
+ M. Mohseni, J. Mutus, O. Naaman, M. Neeley, C. Neill,
1616
+ M. Y. Niu, E. Ostby, A. Petukhov, J. C. Platt, C. Quin-
1617
+ tana, E. G. Rieffel, P. Roushan, N. C. Rubin, D. Sank,
1618
+ K. J. Satzinger, V. Smelyanskiy, K. J. Sung, M. D. Tre-
1619
+ vithick, A. Vainsencher, B. Villalonga, T. White, Z. J.
1620
+ Yao, P. Yeh, A. Zalcman, H. Neven, and J. M. Marti-
1621
+ nis, Quantum supremacy using a programmable super-
1622
+ conducting processor, Nature 574, 505 (2019).
1623
+ [11] T. Weissman, E. Ordentlich, G. Seroussi, S. Verdu, and
1624
+ M. J. Weinberger, Inequalities for the l1 deviation of
1625
+ the empirical distribution, HP Technical Reports HPL-
1626
+ 2003-97, R1 (2003).
1627
+ [12] Note that this is not related to the number of possi-
1628
+ ble output quantum states, which scales with the num-
1629
+ ber of parameters governing the quantum evolution, e.g.
1630
+ parameters of a simulated Hamiltonian. Obviously, no
1631
+ quantum advantage can be obtained for M-qubit Hamil-
1632
+ tonians that have O(2M) parameters but all classically
1633
+ intractable M-qubit Hamiltonians of physical interest
1634
+ are local and have parameter numbers polynomial in
1635
+ M [40], which validates Feynman’s proposed advantage
1636
+ for quantum simulation [41]. An M × M optical interfer-
1637
+ ometer has M 2 parameters, for example. However, even
1638
+ though any useful quantum computer will explore but
1639
+ a O(M k)-dimensional region of an O(2M)-dimensional
1640
+
1641
1642
+ Hilbert space, the number of measurement outcomes will
1643
+ still scale like O(2M) a priori, simply because we do not
1644
+ know the adequate measurement basis that best contains
1645
+ the O(M k) output states. This is the well known expo-
1646
+ nential overhead of quantum state tomography.
1647
+ [13] A. E. Lita, A. J. Miller, and S. W. Nam, Counting near-
1648
+ infrared single-photons with 95% efficiency, Opt. Expr.
1649
+ 16, 3032 (2008).
1650
+ [14] C. Cahall, K. L. Nicolich, N. T. Islam, G. P. Lafyatis,
1651
+ A. J. Miller, D. J. Gauthier, and J. Kim, Multi-photon
1652
+ detection using a conventional superconducting nanowire
1653
+ single-photon detector, Optica 4, 1534 (2017).
1654
+ [15] M. Eaton, A. Hossameldin, R. J. Birrittella, P. M. Alsing,
1655
+ C. C. Gerry, H. Dong, C. Cuevas, and O. Pfister, Resolu-
1656
+ tion of 100 photons and quantum generation of unbiased
1657
+ random numbers, Nature Photonics 10.1038/s41566-022-
1658
+ 01105-9 (2022).
1659
+ [16] R. Cheng, Y. Zhou, S. Wang, M. Shen, T. Taher,
1660
+ and H. X. Tang, A 100-pixel photon-number-resolving
1661
+ detector unveiling photon statistics, Nature Photonics
1662
+ 10.1038/s41566-022-01119-3 (2022).
1663
+ [17] C. Weedbrook, S. Pirandola, R. Garc´ıa-Patr´on, N. J.
1664
+ Cerf, T. C. Ralph, J. H. Shapiro, and S. Lloyd, Gaus-
1665
+ sian quantum information, Reviews of Modern Physics
1666
+ 84, 621 (2012).
1667
+ [18] K. Br´adler, S. Friedland, J. Izaac, N. Killoran, and D. Su,
1668
+ Graph isomorphism and gaussian boson sampling, Spe-
1669
+ cial Matrices 9, 166 (2021).
1670
+ [19] C. L. Canonne, A short note on learning discrete distri-
1671
+ butions, arXiv preprint arXiv:2002.11457 (2020).
1672
+ [20] A. Y. Oru¸c, On number of partitions of an integer into
1673
+ a fixed number of positive integers, Journal of Number
1674
+ Theory 159, 355 (2016).
1675
+ [21] K. Bradler, R. Israel, M. Schuld, and D. Su, A duality
1676
+ at the heart of gaussian boson sampling, arXiv preprint
1677
+ arXiv:1910.04022 (2019).
1678
+ [22] N. Quesada, J. M. Arrazola, and N. Killoran, Gaussian
1679
+ boson sampling using threshold detectors, Physical Re-
1680
+ view A 98, 062322 (2018).
1681
+ [23] The Torontonian was also shown to be a generating func-
1682
+ tion for the Hafnian given by
1683
+ Haf(A) = (1/M!) (d^M/dz^M) Tor(zXA) |_{z=0}                           (C1)
1691
+ where A is a 2M × 2M matrix.
1692
+ [24] B. Gupt, J. Izaac, and N. Quesada, The walrus: a library
1693
+ for the calculation of hafnians, hermite polynomials and
1694
+ gaussian boson sampling, Journal of Open Source Soft-
1695
+ ware 4, 1705 (2019).
1696
+ [25] G. Siglidis, G. Nikolentzos, S. Limnios, C. Giatsidis,
1697
+ K. Skianis, and M. Vazirgiannis, Grakel: A graph ker-
1698
+ nel library in python., J. Mach. Learn. Res. 21, 1 (2020).
1699
+ [26] N. Kriege and P. Mutzel, Subgraph matching kernels
1700
+ for attributed graphs, arXiv preprint arXiv:1206.6483
1701
+ (2012).
1702
+ [27] N. Shervashidze, S. Vishwanathan, T. Petri, K. Mehlhorn,
+ and K. Borgwardt, Efficient graphlet kernels for large graph
+ comparison, in Artificial Intelligence and Statistics (PMLR,
+ 2009) pp. 488–495.
1725
+ [28] S. V. N. Vishwanathan, N. N. Schraudolph, R. Kondor,
1726
+ and K. M. Borgwardt, Graph kernels, Journal of Machine
1727
+ Learning Research 11, 1201 (2010).
1728
+ [29] K.
1729
+ M.
1730
+ Borgwardt
1731
+ and
1732
+ H.-P.
1733
+ Kriegel,
1734
+ Shortest-path
1735
+ kernels
1736
+ on
1737
+ graphs,
1738
+ in
1739
+ Fifth IEEE international conference on data mining (ICDM’05)
1740
+ (IEEE, 2005) pp. 8–pp.
1741
+ [30] T. Hahn, Routines for the diagonalization of complex ma-
1742
+ trices, arXiv preprint physics/0607103 (2006).
1743
+ [31] L. Gu, X. Wang, and G. Zhang, Quantum higher or-
1744
+ der singular value decomposition (IEEE, 2019) pp. 1166–
1745
+ 1171.
1746
+ [32] I. Kerenidis and A. Prakash, Quantum recommendation
1747
+ systems, arXiv preprint arXiv:1603.08675 (2016).
1748
+ [33] A.-L. Barab´asi and M. P´osfai, Network Science (Cam-
1749
+ bridge University Press, Cambridge, 2016).
1750
+ [34] Although this has been proven rigorously for the exact
1751
+ sampling case [4, 42] the proof pertaining to the approx-
1752
+ imate sampling case rests on the assumption of two con-
1753
+ jectures known as the Permanent-of-Gaussians Conjec-
1754
+ ture and the Permanent Anti-Concentration Conjecture
1755
+ which are as of now still unproven.
1756
+ [35] K. Br´adler, P.-L. Dallaire-Demers, P. Rebentrost, D. Su,
1757
+ and C. Weedbrook, Gaussian boson sampling for perfect
1758
+ matchings of arbitrary graphs, Physical Review A 98,
1759
+ 032310 (2018).
1760
+ [36] J. M. Arrazola and T. R. Bromley, Using gaussian boson
1761
+ sampling to find dense subgraphs, Physical review letters
1762
+ 121, 030503 (2018).
1763
+ [37] N. Quesada, Franck-condon factors by counting perfect
1764
+ matchings of graphs with loops, The Journal of chemical
1765
+ physics 150, 164113 (2019).
1766
+ [38] J. F. Bulmer, S. Paesani, R. S. Chadwick, and N. Que-
1767
+ sada, Threshold detection statistics of bosonic states,
1768
+ arXiv preprint arXiv:2202.04600 (2022).
1769
+ [39] W. R. Clements, P. C. Humphreys, B. J. Metcalf, W. S.
1770
+ Kolthammer, and I. A. Walmsley, Optimal design for uni-
1771
+ versal multiport interferometers, Optica 3, 1460 (2016).
1772
+ [40] S. Lloyd, Universal quantum simulators, Science 273,
1773
+ 1073 (1996).
1774
+ [41] R. P. Feynman, Simulating physics with computers, Int.
1775
+ J. Theor. Phys. 21, 467 (1982).
1776
+ [42] R. Kruse, C. S. Hamilton, L. Sansoni, S. Barkhofen,
1777
+ C. Silberhorn, and I. Jex, Detailed study of gaussian bo-
1778
+ son sampling, Physical Review A 100, 032326 (2019).
1779
+
89AzT4oBgHgl3EQfSfsp/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8NFLT4oBgHgl3EQfsy_c/content/2301.12149v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:695f9f51f97440f8258259358d2bedd76dedf3ad45bdbf41fe9af9343f11956f
+ size 30479448
99E1T4oBgHgl3EQfUgM7/content/tmp_files/2301.03090v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
99E1T4oBgHgl3EQfUgM7/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
A9E2T4oBgHgl3EQfnQhW/content/tmp_files/2301.04006v1.pdf.txt ADDED
@@ -0,0 +1,2077 @@
+ IRONFORGE: An Open, Secure, Fair, Decentralized Federated Learning
+ Guangsheng Yu∗, Xu Wang†, Caijun Sun§, Qin Wang∗, Ping Yu‡, Wei Ni∗, Renping Liu†, Xiwei Xu∗
+ ∗CSIRO Data61, Australia
+ †University of Technology Sydney, Australia
+ ‡Harbin University of Technology, China
+ §Zhejiang Lab, China
+ Abstract—Federated learning (FL) provides an effective machine learning (ML) architecture for protecting data privacy in a distributed manner. However, the inevitable network asynchrony, the over-dependence on a central coordinator, and the lack of an open and fair incentive mechanism collectively hinder its further development. We propose IRONFORGE, a new generation of FL framework, which features a Directed Acyclic Graph (DAG)-based data structure and eliminates the need for central coordinators to achieve fully decentralized operations. IRONFORGE runs in a public and open network, and launches a fair incentive mechanism by enabling state consistency in the DAG, so that the system fits in networks where training resources are unevenly distributed. In addition, dedicated defense strategies against prevalent FL attacks on incentive fairness and data privacy are presented to ensure the security of IRONFORGE. Experimental results based on a newly developed testbed, FLSim, highlight the superiority of IRONFORGE over the existing prevalent FL frameworks under various specifications in performance, fairness, and security. To the best of our knowledge, IRONFORGE is the first secure and fully decentralized FL framework that can be applied in open networks with realistic network and training settings.
+ Index Terms—Federated Learning, DAG, Blockchain
+ I. INTRODUCTION
+ Federated learning (FL), officially introduced by Google in 2017 [1], has become the preferred approach to aggregating data from distributed ends without breaching data privacy [1], [2]. By aggregating huge volumes of data with comprehensive extracted features in FL, critical issues such as model overfitting can be significantly alleviated [3]. However, ① the inevitable network asynchrony, ② the over-dependence on a central coordinator, and ③ the lack of an open and fair incentive mechanism hinder the further development of FL in large and open scenarios [4].
+ Traditional FL considers no or low delay throughout an aggregation process, namely, synchronous FL. However, network synchrony is unrealistic due to the inevitable capacity limits of computation, bandwidth, and storage, as well as the imbalanced capacities among the distributed participants. Thus, recent studies propose pseudo-asynchronous FL [5] and asynchronous FL [6]. The aggregation of pseudo-asynchronous FL allows a short interval for collecting the model caches in order to ensure that the number of models aggregated can be sufficiently large, while in asynchronous FL the central coordinator immediately updates the global model once it receives a new local model from any idle participant.
+ Neither pseudo-asynchronous FL nor asynchronous FL can tolerate the single-point-of-failure (SPoF) of the central coordinator, or even a malicious and corrupted coordinator (issue-②). The over-dependence on the central coordinator could potentially degrade the system availability and the training flexibility, in the sense that an FL network may be confined to specific training domains or tasks determined by the coordinator. Participants in many existing studies [7]–[9], once opting in an FL network, would have to obey the defined training target with no flexibility to go for different tasks at will.
+ In addition to the weak training flexibility, the lack of an open and fair incentive mechanism results in participants who have fewer resources and a weaker capacity being unwilling to contribute their resources to the global aggregation. This issue deteriorates particularly in FL networks where resources are not evenly distributed, and potentially leads to model overfitting and weak generality against contingencies. Although the authors of [10] survey the incentive mechanisms in FL, all mentioned frameworks require a central coordinator, also leading to issue-②.
+ Existing studies propose to replace the central coordinator with a committee running a consensus process in a blockchain network to prevent the SPoF or a corrupted coordinator. Meanwhile, by sharing the model collection during the consensus in the committee, pseudo-asynchronous FL can be achieved in a decentralized manner, i.e., BlockFL [7], [11]–[14]. Considering that only issue-② is solved and issue-① is partially solved by BlockFL, the authors of [9] introduce a Directed Acyclic Graph (DAG)-based FL where both issue-① and issue-② are solved using the concept of asynchronous FL [6] to fully decentralize the FL process. However, the paper [9] only considers an ideal network in which the training resources are evenly distributed. Moreover, the approach to enabling state consistency for a secure and fair incentive mechanism (issue-③) is missing in [9], which results in difficulty in adopting the mechanism in a public and open network.
+ We propose IRONFORGE, an open, secure, fair, and decentralized FL system. IRONFORGE solves the above-mentioned pain points at one time. Openness: It features a DAG-based data structure in an open network. Decentralization: The need for a central coordinator is eliminated throughout the process by IRONFORGE, inheriting from the concept of asynchronous FL.
+ TABLE I: Qualitative comparisons between the proposed IRONFORGE and the existing FL frameworks
+ FL Framework        | Data Structure  | Data Asynchrony | Decentralization | Openness | Incentive       | Security
+ Google FL [2]       | Isolated models | Synchronous     | Centralized      | Private  | –               | –
+ Asynchronous FL [6] | Isolated models | Asynchronous    | Centralized      | Private  | –               | –
+ Block FL [15]       | Blockchain      | Synchronous     | Decentralized    | Private  | Reward          | –
+ DAG FL [9]          | DAG             | Asynchronous    | Decentralized    | Public   | Reward          | Poisoning/Backdoor/Lazy
+ IRONFORGE           | DAG             | Asynchronous    | Decentralized    | Public   | Reward, Penalty | Poisoning/Backdoor/Stealing*/Collusion
+ * The stealing attack considered in this paper includes the traditional lazy attack. The difference is that stealing attackers not only upload their previous models, but also fake the ownership of others’ previous models.
+ – Lack of corresponding designs.
+ As a result, the models are maintained in a decentralized manner by all participants. Fairness: IRONFORGE considers a practical scenario where resources are unevenly distributed among users. Each user, based on its resource amount, selects several existing models, verifies their correctness, evaluates the model accuracy over the local dataset, and conducts the aggregation. IRONFORGE also enables state consistency, by using which an open and fair incentive mechanism can be established to motivate more participants. Security: Moreover, dedicated defense strategies against malicious attacks on incentive fairness, and against dataset privacy breaching, are presented to ensure the security of IRONFORGE. The key contributions are as follows.
+ ⊲ We propose a fully decentralized FL framework, namely IRONFORGE, which features a DAG-based data structure. IRONFORGE addresses the network asynchrony typically undergone in an FL process, and improves the motivation of agents participating in the process in an open environment by enabling reliable token rewards with strong consistency and model prediction accuracy.
+ ⊲ We specifically design a new validation mechanism guarding against well-known FL attacks, including model poisoning attacks, backdoor attacks, lazy attacks, and model stealing attacks, among which the model stealing attack has never been considered in any existing FL framework. By making use of noise-enabled Proof-of-Learning (PoL) to validate the gradient descent process, any malicious behaviors, such as faking the ownership of or directly using existing models, or embezzling rewards for a conspirator by claiming a falsified source list, can be captured and punished.
+ ⊲ We build a flexible and efficient testbed, named FLSim, to simulate the workflow across all FL frameworks considered in this paper, including the proposed IRONFORGE. We conduct comprehensive experiments based on FLSim, comparing the system performance, security, and fairness between the existing FL frameworks and IRONFORGE. Insights are provided as guidelines on how to select strategies in IRONFORGE to meet different requirements.
+ Extensive experiments corroborate that IRONFORGE outperforms the prevalent FL frameworks both with and without attacks leveraged, which highlights the holistic solution to the network asynchrony (issue-①) and the over-dependence on the central coordinators (issue-②). Strictly and approximately monotonic increases of rewards are observed in experiments with increasing CPU cores, memory capacity, and bandwidth in different incentive settings. This indicates that fairness (issue-③) can be ensured in IRONFORGE under various definitions of fairness.
+ The rest of the paper is organized as follows. Section I gives the introduction, followed by related works in Section II. Section III provides the system overview and Section IV details the design of IRONFORGE. Section V presents our implementation based on a new testbed with comprehensive experimental results. Section VI discusses system security and properties. Finally, Section VII concludes this work.
+ II. RELATED WORK
+ A conventional synchronous FL framework is constructed from a central coordinator and a number of nodes, which maintain the global model and perform FL iterations, respectively [2]. The coordinator periodically distributes the latest global model to the nodes, and then the nodes independently train the model with their local data and upload the trained local models to the coordinator [16]. After receiving the updated models from the nodes, the coordinator aggregates all the local models into a new global model. Such a synchronous FL framework can hardly be adapted to large-scale and heterogeneous networks, where asynchrony is non-negligible.
+ The issue of data asynchrony is tackled by asynchronous FL, which enables nodes to fetch the global model from central coordinators at any time, while the coordinators can update the global model immediately whenever any local model is collected. In [14], the authors introduced a cache layer between the coordinator and the local nodes. Each node trains the global model with its local data and uploads its model to the cache. The coordinator periodically aggregates the local models in the cache and generates a new global model. Semi-asynchronous FL protocols address problems of asynchronous FL such as low round efficiency and a poor convergence rate. The system in [5] incorporates a client selection algorithm decoupling the coordinator and the selected clients for a reduction of the average round time. The authors of [17] proposed an asynchronous federating-based detection approach for end devices. A pre-shared data training strategy for non-independent-and-identically-distributed (non-IID) data is developed to avoid convergence divergence under the non-IID patterns. After the collaborative model training procedure, each client further conducts an additional local training process to fit its respective patterns.
+ The aforementioned FL frameworks require central coordinators to schedule model training and aggregate models. The centralized architecture suffers from inherent security risks, such as the SPoF and a malicious central coordinator, and from limited scalability due to the bottleneck at the central coordinator. The most recent Distributed Ledger Technology (DLT) holds the potential to decentralize FL systems [18], [19]. Two key technologies in DLT are blockchain and DAG. In a blockchain, a group of miners run a consensus protocol to generate hash-chained data blocks, which are assembled from transactional data, and synchronize the chained blocks. Blockchain assures strong consistency among blockchain nodes and enables smart contracts to be executed across the blockchain network in a consistent and trustworthy way. In a DAG, transactions from decentralized DAG users are organized in a DAG structure where directed edges indicate the reference relationships between the transactions. DAG can achieve high throughput with short latency compared with blockchain [20].
+ DLT has been developed to remove the central coordinator and decentralize FL networks [9], [15], [21]. In BlockFL [15], [22], [23], decentralized blockchain miners conduct model verification and aggregation. To be specific, miners obtain trained local models from working nodes and other miners. After verification, miners aggregate the local models into updated global models and conduct Proof-of-Work (PoW) to create valid blocks containing the new global models. Then, the blocks are propagated to all miners to start the next FL iteration. BlockFL relies on the resource-intensive PoW consensus protocol to slow down the system and keep miners synchronized. To reduce overhead and improve scalability, DAG technology [24] is introduced to FL networks [9], [21], where trained models are appended to a DAG topology by working nodes without any coordination. Working nodes can learn the latest local models in the DAG by exchanging data with other nodes. By themselves, working nodes select, verify, and aggregate local models, and train the models using local datasets. Next, working nodes publish their trained models to the DAG with directed edges indicating the model references.
+ Existing works only consider homogeneous networks where the training resources are evenly distributed, and thus lack open and fair incentive mechanisms. IRONFORGE proposed by this paper, on the other hand, improves the motivation of participants with rewards for training contributions and penalties for dishonest behaviors. IRONFORGE also tackles new vulnerabilities in open FL networks, including model stealing attacks, where attackers steal models from others and claim rewards from the plagiarized models, and collusion attacks, where attackers claim trained models are from conspirators.
+ III. SYSTEM OVERVIEW
+ In this section, we describe IRONFORGE from the aspects of its architecture, workflow, and system assumptions.
+ A. System Overview
+ We first introduce the roles that participate in the system and present our high-level design.
+ Architecture. IRONFORGE is a decentralized FL system that features a DAG-based network structure to simultaneously tackle the inconsistency in the decentralized FL process, the excessive reliance on central coordination, and the ineffective motivation for contributing learning resources. Specifically, IRONFORGE builds a hybrid architecture (cf. Fig. 1) that involves two types of DAG, namely, Task-DAG and Global-DAG (for details refer to Fig. 2 and Fig. 3, respectively). The training processes in both Task-DAG and Global-DAG are traceable owing to the DAG data structure. A DAG node published by a participant consists of a model update, and the directed edges of the node indicate the aggregating relationship with existing models during the update; hence no central coordinator is required to conduct the training processes.
+ [Fig. 1. System model of IRONFORGE]
+ Global-DAG contains a variety of models adopted by all participants, which can be viewed as a “unique” and public model resource pool. No consistent testing dataset is given in Global-DAG. Each user comes to Global-DAG and hunts for models that uniquely meet its own local testing dataset. Without central coordinators, any user can fetch models from the pool for direct use, release task requests, or make contributions, such as training on Global-DAG or on uncompleted training tasks, or verifying the tasks.
+ Each training task is managed by a Task-DAG, while IRONFORGE can contain multiple Task-DAGs at the same time to handle a range of different training tasks (see the right-hand side of Fig. 1). Task-DAGs are task-specific and are released by users who aim at improving their local model prediction accuracy by virtue of the computational power and resources of others. Within a task, the Task-DAG network contains multiple contributors who share the same training target provided by the publisher. The trained models for each task are broadcast and stored in the corresponding Task-DAG, and await checking and verification. The satisfying model of a task, observed by the publisher, is subsequently merged into Global-DAG, increasing its exposure to the public users. As a result, parallel learning on our hybrid DAG networks becomes possible, and the resultant models can be collected by Global-DAG for further involvement.
+ Roles. In IRONFORGE, the users can take different roles: viewer, task publisher, verifier, and contributor. A user is a participant in the network. Each user can select one or multiple roles to perform specific functional activities (see the left-hand side of Fig. 1). Specifically, a viewer can directly fetch models from the public resource pool without further actions. The task publisher aims to propose new tasks, and the proposed tasks are broadcast and await others’ contributions.
+ [Fig. 2 depicts the Task-DAG workflow: ① the publisher registers and releases a task; ② the task is announced to the community of workers; ③ the publisher observes the training; ④ workers train and publish models (a. register on the global FL-DAG; b. sync the DAG and validate DAG nodes; c. evaluate some models with the local test dataset; d. pick up the best ones and aggregate them; e. start training with the local training dataset; f. publish the model to the DAG); ⑤ the publisher ends the task by selecting the winner with the highest accuracy. The figure also shows three node schemas: the task-genesis node (header: sender, timestamp, target accuracy such as 95%, commitment of Dtest, prize, contest strategy, penalty strategy, verification committee; payload: weights as ipfs_uri+hash), task-model nodes (header: sender, timestamp, sources, evaluations; payload: weights as ipfs_uri+hash), and the task-termination node (sender, timestamp, winner, public testing dataset Dtest, accuracy, balance update).]
+ Fig. 2. Task-DAG. The figure illustrates an overview of starting a new DAG-based FL task, also known as a Task-DAG. One who aims to improve his model accuracy to a certain target with the help of the community can release a task as the task publisher. Some amount of tokens is deposited as the prize, which will subsequently be awarded to all eligible participants once the winning model is found and selected by the task publisher. The balance update of each participant is recorded in a task-termination node published by the task publisher, and can be subsequently settled by the Global-DAG network.
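+ The node schemas sketched in Fig. 2 can be made concrete in code. The following Python fragment is a minimal sketch (not part of the original design; all field and function names are illustrative) of a task-model node whose header carries the source list and evaluations, while the weights live off-chain behind an IPFS URI and a hash:
+ import hashlib
+ import json
+ import time
+ from dataclasses import dataclass, field
+
+ @dataclass
+ class TaskModelNode:
+     # Header fields mirroring the task-model node of Fig. 2 (illustrative names).
+     sender: str          # address of the publishing worker
+     sources: list        # identifiers of the referenced (aggregated) nodes
+     evaluations: list    # local testing accuracies of the referenced nodes
+     weights_uri: str     # e.g., an IPFS URI to the trained weights (payload)
+     weights_hash: str    # hash of the weights for integrity verification
+     timestamp: float = field(default_factory=time.time)
+
+     def digest(self) -> str:
+         # Deterministic identifier that later nodes would list in their sources.
+         payload = json.dumps(self.__dict__, sort_keys=True).encode()
+         return hashlib.sha256(payload).hexdigest()
+
+ node = TaskModelNode(sender="0xWorkerK", sources=["<id of node A>"],
+                      evaluations=[0.91], weights_uri="ipfs://<cid>",
+                      weights_hash="<sha256 of weights>")
+ print(node.digest())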
+ In order to reap profits, a user can become a contributor by carrying out a training process, i.e., selecting, aggregating, and training models. He can either start the work on Global-DAG or enroll in others’ published, uncompleted tasks. Also, a verifier in the system verifies existing tasks in the resource pool. A user can contribute to, or verify, either one favorable task or multiple tasks in parallel for a higher profit. In short, the four roles cover all potential functional activities within IRONFORGE.
+ B. Workflow Overview
+ We now provide an overview of the workflow of IRONFORGE. We focus on the procedures of task establishment and task processing by presenting the interactive steps of a user between the Task-DAG and Global-DAG.
+ Step-1. The user registers a task in the Global-DAG network by depositing the committed prize. He obtains a task identifier and then broadcasts the task to the network. We assume that another user has previously accepted the proposed task and works on it as a task contributor.
+ Step-2. The contributor enters the procedure of training models. He first evaluates several existing models from the pool and selects a series of models for the shortlist.
+ Step-3. Based on the selected models, the contributor aggregates all the short-listed models and integrates them with local datasets to train the model according to the requirements.
+ Step-4. Once the training is completed, the contributor submits the trained model to the Task-DAG. Meanwhile, peer contributors may also work on the same task and generate competitively trained models. All these models are propagated within the Task-DAG network.
+ Step-5. The publisher who obtains the trained model terminates the task by marking it with a termination tag. Once selected by users who are conducting the training process in Global-DAG, the trained model is deemed to be formally synchronized into Global-DAG.
+ Notably, a user in the Global-DAG network can either contribute to other tasks proposed by peer users, or personally publish a task by himself. All the procedures follow similar steps, as described from Step-2 to Step-5.
+ C. System Assumptions
+ In this section, we list our assumptions on the network, security, and threat models of IRONFORGE.
+ Resource assumption. We do not assume any particular resource distribution in our work; the resource distribution in the entire network is random. This means that different participants, with a high probability, hold different computing resources, including computing power, network bandwidth, memory space, storage capability, and training dataset quality. Addressing this system heterogeneity is one of the core contributions of this work, as we weaken the long-existing implicit assumption of previous work [9]: the even resource distribution. IRONFORGE allows any distribution of shares of any type of resource among the participants, making the system practical.
+ User behavior assumption. We make two assumptions on user behaviors. First, the participants in the network are rational, meaning that they can select an arbitrary task, switch to others, or quit existing tasks for better profits. Second, different participants can focus on different training targets, including both task bundles (one task has a dependency on another) and orthogonal tasks (one task is independent of the others). This enables the processing of multiple tasks in parallel, greatly improving the system’s overall scalability and performance.
+ [Fig. 3 depicts the Global-DAG network: terminated tasks (Task-1, Task-2) and an ongoing Task-N feed their outcomes into the global DAG, to which community members with local datasets publish and contribute models. Settlement nodes, endorsed by a VRF-driven consensus, record the sender (based on consensus), timestamp, PoL results, unverified PoL, and balances.]
+ Fig. 3. Global-DAG. This figure illustrates an overview of the Global-DAG network. In the absence of a centralized coordinator, each participant trains a model by selecting and aggregating as many models (including the outcomes of terminated tasks) published by others as possible (based on the local capability). Model targets are not unique; according to different needs, the network can be treated as a global resource pool containing a variety of models. One can either find a model that satisfies his local testing dataset from the pool, or make contributions to the pool and obtain token rewards by improving existing models. Token balances are periodically settled (endorsed by a verifiable random function (VRF)-driven consensus) by settlement nodes that employ a chain structure to achieve strong consistency.
+ Security assumption. We assume that the honest nodes always conduct honest behaviors, obeying all the policies during model selection, model aggregation, model training, task verification, and other operations related to the defense strategies against adversaries. The adversaries have the ability to delay the model convergence and lower the model accuracy by leveraging popular FL attacks, including lazy attacks [9], poisoning attacks [25], and backdoor attacks [26]. The adversaries also have the ability to breach the incentive fairness by leveraging model stealing attacks [27]–[30], and to compromise the privacy of others’ training datasets. Adversaries can not only upload their previous models (traditional lazy attacks), but also fake the ownership of others’ previous models or fake their own training process to embezzle rewards for their conspirators. These two faking types are defined as stealing attacks and collusion attacks, respectively, and both belong to the context of model stealing attacks in this paper.
+ IV. DECENTRALIZED FEDERATED LEARNING
+ IRONFORGE involves four novel mechanisms, i.e., the release of Task-DAG networks, the decentralized model training, the defense strategy, and the incentive mechanism. IRONFORGE features two types of networks, the public Global-DAG and task-specific Task-DAGs. A training task can be outsourced to communities by releasing a Task-DAG network and following four steps: preparation, initialization, monitoring, and finalization. The decentralized training processes of Task-DAG and Global-DAG are specifically defined by the new decentralized model training mechanism, in which users aggregate existing models, train the aggregated models, and publish the model updates to the network in a decentralized way. The training processes are guarded by a new defense strategy against model stealing attacks in the decentralized setting, which has never been considered in existing studies. The crafted incentive mechanism assures the state consistency of the networks and enables smart-contract-enhanced incentives, including both rewards and penalties. Table II summarizes the notations.
+ A. Managing Task-DAG
+ Any user can outsource an FL training task by managing a DAG, as shown in Algo. 1. An FL training task can be described with a training target, i.e., the model to be trained, the targeted accuracy, and the testing dataset. To build incentives that motivate distributed workers and deter malicious workers, we design reward, penalty, and verification schemes for Task-DAG networks.
+ 1) Preparation: A training task Task𝑚 can be described with an initial model, i.e., 𝑊𝑚,𝑔, and an accuracy target 𝛼𝑚 of the model tested on the dataset 𝐷test_𝑚 from the task publisher 𝑈𝑚,𝑝. To reduce the storage and bandwidth overhead, the model weights can be stored in external infrastructures, e.g., the InterPlanetary File System (IPFS). The Uniform Resource Identifiers (URIs) and hash codes of the weights are embedded in DAG nodes for access and verification. The testing dataset 𝐷test_𝑚 is committed in the genesis node of the Task-DAG by embedding its hash code, and is revealed by the end of the training for model verification. This commit-and-reveal design prevents direct access to 𝐷test_𝑚 during the training process and ensures that the final selected model can be publicly verified.
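+ A minimal sketch of this commit-and-reveal step follows (illustrative only; the paper specifies embedding a hash code, while the salt below is an added assumption that prevents brute-force guessing of small datasets):
+ import hashlib
+
+ def commit(data: bytes, salt: bytes) -> str:
+     # Commitment embedded in the task-genesis node: H(salt || D_test).
+     return hashlib.sha256(salt + data).hexdigest()
+
+ def verify_reveal(commitment: str, data: bytes, salt: bytes) -> bool:
+     # Once the dataset is revealed in the task-termination node,
+     # anyone can recompute the hash and check it against the genesis node.
+     return commit(data, salt) == commitment
+
+ d_test = b"<serialized testing dataset>"
+ salt = b"<random nonce chosen by the publisher>"
+ c = commit(d_test, salt)
+ assert verify_reveal(c, d_test, salt)
+ assert not verify_reveal(c, b"<tampered dataset>", salt)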
+ To run a Task-DAG with an incentive in a secure way, the task publisher 𝑈𝑚,𝑝 needs to design a contest strategy Φ𝑚 to allocate the reward 𝜈𝑚 to contributors, and a penalty strategy Υ𝑚 to suppress the flooding of excessive trivial models and other malicious behaviors. Examples of the plug-and-play contest strategy include an egalitarian strategy, where the prize is divided equally among the contributors along the traversal of the final winner node; an implementation of “to each according to his contribution”, whereby the prize is allocated based on the amount of contribution; or a strategy striking a balance in between, as sketched below. Examples of the penalty strategy include an implementation of the S-Index/H-Index ratio (i.e., preventing excessive self-citations) [31], or the occupation ratio along the traversal of the final winner node.
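+ The two contest strategies above admit a small Python sketch (hypothetical helper names; the contribution scores are illustrative placeholders for whatever metric Φ𝑚 adopts):
+ def egalitarian_split(prize, contributors):
+     # Equal division among the nodes on the winner's ancestry path.
+     share = prize / len(contributors)
+     return {c: share for c in contributors}
+
+ def proportional_split(prize, contributions):
+     # "To each according to his contribution": weight by a contribution
+     # score, e.g., the accuracy gain each ancestor added.
+     total = sum(contributions.values())
+     return {c: prize * v / total for c, v in contributions.items()}
+
+ ancestors = ["u1", "u2", "u3"]                 # traversal of the winner node
+ gains = {"u1": 0.05, "u2": 0.02, "u3": 0.03}   # illustrative accuracy gains
+ print(egalitarian_split(90.0, ancestors))   # {'u1': 30.0, 'u2': 30.0, 'u3': 30.0}
+ print(proportional_split(90.0, gains))      # {'u1': 45.0, 'u2': 18.0, 'u3': 27.0}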
+ TABLE II: Notation Definition
+ 𝑈𝑘 — the 𝑘-th user
+ 𝐵𝑘 — the balance of the 𝑘-th user
+ 𝐷train_𝑘 — the local training dataset of 𝑈𝑘
+ 𝐷test_𝑘 — the local testing dataset of 𝑈𝑘
+ 𝛽 — the number of candidate weights
+ 𝜎 — the number of aggregated weights
+ Task-DAG:
+ Task𝑚 — the 𝑚-th Task-DAG network
+ 𝛼𝑚 — the accuracy target of Task𝑚
+ 𝜈𝑚 — the committed prize of Task𝑚
+ 𝑈𝑚,𝑝 — the publisher of Task𝑚
+ 𝑁𝑚,𝑔 — the genesis node of Task𝑚
+ 𝑇𝑚,𝑔 — the creation timestamp of 𝑁𝑚,𝑔
+ 𝑊𝑚,𝑔 — the initial model weights of Task𝑚
+ 𝑁𝑚,𝑘,𝑖 — the 𝑖-th node published by the 𝑘-th user in Task𝑚
+ M𝑚,𝑘,𝑖 — the source list of 𝑁𝑚,𝑘,𝑖
+ 𝐸𝑚,𝑘,𝑖 — the evaluation result for 𝑁𝑚,𝑘,𝑖 over 𝐷test_𝑘
+ E𝑚,𝑘,𝑖 — the evaluation results for M𝑚,𝑘,𝑖 over 𝐷test_𝑘
+ 𝑊*𝑚,𝑘,𝑖 — the model weights aggregated from M𝑚,𝑘,𝑖 before the local training
+ 𝑊𝑚,𝑘,𝑖 — the model weights after the local training
+ 𝜌𝑚,𝑘,𝑖 — the settings for training 𝑊*𝑚,𝑘,𝑖 into 𝑊𝑚,𝑘,𝑖
+ 𝑇𝑚,𝑘,𝑖 — the creation timestamp of 𝑁𝑚,𝑘,𝑖
+ 𝑁𝑚,𝑒 — the task-termination node of Task𝑚
+ 𝑇𝑚,𝑒 — the creation timestamp of 𝑁𝑚,𝑒
+ Ψ𝑚 — the prize allocation of Task𝑚
+ Φ𝑚 — the contest strategy of Task𝑚
+ Υ𝑚 — the penalty strategy for malicious attacks in Task𝑚
+ Ω𝑚 — the verification committee of Task𝑚
+ Global-DAG:
+ 𝑁𝑔 — the genesis node of Global-DAG
+ 𝑁𝑘,𝑖 — the 𝑖-th node published by the 𝑘-th user in Global-DAG
+ 𝑇𝑘,𝑖 — the creation timestamp of 𝑁𝑘,𝑖
+ 𝑆ℎ — the ℎ-th settlement node in the settlement set S
+ 𝜆ℎ — the subtree that is aggregated by 𝑆ℎ
+ 𝑉𝑘,𝑘′,𝑖 — a PoL-challenge raised by 𝑈𝑘′ for 𝑁𝑘,𝑖, where 𝑘 ≠ 𝑘′
+ 𝜋𝑘,𝑘′,𝑖 — the deposit to raise 𝑉𝑘,𝑘′,𝑖
+ 𝜖PoL — the threshold of PoL-verification
+ 𝑃𝑘,𝑘′,𝑖 — the PoL-response replied by the publisher 𝑈𝑘 of 𝑁𝑘,𝑖 to 𝑉𝑘,𝑘′,𝑖 raised by 𝑈𝑘′, where 𝑘 ≠ 𝑘′
+ 𝑅𝑘,ˆ𝑘,𝑖 — the PoL-result sent from 𝑈ˆ𝑘 on 𝑃𝑘,𝑘′,𝑖, where 𝑘 ≠ ˆ𝑘
+ 𝜏ℎ — the timeout for 𝑃𝑘,𝑘′,𝑖 to be published after 𝑉𝑘,𝑘′,𝑖 has been published and confirmed by 𝑆ℎ
+ Θℎ — the committee elected to conduct consensus for 𝑆ℎ
+ The publisher 𝑈𝑚,𝑝 also invokes an election to nominate a set of users 𝑈𝑘 that constitute the task committee Ω𝑚 of Task𝑚 for evaluating trained models and conducting PoL-verification. The committee is elected via a Verifiable Random Function (VRF) [32] upon the balances 𝐵𝑘 of the eligible 𝑈𝑘.
+ Algorithm 1: Manage Task-DAG
+ ⊲ Initialize a training task
+ 1  𝑈𝑚,𝑝.Deposit(Task𝑚, 𝜈𝑚)
+ 2  Ω𝑚 ← 𝑈𝑚,𝑝.VRF(Task𝑚)   ⊲ Elect the nominated verification committee for Task𝑚
+ 3  𝑁𝑚,𝑔 ← {𝐻(𝑊𝑚,𝑔), URI(𝑊𝑚,𝑔), 𝐻(𝐷test_𝑚), 𝛼𝑚, 𝜈𝑚, Φ𝑚, Υ𝑚, Ω𝑚, 𝑇𝑚,𝑔}
+ 4  𝑁𝑚,𝑔 ← 𝑈𝑚,𝑝.Sign(𝑁𝑚,𝑔)
+ 5  𝑈𝑚,𝑝.Announce(𝑁𝑚,𝑔)   ⊲ Broadcast to the network
+ ⊲ Observe the training
+ 6  while True do
+ 7      𝑁𝑚,𝑘,𝑖 ← 𝑈𝑚,𝑝.Monitor(Task𝑚)
+ 8      if 𝑁𝑚,𝑘,𝑖 breaches Υ𝑚 then
+ 9          𝑈𝑚,𝑝.ApplyPenalty(Υ𝑚, 𝑁𝑚,𝑘,𝑖, 𝑈𝑘)
+ 10     if 𝐸𝑚,𝑘,𝑖 > 𝛼𝑚 then
+ 11         ˆ𝐸𝑚,𝑘,𝑖 ← 𝑈𝑚,𝑝.Evaluate(𝑊𝑚,𝑘,𝑖, 𝐷test_𝑚)
+ 12         if ˆ𝐸𝑚,𝑘,𝑖 > 𝛼𝑚 then
+ 13             ˆ𝑁𝑚,𝑘,𝑖 ← 𝑁𝑚,𝑘,𝑖
+ 14             break
+ ⊲ Finalize the training task
+ 15 Ψ𝑚 ← 𝑈𝑚,𝑝.AllocatePrize(Φ𝑚, ˆ𝑁𝑚,𝑘,𝑖)
+ 16 𝑁𝑚,𝑒 ← {ˆ𝑁𝑚,𝑘,𝑖, ˆ𝑈𝑘, URI(𝐷test_𝑚), ˆ𝐸𝑚,𝑘,𝑖, Ψ𝑚, 𝑇𝑚,𝑒}
+ 2) Initialization: To initialize Task𝑚, the publisher 𝑈𝑚,𝑝 first registers Task𝑚 to the task management smart contract 𝑆𝐶𝑇 on the Global-DAG network by depositing the committed prize 𝜈𝑚. Next, 𝑈𝑚,𝑝 prepares a genesis node 𝑁𝑚,𝑔 including the commitment and the URI of the initial model to be trained, i.e., 𝐻(𝑊𝑚,𝑔) and URI(𝑊𝑚,𝑔), the model accuracy target 𝛼𝑚, the commitment of the public testing dataset 𝐻(𝐷test_𝑚), the committed prize 𝜈𝑚, the contest strategy Φ𝑚, the penalty strategy Υ𝑚, the nominated verification committee Ω𝑚, and the creation timestamp 𝑇𝑚,𝑔¹. Then, 𝑈𝑚,𝑝 signs the genesis node 𝑁𝑚,𝑔 and announces it.
+ 3) Monitoring: Upon these operations, training starts in Task𝑚, and the publisher 𝑈𝑚,𝑝 observes the progress until the model becomes mature enough. Any 𝑈𝑘 who is interested in contributing computational resources and competing for the prize 𝜈𝑚 continues training models and publishing nodes 𝑁𝑚,𝑘,𝑖, i.e., the 𝑖-th node released by 𝑈𝑘 in Task𝑚, as a worker. If any nodes breaching the penalty strategy Υ𝑚 are found, 𝑈𝑚,𝑝 issues fines and updates the balance of the publisher of the breaching nodes. Note that the balance change in regard to the penalty has yet to be finalized at this stage.
+ 4) Finalization: When the claim of reaching the targeted model accuracy 𝛼𝑚 is realized, 𝑈𝑚,𝑝 evaluates the model over the testing dataset 𝐷test_𝑚. Once the accuracy is surely met by the winner node ˆ𝑁𝑚,𝑘,𝑖, 𝑈𝑚,𝑝 executes the contest strategy Φ𝑚 and obtains the prize allocation Ψ𝑚. Next, 𝑈𝑚,𝑝 terminates Task𝑚 by creating a task-termination node 𝑁𝑚,𝑒 that points to the winner node ˆ𝑁𝑚,𝑘,𝑖 and contains the winner’s address ˆ𝑈𝑘, the URI of the testing dataset URI(𝐷test_𝑚), the achieved testing accuracy ˆ𝐸𝑚,𝑘,𝑖, the prize allocation Ψ𝑚, and the creation timestamp 𝑇𝑚,𝑒. The task publisher 𝑈𝑚,𝑝 then signs 𝑁𝑚,𝑒 and broadcasts 𝑁𝑚,𝑒 to the Global-DAG network. The balance change in regard to both the prize allocation Ψ𝑚 and the penalty is subsequently finalized by settlement nodes once the termination node is revealed to the public and is referred to by any future model in Global-DAG; see details in Section IV-D.
+ ¹ The trustworthiness of the timestamp is guaranteed by trusted timestamping services.
+ Algorithm 2: Train Models
+ 1  for 𝑈𝑘 parallelly do
+ 2      𝑈𝑘.Register(Task𝑚)   ⊲ Worker registration
+ 3      while Task𝑚 is ongoing do
+        ⊲ Sync, verify and select nodes
+ 4          while unsynchronized do
+ 5              𝑁𝑚,𝑘′,𝑖′ ← 𝑈𝑘.SyncNodes(Task𝑚)
+ 6              𝑈𝑘.VerifySignature(𝑁𝑚,𝑘′,𝑖′)
+ 7              𝑈𝑘.VerifyRegistration(𝑈𝑘′)
+ 8              𝑈𝑘.VerifyBalance(𝑈𝑘′)
+ 9              if verification passes then
+ 10                 𝑈𝑘.Propagate(𝑁𝑚,𝑘′,𝑖′)
+ 11         for 𝛽 number of 𝑁𝑚,𝑘′,𝑖′ ∈ Task𝑚 parallelly do
+ 12             𝐸′𝑚,𝑘′,𝑖′ ← 𝑈𝑘.Evaluate(𝑊𝑚,𝑘′,𝑖′, 𝐷test_𝑘)
+ 13         M𝑚,𝑘,𝑖, E𝑚,𝑘,𝑖 ← 𝑈𝑘.Select({𝑁𝑚,𝑘′,𝑖′, 𝐸′𝑚,𝑘′,𝑖′}, 𝜎)
+        ⊲ Aggregate, train and contribute nodes
+ 14         𝑊*𝑚,𝑘,𝑖 ← 𝑈𝑘.Aggregate(M𝑚,𝑘,𝑖)
+ 15         𝑊𝑚,𝑘,𝑖 ← 𝑈𝑘.T𝑚,𝑘(𝑊*𝑚,𝑘,𝑖, 𝜌𝑚,𝑘,𝑖, 𝐷train_𝑘)
+ 16         𝐸𝑚,𝑘,𝑖 ← 𝑈𝑘.Evaluate(𝑊𝑚,𝑘,𝑖, 𝐷test_𝑘)
+ 17         𝑁𝑚,𝑘,𝑖 ← {M𝑚,𝑘,𝑖, E𝑚,𝑘,𝑖, 𝜌𝑚,𝑘,𝑖, 𝐻(𝑊𝑚,𝑘,𝑖), URI(𝑊𝑚,𝑘,𝑖), 𝐸𝑚,𝑘,𝑖, 𝑇𝑚,𝑘,𝑖}
+ 18         𝑁𝑚,𝑘,𝑖 ← 𝑈𝑘.Sign(𝑁𝑚,𝑘,𝑖)
+ 19         𝑈𝑘.Announce(𝑁𝑚,𝑘,𝑖)
+ B. Decentralized Model Training
+ As an open system, IRONFORGE allows workers to contribute to the decentralized training in both a Task𝑚 and Global-DAG. The training process in a Task-DAG is given in Algo. 2, while the training process in Global-DAG shares the same algorithm, except that there does not exist a public shared testing dataset 𝐷test_𝑚 deciding the stopping point (i.e., the training accuracy target 𝛼) in the Global-DAG network. Global-DAG acts as a public resource pool of diversified models, allowing for free hunting of models that uniquely meet customized training targets upon the local testing dataset 𝐷test_𝑘 of each user 𝑈𝑘.
+ Task-DAG Training. Any user 𝑈𝑘 who is interested in competing for the training rewards in a Task𝑚 needs to accept the contest and penalty strategies specified in 𝑁𝑚,𝑔 and register to the task management smart contract 𝑆𝐶𝑇 on the Global-DAG network by depositing a certain amount of tokens. This can suppress Sybil attacks, distributed denial-of-service (DDoS) attacks, and other malicious behaviors under the regulation of the penalty strategy Υ𝑚.
+ While Task𝑚 is running, 𝑈𝑘 can synchronize the view of Task𝑚 and obtain the latest nodes 𝑁𝑚,𝑘′,𝑖′ with (𝑘 ≠ 𝑘′ ∨ 𝑖 ≠ 𝑖′) ∧ (𝑇𝑚,𝑘′,𝑖′ < 𝑇𝑚,𝑘,𝑖). 𝑈𝑘 then verifies the signatures of the nodes and confirms from 𝑆𝐶𝑇 that the corresponding worker 𝑈𝑘′ is registered and has enough balance. Worker 𝑈𝑘 drops the nodes that fail verification and propagates the verified nodes to other workers.
+ After verification, 𝑈𝑘 randomly evaluates several 𝑁𝑚,𝑘′,𝑖′ over its local testing dataset 𝐷test_𝑘 for the testing accuracy 𝐸′𝑚,𝑘′,𝑖′ until it collects 𝛽 candidate weights (Lines 11–13 of Algo. 2). Next, 𝑈𝑘 picks the top 𝜎 models, constituting the source list M𝑚,𝑘,𝑖, which are then aggregated into the pre-trained model 𝑊*𝑚,𝑘,𝑖 using a weighted aggregation function, as given by
+ 𝑊*𝑚,𝑘,𝑖 = Σ_{𝑊𝑝 ∈ M𝑚,𝑘,𝑖, 𝐸𝑝 ∈ E𝑚,𝑘,𝑖} ( 𝐸𝑝 / Σ_{𝐸𝑞 ∈ E𝑚,𝑘,𝑖} 𝐸𝑞 ) 𝑊𝑝.   (1)
+ After that, 𝑈𝑘 trains the aggregated model 𝑊*𝑚,𝑘,𝑖 over its local training dataset 𝐷train_𝑘 with training settings 𝜌𝑚,𝑘,𝑖 for a trained model 𝑊𝑚,𝑘,𝑖, as given by
+ 𝑊𝑚,𝑘,𝑖 = T𝑚,𝑘(𝑊*𝑚,𝑘,𝑖, 𝜌𝑚,𝑘,𝑖, 𝐷train_𝑘),   (2)
+ where T𝑚,𝑘 is the training function of 𝑈𝑘 for Task𝑚.
+ Once the training is done, 𝑈𝑘 evaluates the model over its local testing dataset 𝐷test_𝑘 and obtains the testing accuracy 𝐸𝑚,𝑘,𝑖. Next, 𝑈𝑘 prepares a model update node 𝑁𝑚,𝑘,𝑖 with the source list M𝑚,𝑘,𝑖, the corresponding accuracy list E𝑚,𝑘,𝑖, the hash code and URI of the trained model, i.e., 𝐻(𝑊𝑚,𝑘,𝑖) and URI(𝑊𝑚,𝑘,𝑖), the testing accuracy 𝐸𝑚,𝑘,𝑖, the training settings 𝜌𝑚,𝑘,𝑖, and the creation timestamp 𝑇𝑚,𝑘,𝑖. Worker 𝑈𝑘 then signs 𝑁𝑚,𝑘,𝑖 and broadcasts the signed node. Note that the training settings 𝜌𝑚,𝑘,𝑖, such as the learning rate and batch size, are embedded in nodes as a record of the training process for any upcoming PoL processes.
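+ A minimal Python sketch of this select-and-aggregate step (illustrative names; NumPy arrays stand in for flattened model weights) implements Eq. (1) directly:
+ import numpy as np
+
+ def select_and_aggregate(candidates, sigma):
+     # Pick the top-σ candidates by local testing accuracy, then aggregate
+     # their weights with accuracy-proportional coefficients as in Eq. (1).
+     top = sorted(candidates, key=lambda c: c["accuracy"], reverse=True)[:sigma]
+     total = sum(c["accuracy"] for c in top)
+     w_star = sum((c["accuracy"] / total) * c["weights"] for c in top)
+     return w_star, top   # `top` plays the role of the source list M_{m,k,i}
+
+ rng = np.random.default_rng(0)
+ candidates = [{"weights": rng.normal(size=4), "accuracy": a}
+               for a in (0.92, 0.85, 0.78)]
+ w_star, sources = select_and_aggregate(candidates, sigma=2)
+ print(w_star)   # weighted mix of the two most accurate candidates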
+ Global-DAG Training. Training in Global-DAG also follows Algo. 2, except that there exists neither a public shared testing dataset 𝐷test_𝑚 nor a unique training target deciding the stopping point; hence the DAG grows indefinitely. The testing accuracy 𝐸′𝑚,𝑘′,𝑖′ likewise plays no role in rewarding contributors who upload models to Global-DAG. Instead, 𝑈𝑘 receives a reward whenever one of its models gets referred to by any other subsequent model in the network, which becomes the default contest strategy for Global-DAG. Users conducting training in Global-DAG need to be responsible for their own training processes, including preparing their own goals and local testing datasets, and hunting for appropriate models across the whole network. Note that the task-termination nodes of each Task𝑚 are also included in Global-DAG, which enables tasks to be advertised to the wider public and to further evolve along with the diversified models in Global-DAG.
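+ The default contest strategy admits a very small sketch (hypothetical node layout; one token unit per reference is an assumed rate):
+ from collections import Counter
+
+ def reference_rewards(nodes, reward_per_reference=1.0):
+     # A node's owner earns a reward each time another user's node
+     # lists that node in its source list; self-references pay nothing.
+     owner = {n["id"]: n["owner"] for n in nodes}
+     payouts = Counter()
+     for n in nodes:
+         for src in n["sources"]:
+             if src in owner and owner[src] != n["owner"]:
+                 payouts[owner[src]] += reward_per_reference
+     return payouts
+
+ dag = [{"id": "a", "owner": "u1", "sources": []},
+        {"id": "b", "owner": "u2", "sources": ["a"]},
+        {"id": "c", "owner": "u3", "sources": ["a", "b"]}]
+ print(reference_rewards(dag))   # Counter({'u1': 2.0, 'u2': 1.0})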
+ C. Proof-of-Learning: Defense against Model Stealing Attacks
+ We design a privacy-preserving PoL scheme to prove the computing-intensive training work and suppress model stealing attacks [27]–[30]. The idea of the privacy-preserving PoL is based on the reproducibility of training and the PoL in [33], in which the training process from the same starting point, over the same training dataset, and with the same training settings results in the same trained model, or in bounded differences among the trained models. In the proposed privacy-preserving PoL, provers can provide an obfuscated training dataset and give an estimated bound of model differences. We propose a new dataset obfuscation process to protect the privacy of the training dataset. The proposed privacy-preserving PoL scheme in the Global-DAG network is given in Algo. 3.
+ Algorithm 3: Proof-of-Learning
+ ⊲ Raise a PoL-challenge
+ 1  𝑉𝑘,𝑘′,𝑖 ← 𝑈𝑘′.Challenge(𝑁𝑘,𝑖 | 𝑘 ≠ 𝑘′ ∧ not been challenged)
+ 2  𝑉𝑘,𝑘′,𝑖 ← 𝑈𝑘′.Deposit(𝜋𝑘,𝑘′,𝑖)   ⊲ Broadcast to the network
+ 3  𝑉𝑘,𝑘′,𝑖 ∈ 𝜆ℎ is subsequently settled by 𝑆ℎ   ⊲ 𝜏ℎ countdown starts
+ ⊲ Reply with a PoL-proof
+ 4  D̃train_𝑘 ← 𝑈𝑘.Obfuscate(𝐷train_𝑘)
+ 5  𝑃𝑘,𝑘′,𝑖 ← 𝑈𝑘.Prove(𝑉𝑘,𝑘′,𝑖, D̃train_𝑘)   ⊲ Broadcast to the network
+ 6  𝑃𝑘,𝑘′,𝑖 ∈ 𝜆ℎ+𝑛 is subsequently settled by 𝑆ℎ+𝑛
+ ⊲ Verify the PoL-proof
+ 7  if 𝑃𝑘,𝑘′,𝑖 presents before the timeout then
+ 8      if 𝑇𝑘,𝑘′,𝑖 ≤ 𝑇ℎ + 𝜏ℎ then
+ 9          for 𝑈ˆ𝑘 ∈ Θℎ+𝑛+1 parallelly do
+ 10             𝑊replay_𝑘,𝑖 ← 𝑈ˆ𝑘.LearningReplay(𝑃𝑘,𝑘′,𝑖.(𝑊*𝑘,𝑖, D̃train_𝑘, 𝜌𝑘,𝑖))
+ 11             if ‖𝑊replay_𝑘,𝑖 − 𝑁𝑘,𝑖.𝑊𝑘,𝑖‖₂ < 𝜖PoL then
+ 12                 𝑅𝑘,ˆ𝑘,𝑖 roots for 𝑃𝑘,𝑘′,𝑖
+ 13         R𝑘,𝑖 ← Consensus(𝑅𝑘,ˆ𝑘,𝑖 | 𝑈ˆ𝑘 ∈ Θℎ+𝑛+1)
+ 14         if R𝑘,𝑖 roots for 𝑃𝑘,𝑘′,𝑖 then
+ 15             emit Challenge fails   ⊲ Learning proved
+ 16             goto Finalization
+ 17 emit Challenge succeeds   ⊲ Learning invalidated
+ ⊲ Finalization
+ 18 if Challenge fails then
+ 19     𝑈𝑘′ ← Refund(𝛽𝜋𝑘,𝑘′,𝑖 | 𝛽 ∈ (0, 1))
+ 20 else
+ 21     𝑈𝑘′ ← Refund(𝜋𝑘,𝑘′,𝑖)
+ 22     𝑈𝑘′ ← Penalty(𝑈𝑘, 𝜋𝑘,𝑘′,𝑖)
+ 23     WithdrawReward(𝑁𝑘,𝑖)
+ 24 𝑆ℎ+𝑛+1 is generated via Algo. 4
+ ⊲ Notice: the Consensus is conducted by the nominated verification committee Ω𝑚 when the PoL is done in a Task𝑚.
+ The privacy-preserving PoL scheme for Task-DAG networks can be conducted in the same way, where the PoL-proof is verified by the nominated task committee Ω𝑚.
+ 1) Challenge: If a node 𝑁𝑘,𝑖 has not been challenged before, any worker 𝑈𝑘′ can raise a PoL challenge against the node as a challenger. 𝑈𝑘′ needs to deposit a certain amount of tokens 𝜋𝑘,𝑘′,𝑖 to the PoL smart contract 𝑆𝐶PoL for the challenging node 𝑉𝑘,𝑘′,𝑖. Then, 𝑈𝑘′ signs and broadcasts the challenging node 𝑉𝑘,𝑘′,𝑖 to the network and starts a countdown.
+ 2) Response: The publisher of 𝑁𝑘,𝑖, i.e., 𝑈𝑘, needs to reply to the challenge 𝑉𝑘,𝑘′,𝑖 as a prover within the PoL timeout 𝜏. 𝑈𝑘 first obtains the obfuscated dataset D̃train_𝑘 by applying noise 𝛿𝑘,𝑘′,𝑖 to its local training dataset 𝐷train_𝑘. Next, 𝑈𝑘 prepares a PoL proof node 𝑃𝑘,𝑘′,𝑖 with the hash code and URI of the obfuscated dataset, i.e., 𝐻(D̃train_𝑘) and URI(D̃train_𝑘). Then, 𝑈𝑘 signs 𝑃𝑘,𝑘′,𝑖 and broadcasts it to the network.
+ 3) Verification: At the PoL verification stage, the committee Θℎ (see Section IV-D for the use of Θℎ) verifies 𝑃𝑘,𝑘′,𝑖 in parallel if the timestamp of the prover node is within the timeout 𝜏ℎ. To be specific, a verifier 𝑈ˆ𝑘 can fetch the obfuscated dataset D̃train_𝑘 with the URI given in 𝑃𝑘,𝑘′,𝑖 and confirm its integrity. Next, 𝑈ˆ𝑘 conducts a training task with the starting model described in 𝑁𝑘,𝑖, the training settings 𝜌𝑘,𝑖 embedded in 𝑁𝑘,𝑖, and the training dataset D̃train_𝑘, i.e.,
+ 𝑊*𝑘,𝑖 = Σ_{𝑊𝑝 ∈ M𝑘,𝑖, 𝐸𝑝 ∈ E𝑘,𝑖} ( 𝐸𝑝 / Σ_{𝐸𝑞 ∈ E𝑘,𝑖} 𝐸𝑞 ) 𝑊𝑝,
+ 𝑊replay_𝑘,𝑖 = Tˆ𝑘(𝑊*𝑘,𝑖, 𝜌𝑘,𝑖, D̃train_𝑘).   (3)
+ The verifier 𝑈ˆ𝑘 then calculates the Frobenius norm (F-norm) between the replayed 𝑊replay_𝑘,𝑖 and the 𝑊𝑘,𝑖 recorded in 𝑁𝑘,𝑖 as the PoL result 𝑅𝑘,ˆ𝑘,𝑖 for 𝑃𝑘,𝑘′,𝑖 [34]. If 𝑅𝑘,ˆ𝑘,𝑖 is within 𝜖PoL, the verification by 𝑈ˆ𝑘 succeeds and 𝑅𝑘,ˆ𝑘,𝑖 roots for 𝑃𝑘,𝑘′,𝑖. All verifiers in the committee Θℎ run consensus algorithms, e.g., Practical Byzantine Fault Tolerance (PBFT) [35], on the PoL results and publish the final committee decision R𝑘,𝑖 as a proving node in the network. Notice that, to prevent spoofing attacks against the PoL verification and improve the security and robustness [36], 𝜖PoL can be dynamically adjusted from the early stage to the later stage of training, so as to circumvent the otherwise consistent F-norm-based model distance exploited in a PoL spoofing attack. Also, any PoL prover of 𝑃𝑘,𝑘′,𝑖, ∀𝑘, 𝑘′, 𝑖, could alternatively conduct Verifiable Computation (VC) by showing an additional VC-proof to guarantee the identity of 𝑊replay_𝑘,𝑖.
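+ The acceptance test on each verifier reduces to a replay followed by a norm check, as in this minimal sketch (the toy replay function and all names are illustrative, standing in for re-running T with the recorded settings over the obfuscated dataset):
+ import numpy as np
+
+ def verify_pol(w_claimed, replay_fn, w_start, settings, d_train_obf, eps_pol):
+     # Replay the training from the recorded starting point and accept
+     # iff the norm of the difference stays below ε_PoL.
+     w_replay = replay_fn(w_start, settings, d_train_obf)
+     distance = np.linalg.norm(w_replay - w_claimed)
+     return distance < eps_pol, distance
+
+ def toy_replay(w_start, settings, data):
+     # One gradient step on a quadratic loss; a stand-in for T(W*, ρ, D̃).
+     grad = sum(w_start - x for x in data) / len(data)
+     return w_start - settings["lr"] * grad
+
+ data = [np.ones(3), np.zeros(3)]
+ w0 = np.full(3, 0.5)
+ w_claimed = toy_replay(w0, {"lr": 0.1}, data)   # an honest claim
+ ok, dist = verify_pol(w_claimed, toy_replay, w0, {"lr": 0.1}, data, 1e-6)
+ print(ok, dist)   # True 0.0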
+ 4) Clearing: At the finalization stage, the challenger 𝑈𝑘′ can only get 𝛽𝜋𝑘,𝑘′,𝑖 back from the challenge deposit, 𝛽 ∈ (0, 1), if the challenge fails (i.e., the learning is proved). Otherwise, 𝑈𝑘′ gets a full refund of the challenge deposit and also receives a reward from the penalty on 𝑈𝑘. The reward to 𝑈𝑘 from 𝑁𝑘,𝑖 is revoked as well if the learning cannot be proved.
+ D. Achieving State Consistency: Incentive Basis
+ IRONFORGE features settlement nodes to achieve state consistency for the Global-DAG network, enabling smart
+ contracts in the DAG and recording consistent account states. The process is shown in Algo. 4.
+ Global-DAG periodically, with a time interval Δ𝑇, elects the settlement committee Θ, among which consensus is
+ reached to generate settlement nodes. At the beginning of the ℎ-th interval, a VRF is used to securely elect a
+ committee Θℎ for the settlement node 𝑆ℎ, together with the committee leader 𝑈¯𝑘. The probability that any worker 𝑈𝑘
+ is selected for the committee depends on the balance of 𝑈𝑘, i.e., 𝐵𝑘; see (4) below,
+ VRF-hash(prv𝑘, 𝑠𝑒𝑒𝑑) ∈ Λ𝑘,    (4)
+ where Λ𝑘 is the area portion occupied by 𝑈𝑘 on a hash ring, and Λ𝑘 ∝ 𝐵𝑘.
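+ A minimal sketch of such balance-proportional election (our illustration; the hash-based vrf_hash below is only a
+ stand-in for a real verifiable random function) is:
+ import hashlib
+
+ def vrf_hash(prv_key: bytes, seed: bytes) -> float:
+     # Stand-in for a real VRF: a hash mapped to [0, 1).
+     digest = hashlib.sha256(prv_key + seed).digest()
+     return int.from_bytes(digest[:8], "big") / 2**64
+
+ def elect_committee(balances, keys, seed, size):
+     # Each worker k owns an arc of the unit hash ring proportional to its
+     # balance B_k (i.e., Lambda_k is proportional to B_k); a worker is
+     # elected when its VRF output lands inside its own scaled arc.
+     total = sum(balances.values())
+     return [k for k, b in balances.items()
+             if vrf_hash(keys[k], seed) < size * b / total]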
+ The committee can then settle the status of Global-DAG. Specifically, the leader of the committee, 𝑈¯𝑘, synchronizes
+ the view of a particular preceding moment of Global-DAG via consensus with the others, and identifies all the tip
+ nodes in the ℎ-th interval, i.e., the nodes that do not have any successor in the current interval. From each of the tip
+ nodes, 𝑈¯𝑘 traverses backwards along the path according to the preceding node list M𝑘,𝑖 of each 𝑁𝑘,𝑖. The search stops,
+ and the subtree 𝜆ℎ is obtained, once the stopping nodes are met on every path, i.e., the first visited node belonging
+ to the previous subtree 𝜆ℎ−1 on each path, or the genesis node 𝑁𝑔. Based on all nodes in 𝜆ℎ, 𝑈¯𝑘 updates the balances
+ of the involved workers according to the training contributions, PoL challenges, PoL proofs, and smart contract
+ executions.
+ Algorithm 4: Node Generation in the Settlement Set
+ ⊲ Generating a settlement node upon consensus
+ 1  while True do
+ 2      if 𝑇 mod Δ𝑇 = 0 then
+ 3          Θℎ ← VRF(Balance(𝑈𝑘 | ∀𝑘))    ⊲ Election
+ 4          while Θℎ.Consensus(𝑈𝑘.view | ∀𝑘 ∧ (𝑈𝑘 ∈ Θℎ)) do
+ 5              if consensus is reached then
+ 6                  break and obtain 𝑆ℎ
+ 7          S.Append(𝑆ℎ)    ⊲ The latest balance can be found in S[latest]
+ ⊲ Consensus
+ 8  for 𝑈𝑘 in Θℎ in parallel do
+ 9      Tips ← Prune({𝑁𝑘,𝑖 | 𝑇𝑘,𝑖 > 𝑇})
+ 10     while Traverse(start ← Tips) do
+ 11         if (Path-𝑝 reaches 𝑁𝑔 OR 𝑁𝑘,𝑖 ∈ 𝜆ℎ−1) is True then
+ 12             Stop Path-𝑝
+ 13         if ALL paths have stopped then
+ 14             break and obtain 𝜆ℎ
+ 15     𝜆ℎ ← Prune(Tips)    ⊲ Obtain the subtree 𝜆ℎ for the creation of 𝑆ℎ
+ 16     ¯𝑆𝑘,ℎ ← Form(𝜆ℎ.balance, 𝜆ℎ.PoL, {𝑁𝑚,𝑒 | never collected by S})    ⊲ Tips are excluded in the balance calculation
+ 17     if 𝑈𝑘 is the leader 𝑈¯𝑘 of Θℎ then
+ 18         ¯𝑆leader ← ¯𝑆𝑘,ℎ
+ 19         Broadcast(¯𝑆leader)
+ 20     else
+ 21         Verify(¯𝑆leader, ¯𝑆𝑘,ℎ)
+ 22         if verification passes then
+ 23             emit Consensus is reached
+ 24         else
+ 25             Elect a new Θℎ ← Θ′ℎ and redo Consensus
+ In Global-DAG, each valid reference from 𝑁𝑘′,𝑖 awards the owner 𝑈𝑘 (𝑘 ≠ 𝑘′) of the referenced model 𝑁𝑘,𝑖 a certain
+ amount of tokens. Next, 𝑈¯𝑘 proposes a new settlement node ¯𝑆𝑘,ℎ covering the updated balances and the PoL results.
+ The settlement committee Θℎ verifies and votes on ¯𝑆𝑘,ℎ. The committee Θℎ endorses ¯𝑆𝑘,ℎ as 𝑆ℎ if it reaches
+ consensus, or elects a new leader otherwise.
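+ A minimal sketch of the backward traversal that builds 𝜆ℎ (our illustration; node identifiers and the predecessor map
+ standing in for the lists M𝑘,𝑖 are assumptions) is:
+ def build_subtree(tips, prev_subtree, genesis, predecessors):
+     # Walk back from each tip along the predecessor lists and collect nodes
+     # until a node of the previous subtree (lambda_{h-1}) or the genesis
+     # node is reached on every path.
+     subtree, stack, seen = set(), list(tips), set()
+     while stack:
+         node = stack.pop()
+         if node in seen:
+             continue
+         seen.add(node)
+         if node in prev_subtree or node == genesis:
+             continue  # boundary nodes terminate the path and are excluded
+         subtree.add(node)
+         stack.extend(predecessors[node])
+     return subtree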
+ V. IMPLEMENTATION AND EVALUATION
+ In this section, we compare the proposed FL system, IRONFORGE, with other popular frameworks, including
+ GoogleFL [2], AsyncFL [6], and BlockFL [15]. We experimentally assess IRONFORGE in terms of the model
+ performance and the expected amount of rewards that can be earned under a variety of environment settings,
+ including different aggregation strategies, different sizes of hardware and software resources, and different types
+ and levels of malicious attacks, such as lazy attacks [9], poisoning attacks [25], backdoor attacks [26], and model
+ stealing attacks [27].
+ A. Experimental Configurations
+ 1) Hardware settings: The experiments are conducted on 6 servers, listed as follows.
+ Type-A (#1–3):
+ • CPU. 2 × Intel(R) Xeon(R) Gold 6230R CPU @ 2.10GHz, 2 × 52 cores
+ • GPU. 1 × Quadro RTX 4000, 1 × 8GB
+ • Memory. 528GB
+ • Bandwidth. 1000Mb/s
+ Type-B (#4–6):
+ • CPU. 2 × Intel(R) Xeon(R) Gold 6138 CPU @ 2.00GHz, 2 × 40 cores
+ • GPU. 8 × NVIDIA PCIe A100, 8 × 40GB
+ • Memory. 250GB
+ • Bandwidth. 1000Mb/s
+ • Bandwidth. 1000Mb/s
1230
+ 2) Software settings: We carry out the experiments upon
1231
+ Ubuntu 18.04.6 LTS with Keras 2.7 in Python 3.7.13 and
1232
+ Docker 20.10.12. We use FastDFS as the distributed file
1233
+ system with 15TB storage space for the model weights.
1234
+ 3) A new testbed - FLSim: To benchmark the considered FL
1235
+ frameworks, we build an FL testbed named FLSim as shown
1236
+ in Fig. 4. FLSim is docker-containerized upon our servers (#1–
1237
+ 6). Choosing different FL frameworks is flexibly plug-and-play
1238
+ in FLSim via three generic interfaces, i.e., the event emitter,
1239
+ model channel, and capability configuration.
1240
+ Event emitter. FLSim is event-driven where all events are
1241
+ delivered through Redis which serves as a message queue.
1242
+ Each runner can receive events in the network in real-time
1243
+ by the subscription function of Redis, and can broadcast
1244
+ corresponding events according to their role.
1245
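+ A minimal sketch of such an event bus with the redis-py client is shown below; the channel name and message
+ schema are illustrative rather than FLSim's actual ones:
+ import json
+ import redis
+
+ r = redis.Redis()
+
+ def publish_event(event_type, model_uri):
+     # Broadcast a GMUE/LMUE-style event to every runner.
+     r.publish("flsim-events", json.dumps({"type": event_type, "uri": model_uri}))
+
+ def listen(handler):
+     # Each runner subscribes and reacts according to its role.
+     pubsub = r.pubsub()
+     pubsub.subscribe("flsim-events")
+     for msg in pubsub.listen():
+         if msg["type"] == "message":
+             handler(json.loads(msg["data"]))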
+ Fig. 4. Architecture of the new FL testbed FLSim
+ Model channel. Models are indexed via MySQL, where properties such as the URIs of the weights are included,
+ while the actual model weights are stored in FastDFS. As a result, query efficiency can be significantly improved,
+ with no need to retain the large weights unless they are required for evaluation or aggregation.
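+ The sketch below illustrates the indexing idea; sqlite3 stands in for MySQL here, and fastdfs_upload is a
+ hypothetical helper standing in for the FastDFS client:
+ import hashlib
+ import sqlite3
+
+ db = sqlite3.connect(":memory:")  # sqlite3 stands in for MySQL in this sketch
+ db.execute("CREATE TABLE models (uri TEXT, sha256 TEXT)")
+
+ def index_model(weights_blob, fastdfs_upload):
+     # fastdfs_upload (hypothetical) stores the raw weight bytes in the
+     # distributed file system and returns their URI.
+     uri = fastdfs_upload(weights_blob)
+     digest = hashlib.sha256(weights_blob).hexdigest()
+     db.execute("INSERT INTO models (uri, sha256) VALUES (?, ?)", (uri, digest))
+     return uri  # later queries touch only this light index, not the weights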
+ Capability configuration. The tasks are trained on runners deployed on docker clusters. Each docker container represents
+ TABLE III: Hyper-parameter settings
+ Notation | Definition                            | Value (unit)
+ P        | idle probability                      | 0.1
+ E        | global epoch                          | 2000
+ 𝑒        | default local epoch                   | 5
+ 𝑙        | learning rate                         | 0.002
+ 𝜂        | sampled weights                       | 30
+ 𝛽        | default number of candidate weights   | 6
+ 𝜎        | default number of aggregated weights  | 5
+ B        | default batch size                    | 100
+ V        | validation set size                   | 100
+ a runner with different resource settings, such as CPU, memory, and bandwidth. Specifications of the containers can
+ be flexibly customized, with strong scalability, by defining the capability configuration to simulate various scenarios,
+ such as the resource imbalance considered in the experiments. Moreover, each runner is categorized into different
+ roles depending on which FL framework has been plugged in, e.g., "workers" in all considered frameworks, "masters"
+ in GoogleFL and AsyncFL, and "miners" in BlockFL.
+ 4) Training settings: The tuned hyper-parameters of the experiments are summarized in Table III. We perform
+ training on MNIST with 60,000 data samples and the Convolutional Neural Network (CNN) model illustrated in Fig. 5.
+ In total, the 60,000 MNIST samples are randomly split into two parts: 48,000 samples are used as the training set and
+ 12,000 samples as the testing set. We create non-IID training shards for contributors from the training set to simulate
+ a practical network condition. The training set is divided into two subsets of 24,000 samples each. The samples in the
+ first subset are sorted by label and then distributed into 120 shards of 200 samples each, so the sample labels within
+ each shard are relatively concentrated. The other half of the samples are randomly selected and distributed into 120
+ shards of 200 samples each, so the sample labels within each shard are relatively uniform. Finally, we repeat the
+ merging operation 120 times, each time taking one shard from each of the two subsets and merging them. We end up
+ obtaining 120 new shards, each of which contains 400 samples.
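+ A minimal NumPy sketch of this shard construction (our illustration of the procedure above, assuming 48,000
+ training samples) is:
+ import numpy as np
+
+ def make_noniid_shards(x, y, n_shards=120, shard_size=200, seed=0):
+     rng = np.random.default_rng(seed)
+     idx = rng.permutation(len(x))
+     half = len(x) // 2
+     # First half: sort by label so each shard is label-concentrated.
+     first = idx[:half][np.argsort(y[idx[:half]], kind="stable")]
+     # Second half: keep the random order so each shard is label-uniform.
+     second = idx[half:]
+     shards = []
+     for s in range(n_shards):
+         a = first[s * shard_size:(s + 1) * shard_size]
+         b = second[s * shard_size:(s + 1) * shard_size]
+         shards.append(np.concatenate([a, b]))  # 400 samples per shard
+     return shards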
+ 5) Environment settings: To demonstrate the state of the art of IRONFORGE, a comprehensive comparison between
+ IRONFORGE and three other FL frameworks is conducted in our experiments, i.e., the synchronous GoogleFL,
+ AsyncFL, and BlockFL. We launch 120 runners as workers to train models with a probability of P. We also define two
+ events that represent receiving two types of intermediate models:
+ • GLOBAL MODEL UPLOADED EVENT (GMUE): the model acquired by aggregating the uploaded local models
+ prior to training.
+ • LOCAL MODEL UPLOADED EVENT (LMUE): the model trained by workers with their local datasets.
+ These events are broadcast to notify each runner of the next step to take. Note that the genesis model of a task is
+ tagged as a global model in order to initiate any selected FL framework. This means that emitting a GMUE is used to
+ notify the network when the task is published.
+ Fig. 5. The CNN model is a lightweight version of the model in [2]. It contains one convolution layer with 32 filters
+ and a kernel size of 5 × 5, one 2 × 2 max-pooling layer, and one fully connected layer with 256 units and ReLU
+ activation. The output is processed by a fully connected layer with 10 units and softmax activation.
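+ For reference, a minimal Keras sketch of this architecture (our reconstruction from the caption; the optimizer choice
+ is an assumption, with the learning rate taken from Table III) could read:
+ from tensorflow import keras
+ from tensorflow.keras import layers
+
+ model = keras.Sequential([
+     layers.Input(shape=(28, 28, 1)),
+     layers.Conv2D(32, (5, 5), activation="relu"),   # -> (24, 24, 32)
+     layers.MaxPooling2D((2, 2)),                    # -> (12, 12, 32)
+     layers.Flatten(),                               # -> 4608
+     layers.Dense(256, activation="relu"),
+     layers.Dense(10, activation="softmax"),
+ ])
+ model.compile(optimizer=keras.optimizers.SGD(learning_rate=0.002),
+               loss="sparse_categorical_crossentropy",
+               metrics=["accuracy"])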
+ Synchronous GoogleFL. One additional runner is launched as the master to aggregate local model weights. For
+ GoogleFL, workers are activated by a GMUE and the master is activated by an LMUE. The workers, upon receiving
+ a GMUE, train on top of a global model downloaded from the master with their own local datasets. The master
+ receives an LMUE when the workers upload the trained models, and subsequently aggregates all collected local
+ models after a timeout, followed by uploading the aggregated result as the global model. This iterative process
+ continues until the task reaches the maximum iteration threshold E.
+ AsyncFL. One additional runner is launched as the master to aggregate local models. AsyncFL shares the same
+ procedure as GoogleFL, except that the master, upon receiving an LMUE during its idle period, creates a new global
+ model by aggregating the most recent global model and the newly collected local models with identical weighting
+ factors.
+ BlockFL. Five additional runners are launched as miners for BlockFL. In each iteration, the miners behave in the
+ same way as the master does in GoogleFL or AsyncFL. As an additional step, the miners compute the nonce to
+ finalize the block and compete for the rewards, with a synchronized lock in Redis ensuring the mining order.
+ IRONFORGE. No additional runners are launched for IRONFORGE. Each runner acts as a worker and a master at the
+ same time, i.e., it supports both aggregating and training operations, and thus receives both GMUE and LMUE during
+ each iteration.
+ 6) Security settings: We implement five types of contributors: normal, poisoning, backdoor, stealing, and colluding
+ contributors.
+ Normal contributors act honestly and independently across all phases.
+ (None, 10)Poisoning contributors aim to undermine the integrity and
1430
+ availability of the global model by crafting local poisoning
1431
+ models [25]. In this paper, we simulate poison contributors by
1432
+ adopting the label-flipping strategy that fakes labels and then
1433
+ conducting training on the forged datasets.
1434
+ Backdoor contributors aim to fail the global model on
1435
+ targeted tasks, typically by adhering crafted triggers to training
1436
+ samples, conducting training on the amended samples, and
1437
+ then uploading the attack models [26]. In this paper, backdoor
1438
+ contributors layer 5 × 5 white patches to training samples and
1439
+ change the label of the manipulated samples to a fixed one.
1440
+ Stealing contributors aim to gain rewards by stealing model
1441
+ weights trained by others and uploading the plagiarized
1442
+ weights as their own work [9]. In this paper, a stealing
1443
+ contributor 𝑘′ selects and directly uploads one of the existing
1444
+ weights by simply changing the ownership from 𝑘 to 𝑘′.
1445
+ Colluding contributors aim to embezzle training rewards for
1446
+ their conspirators by performing honest training processes but
1447
+ claiming source list M from the conspirators. In this paper, a
1448
+ certain proportion of contributors tamper the source list M in
1449
+ their uploaded weights, with a certain probability.
1450
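+ For illustration, the following sketch (ours, assuming 28 × 28 inputs scaled to [0, 1]) shows how the label-flipping
+ and backdoor manipulations above can be produced:
+ import numpy as np
+
+ def poison_labels(y, n_classes=10, rng=None):
+     # Label flipping: replace each label with a different random class.
+     rng = rng or np.random.default_rng(0)
+     offset = rng.integers(1, n_classes, size=len(y))
+     return (y + offset) % n_classes
+
+ def add_backdoor(x, y, target_label=0):
+     # Backdoor trigger: overlay a 5x5 white patch and force a fixed label.
+     x = x.copy()
+     x[:, :5, :5] = 1.0  # assumes images of shape (n, 28, 28) in [0, 1]
+     return x, np.full_like(y, target_label)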
+ B. Results and Evaluations
+ Several experiments are conducted from two perspectives: the performance comparison between IRONFORGE and
+ the other frameworks with and without attacks, and the fairness comparison between different resource levels in
+ terms of rewards.
+ 1) Performance – with and without attacks: Fig. 6 shows that the proposed IRONFORGE outperforms AsyncFL and
+ BlockFL, with only slightly slower convergence than the baseline GoogleFL across 2,000 iterations when no attacks
+ are leveraged. It is worth noting that the red curve increases sharply with as few oscillations as that of the baseline,
+ particularly highlighting the stability of IRONFORGE.
+ Fig. 6. Comparison between IRONFORGE and others in terms of accuracy with no attacks leveraged
+ The performance comparison with different levels of attack behaviors applied to IRONFORGE is shown in Fig. 7.
+ IRONFORGE is most resistant to the stealing attack, followed by the backdoor attacks and the poisoning attacks. It is
+ worth noting that the performance under a stealing ratio of 20% can be as good as that of the other settings. This is
+ because the native validation process in IRONFORGE can capture and eliminate the plagiarized models which are
+ reused or whose ownership is faked. The remaining 80% of models are still sufficient for contributors to aggregate
+ and train on, offering strong diversity over the non-IID data samples. On the other hand, an evident accuracy
+ degradation of around 5% is observed in both the poisoning (cf. Fig. 7(b)) and backdoor (cf. Fig. 7(c)) contexts.
+ Nevertheless, Figs. 8(a) and 8(b) show that IRONFORGE outperforms all the other FL frameworks with either a 20%
+ ratio of poisoning attackers or a 20% ratio of backdoor attackers. This highlights the superiority of IRONFORGE in
+ terms of its strong resistance to malicious model updating. Fig. 8(c) highlights the resistance of IRONFORGE to
+ stealing attacks and collusion attacks. Note that only BlockFL is considered in this comparison, as GoogleFL and
+ AsyncFL do not natively support incentives. It is found that, owing to the native validation process in IRONFORGE,
+ including the PoL verification, none of the dishonest contributors who leverage either the stealing attack or the
+ collusion attack can gain rewards. This prevents malicious contributors from faking the ownership of, or directly
+ reusing, existing models, and from embezzling rewards for their conspirators by claiming a falsified source list.
+ Fig. 9(a) shows the accuracy ranges obtained by adjusting the candidate size (𝛽) for different aggregation sizes (𝜎),
+ e.g., the blue band represents the accuracy ranging from the 1-of-2 to the 1-of-8 strategy, with 𝜎 = 1 and 𝛽 ∈ [2, 8].
+ The results reveal that increasing the aggregation size 𝜎 stabilizes the performance in the beginning stage and allows
+ for an increasingly higher convergence point. The effect of increasing the candidate size for a given aggregation size
+ gradually weakens as the aggregation size increases, as the width of each band becomes narrower and narrower. The
+ effect of increasing the aggregation size also weakens, as Fig. 9(a) shows that the performance of 5-of-6 is as good
+ as that of 7-of-8. The same insight can be drawn from the comparison of adjusting the aggregation size (𝜎) for
+ different candidate sizes (𝛽) shown in Fig. 9(b), e.g., the blue band represents the accuracy ranging from 1-of-3 to
+ 2-of-3. The bottom line of the red band for 𝛽 = 9 performs as poorly as the 2-of-3 strategy does, while the top line of
+ the red band performs the best of all. It can thus be concluded that, when no attacks are present, aggregating more
+ weights is more beneficial for obtaining high accuracy than merely aggregating a small number of weights selected
+ from more candidate weights.
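+ As a concrete illustration of a 𝜎-of-𝛽 strategy, the sketch below (ours; the equal weighting and the scoring function
+ are assumptions) scores 𝛽 candidate weight sets on a validation set, keeps the best 𝜎, and averages them:
+ import numpy as np
+
+ def sigma_of_beta(candidates, score, sigma):
+     # candidates: beta weight sets, each a list of per-layer arrays;
+     # score: validation-accuracy function for one weight set.
+     best = sorted(candidates, key=score, reverse=True)[:sigma]
+     # Equal-weight, layer-wise average of the sigma best candidates.
+     return [np.mean(layers, axis=0) for layers in zip(*best)]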
+ Fig. 9(c) shows the running time of different aggregation strategies, measured all the way from downloading the
+ weights to uploading a new model. Adjusting the candidate size (𝛽) has a stronger effect on the running time than
+ adjusting the aggregation size (𝜎). This observation implies that the bandwidth could be the bottleneck in
+ IRONFORGE. We design dedicated experiments to examine this phenomenon, as explained in Section V-B2 below.
+ 2) Fairness – earning rewards: Fairness is investigated in terms of the difference in rewards between different levels
+ of hardware specifications. The bottleneck of gaining more
+ (a) Stealing attacks (b) Poisoning attacks (c) Backdoor attacks
+ Fig. 7. Comparison between different levels of stealing attacks, poisoning attacks, and backdoor attacks applied to
+ IRONFORGE in terms of accuracy
+ (a) Poisoning attacks with a poisoning ratio of 20% (b) Backdoor attacks with a backdoor ratio of 20% (c) Stealing
+ and collusion attacks
+ Fig. 8. Comparison between IRONFORGE and others in terms of accuracy or rewards with a certain level of
+ poisoning attacks, backdoor attacks, and stealing and colluding attacks
+ rewards in IRONFORGE can also be identified. We select two different contest strategies, i.e., immediate settlement
+ and winner traverse. Immediate settlement is the strategy used in Global-DAG by default, rewarding every model
+ each time it is referred to by others. Winner traverse could be one of the main options used in a Task-DAG, rewarding
+ every model on the traversal path from the winner node back to the genesis node (exclusive).
+ According to Fig. 10, a monotonic increase of the rewards can be observed for immediate settlement with increasing
+ CPU cores, memory capacity, and bandwidth, and for winner traverse only with increasing bandwidth (winner
+ traverse appears to share characteristics with BlockFL, which also fails to offer a purely monotonic increase of
+ rewards across all specifications). This is because the winner-traverse strategy may include moderate models
+ aggregated during each iteration along the winner traversal path, while the winner can be highly random when the
+ competition is intense and convergence is near. Therefore, many models with high performance can be excluded by
+ the unique winner traversal path in the end. On the contrary, immediate settlement ensures that no model is missed
+ so long as a valid reference relationship is confirmed. Nevertheless, the difference between these two strategies does
+ not by itself establish which is fairer; different requirements may lead to different principles of fairness. Rooting for
+ an egalitarian strategy, or "to each according to his contribution", or striking a balance in between is a flexible
+ option that IRONFORGE offers back to users without imposing a rigid setting.
+ On the other hand, the monotonic increase in the bandwidth comparison for both strategies, as shown in Fig. 10(c),
+ highlights bandwidth as the most critical factor for earning rewards in IRONFORGE. That is to say, relatively poorer
+ users who are more active in uploading models to the network at higher frequency are more likely to share in the
+ rewards, rather than spending much time on a single strongly performant model.
+ Fig. 11 examines the effect of data quality on the rewards by adding a random perturbation, with a mean of 0 and a
+ standard deviation of 1, to 50% of the training samples of half of the users. We define the affected nodes as "Poor"
+ nodes, while "Excellent" nodes own only normal data samples. The result shows that the poor nodes using the
+ immediate-settlement strategy enjoy a narrower range of rewards compared to that of the winner-traverse strategy
+ and BlockFL. This highlights that poor nodes earning rewards via the immediate-settlement
+ (a) Accuracy difference between aggregation sizes (b) Accuracy difference between candidate sizes (c) Time
+ difference between aggregation strategies
+ Fig. 9. Comparison between different combinations of the aggregation strategies (𝜎-of-𝛽) applied to IRONFORGE in
+ terms of accuracy and execution time
+ (a) Reward difference between different CPU cores (b) Reward difference between memory capacities (c) Reward
+ difference between bandwidths
+ Fig. 10. Comparison between different levels of CPU cores, memory capacity, and bandwidth in terms of rewards
+ Fig. 11. Comparison between IRONFORGE and others in terms of rewards under the influence of the data quality issue
+ in IRONFORGE can do so more stably and predictably than in BlockFL and under winner traverse, and are less
+ affected by unexpected data degradation or network noise in unreliable channels. Excellent nodes have more
+ opportunities to earn higher rewards than poor nodes, while the median and the minimum of their rewards remain as
+ high as those of poor nodes. This reflects the fairness between excellent and poor nodes, i.e., offering stable rewards
+ to the poor while the excellent are given chances to make a great fortune.
+ VI. DISCUSSION AND ANALYSIS
+ This analysis focuses on the security of IRONFORGE. Every role in the system is involved in the attack model,
+ except that the timestamps in IRONFORGE are considered synchronized via external trustworthy servers.
+ Adversaries aim to break the state consistency in Global-DAG so that operations such as incentives and consensus
+ fail to be executed. The adversaries also aim to leverage the model stealing attack in order to:
+ • forge the "amount of work" by simply stealing others' models with no further effort put into training;
+ • collude with attackers by creating a model referring to models from colluding attackers.
+ In addition, the adversaries may attempt to breach dataset privacy during the dataset sharing in a PoL process. At
+ the same time, the adversaries may also unbalance the competition by abusing others' datasets to enrich their local
+ resources.
+ State consistency: State consistency is guaranteed over a sufficiently long period Δ𝑇 as long as the seeds used in
+ each VRF process are secure and the fault-tolerance lower bounds of the consensus protocol (e.g., 33% for PBFT)
+ are satisfied in the VRF-elected committees. We consider the time gap between two settlement nodes Δ𝑆ℎ,𝑆ℎ−1 to be
+ sufficiently large in IRONFORGE to expect that each user who gets registered
+ for committee election has an identical "view" of Global-DAG from 𝑇ℎ−1 to 𝑇ℎ−1 + Δ𝑇. This prevents the consensus
+ process in the committee from being trapped in an indefinite disagreement due to network asynchrony. On the other
+ hand, an unbiased and unpredictable random seed is crucial for a fair VRF process in which (4) cannot be
+ manipulated. This can be achieved by implementing existing randomness generators such as RANDAO [37] or
+ RandHound-VRF [38].
+ Model stealing attack: Offering the proposed incentive mechanism in a decentralized FL setting attracts attackers to
+ leverage model stealing attacks, by either stealing model ownership or faking training processes. Attackers can, with
+ no effort on local training, steal others' models and fake ownership with ease. Attackers can alternatively fake the
+ source lists on top of an honest local training process so that their accomplices, who are placed in the source list
+ instead, can reap profits at the expense of other honest users. These two types of model stealing attacks mislead
+ those who wish to verify the necessary training overhead and the efforts reflected in the source list. They are
+ particularly useful when attackers intend to steal the rewards and share them with their accomplices. IRONFORGE
+ enables the PoL challenge, where users can choose to challenge a model using idle resources, and the model owner is
+ required to provide a valid PoL proof in time for public verification during the consensus process. By having the
+ committee replay parts of the training from scratch and reach consensus, the "amount of work" and the source list M
+ can be explicitly determined.
+ Dataset privacy and model melting: This security mechanism is an implementation of our work [34]. Dataset
+ obfuscation helps preserve dataset privacy when datasets need to be publicly shared for a PoL challenge.
+ Experimental results in [34] show that an obfuscated dataset satisfies PoL verification without sacrificing the
+ privacy level, while decreasing the model utility against the abuse of collecting provers' obfuscated datasets, namely,
+ model melting. In addition, applying training over different data samples or using non-IID noise can significantly
+ reduce the risk of privacy decline when a sufficient number of challenges against the same model owner are
+ deliberately raised by attackers.
+ VII. CONCLUSION
+ This paper proposed IRONFORGE, a new generation of DAG-structured FL framework which, for the first time,
+ eliminates the need for a central coordinator. This resolves the issues of network asynchrony and of excessive
+ reliance on the coordinator, while enabling an open and fair incentive mechanism to encourage more participants,
+ particularly in networks where training resources are unevenly distributed. Experimental results based on the newly
+ developed testbed FLSim, along with the security analysis, highlight the superiority of IRONFORGE over existing
+ prevalent FL frameworks under various specifications regarding performance, fairness, and security. To our
+ knowledge, this is the first paper to propose a secure and fully decentralized FL framework that can be applied in
+ open networks with realistic network and training settings.
+ REFERENCES
+ [1] J. Konečný, H. B. McMahan, F. X. Yu, P. Richtárik, A. T. Suresh, and D. Bacon, "Federated learning: Strategies for improving communication efficiency," CoRR, vol. abs/1610.05492, 2016.
+ [2] H. B. McMahan, E. Moore, D. Ramage, S. Hampson, and B. A. y Arcas, "Communication-efficient learning of deep networks from decentralized data," in International Conference on Artificial Intelligence and Statistics, 2016.
+ [3] L. Lyu, X. Xu, Q. Wang, and H. Yu, Collaborative Fairness in Federated Learning. Cham: Springer International Publishing, 2020, pp. 189–204.
+ [4] T. Li, A. K. Sahu, A. Talwalkar, and V. Smith, "Federated learning: Challenges, methods, and future directions," IEEE Signal Processing Magazine, vol. 37, no. 3, pp. 50–60, 2020.
+ [5] W. Wu, L. He, W. Lin, R. Mao, C. Maple, and S. Jarvis, "SAFA: A semi-asynchronous protocol for fast federated learning with low overhead," IEEE Transactions on Computers, vol. 70, no. 5, pp. 655–668, 2021.
+ [6] C. Xie, S. Koyejo, and I. Gupta, "Asynchronous federated optimization," CoRR, vol. abs/1903.03934, 2019.
+ [7] J. Kang, Z. Xiong, D. Niyato, Y. Zou, Y. Zhang, and M. Guizani, "Reliable federated learning for mobile networks," IEEE Wireless Communications, vol. 27, no. 2, pp. 72–80, 2020.
+ [8] Y. Liu, J. Peng, J. Kang, A. M. Iliyasu, D. Niyato, and A. A. A. El-Latif, "A secure federated learning framework for 5G networks," IEEE Wireless Communications, vol. 27, no. 4, pp. 24–31, 2020.
+ [9] M. Cao, L. Zhang, and B. Cao, "Towards on-device federated learning: A direct acyclic graph-based blockchain approach," IEEE Transactions on Neural Networks and Learning Systems (TNNLS), pp. 1–15, 2021.
+ [10] Y. Zhan, J. Zhang, Z. Hong, L. Wu, P. Li, and S. Guo, "A survey of incentive mechanism design for federated learning," IEEE Transactions on Emerging Topics in Computing (TETC), vol. 10, no. 2, pp. 1035–1044, 2022.
+ [11] H. Kim, J. Park, M. Bennis, and S.-L. Kim, "Blockchained on-device federated learning," IEEE Communications Letters, vol. 24, no. 6, pp. 1279–1283, 2020.
+ [12] U. Majeed and C. S. Hong, "FLchain: Federated learning via MEC-enabled blockchain network," in 2019 20th Asia-Pacific Network Operations and Management Symposium (APNOMS), 2019, pp. 1–4.
+ [13] E. Madill, B. Nguyen, C. K. Leung, and S. Rouhani, "ScaleSFL: A sharding solution for blockchain-based federated learning," in Proceedings of the Fourth ACM International Symposium on Blockchain and Secure Critical Infrastructure (BSCI), 2022, pp. 95–106.
+ [14] Z. Yu, J. Hu, G. Min, H. Lu, Z. Zhao, H. Wang, and N. Georgalas, "Federated learning based proactive content caching in edge computing," in 2018 IEEE Global Communications Conference (GLOBECOM), 2018, pp. 1–6.
+ [15] H. Kim, J. Park, M. Bennis, and S.-L. Kim, "Blockchained on-device federated learning," IEEE Communications Letters, vol. 24, no. 6, pp. 1279–1283, 2020.
+ [16] F. Sattler, S. Wiedemann, K.-R. Müller, and W. Samek, "Robust and communication-efficient federated learning from non-i.i.d. data," IEEE Transactions on Neural Networks and Learning Systems (TNNLS), vol. 31, no. 9, pp. 3400–3413, 2020.
+ [17] P. Tian, Z. Chen, W. Yu, and W. Liao, "Towards asynchronous federated learning based threat detection: A DC-Adam approach," Computers & Security, vol. 108, p. 102344, 2021.
+ [18] X. Wang, X. Zha, W. Ni, R. P. Liu, Y. J. Guo, X. Niu, and K. Zheng, "Survey on blockchain for Internet of Things," Computer Communications, vol. 136, pp. 10–29, 2019.
+ [19] G. Yu, X. Wang et al., "Survey: Sharding in blockchains," IEEE Access, vol. 8, pp. 14155–14181, 2020.
+ [20] B. Cao, Z. Zhang, D. Feng, S. Zhang, L. Zhang, M. Peng, and Y. Li, "Performance analysis and comparison of PoW, PoS and DAG based blockchains," Digital Communications and Networks, vol. 6, no. 4, pp. 480–485, 2020.
+ [21] Y. Lu, X. Huang, K. Zhang, S. Maharjan, and Y. Zhang, "Blockchain empowered asynchronous federated learning for secure data sharing in Internet of Vehicles," IEEE Transactions on Vehicular Technology (TVT), vol. 69, no. 4, pp. 4298–4311, 2020.
+ [22] C. Ma, J. Li, L. Shi, M. Ding, T. Wang, Z. Han, and H. V. Poor, "When federated learning meets blockchain: A new distributed learning paradigm," IEEE Computational Intelligence Magazine, vol. 17, no. 3, pp. 26–33, 2022.
+ [23] D. C. Nguyen, M. Ding, Q.-V. Pham, P. N. Pathirana, L. B. Le, A. Seneviratne, J. Li, D. Niyato, and H. V. Poor, "Federated learning meets blockchain in edge computing: Opportunities and challenges," IEEE Internet of Things Journal (IoTJ), vol. 8, no. 16, pp. 12806–12825, 2021.
+ [24] Q. Wang, J. Yu, S. Chen, and Y. Xiang, "SoK: DAG-based blockchain systems," ACM Computing Surveys (CSUR), 2022.
+ [25] A. N. Bhagoji, S. Chakraborty, P. Mittal, and S. Calo, "Analyzing federated learning through an adversarial lens," in Proceedings of the 36th International Conference on Machine Learning (ICML), vol. 97, 09–15 Jun 2019, pp. 634–643.
+ [26] E. Bagdasaryan, A. Veit, Y. Hua, D. Estrin, and V. Shmatikov, "How to backdoor federated learning," in Proceedings of the Twenty Third International Conference on Artificial Intelligence and Statistics (AISTATS), vol. 108, 26–28 Aug 2020, pp. 2938–2948.
+ [27] F. Tramèr, F. Zhang, A. Juels, M. K. Reiter, and T. Ristenpart, "Stealing machine learning models via prediction APIs," in 25th USENIX Security Symposium (USENIX Security), Aug. 2016, pp. 601–618.
+ [28] N. Papernot, P. McDaniel, I. Goodfellow, S. Jha, Z. B. Celik, and A. Swami, "Practical black-box attacks against machine learning," in Proceedings of the 2017 ACM on Asia Conference on Computer and Communications Security (AsiaCCS), 2017, pp. 506–519.
+ [29] J. R. Correia-Silva, R. F. Berriel, C. Badue, A. F. de Souza, and T. Oliveira-Santos, "Copycat CNN: Stealing knowledge by persuading confession with random non-labeled data," in International Joint Conference on Neural Networks (IJCNN), 2018, pp. 1–8.
+ [30] T. Orekondy, B. Schiele, and M. Fritz, "Knockoff nets: Stealing functionality of black-box models," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2019.
+ [31] J. W. Flatt, A. Blasimme, and E. Vayena, "Improving the measurement of scientific success by reporting a self-citation index," Publications, vol. 5, no. 3, 2017.
+ [32] D. Galindo, J. Liu, M. Ordean, and J.-M. Wong, "Fully distributed verifiable random functions and their application to decentralised random beacons," in 2021 IEEE European Symposium on Security and Privacy (EuroS&P), 2021, pp. 88–102.
+ [33] H. Jia, M. Yaghini, C. A. Choquette-Choo, N. Dullerud, A. Thudi, V. Chandrasekaran, and N. Papernot, "Proof-of-learning: Definitions and practice," in 2021 IEEE Symposium on Security and Privacy (SP), 2021, pp. 1039–1056.
+ [34] G. Yu, X. Wang, P. Yu, C. Sun, W. Ni, and R. P. Liu, "Dataset obfuscation: Its applications to and impacts on edge machine learning," 2022.
+ [35] M. Castro, B. Liskov et al., "Practical Byzantine fault tolerance," in OSDI, vol. 99, no. 1999, 1999, pp. 173–186.
+ [36] R. Zhang, J. Liu, Y. Ding, Z. Wang, Q. Wu, and K. Ren, ""Adversarial examples" for proof-of-learning," in 2022 IEEE Symposium on Security and Privacy (SP), 2022, pp. 1408–1422.
+ [37] (2017) RANDAO: Verifiable random number generation. [Online]. Available: https://www.randao.org/whitepaper/Randao_v0.85_en.pdf
+ [38] E. Kokoris-Kogias, P. Jovanovic, L. Gasser, N. Gailly, E. Syta, and B. Ford, "OmniLedger: A secure, scale-out, decentralized ledger via sharding," in 2018 IEEE Symposium on Security and Privacy (SP), May 2018, pp. 583–598.
+
A9E2T4oBgHgl3EQfnQhW/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
BtAyT4oBgHgl3EQfePgk/content/tmp_files/2301.00316v1.pdf.txt ADDED
@@ -0,0 +1,595 @@
+ OPTIMIZATION PERSPECTIVES ON SHELLSORT
+ arXiv:2301.00316v1 [cs.DS] 1 Jan 2023
+ Oscar Skean, Department of Computer Science, University of Kentucky, oscar.skean@uky.edu
+ Richard Ehrenborg, Department of Mathematics, University of Kentucky
+ Jerzy W. Jaromczyk, Department of Computer Science, University of Kentucky
+ ABSTRACT
+ Shellsort is a sorting method that is attractive due to its simplicity, yet it takes effort to analyze its efficiency. The
+ heart of the algorithm is the gap sequence chosen a priori and used during sorting. The selection of this gap sequence
+ affects the efficiency of Shellsort, and thus drives both its theoretical and experimental analysis. We contribute to
+ Shellsort by identifying efficient gap sequences based on new parameterized functions. Specifically, a parameter
+ grid-search identifies optimal parameters for different input sizes by observing minimal sorting overhead in three
+ categories: the number of comparisons, the number of exchanges, and the running time. We report that our method
+ finds sequences that outperform state-of-the-art gap sequences with respect to the number of comparisons for chosen
+ small array sizes. Additionally, our function-based sequences outperform the running time of the Tokuda sequence
+ for chosen large array sizes. However, no substantial improvements were observed when minimizing the number of
+ exchanges.
+ 1 Introduction
+ The Shellsort algorithm is a sorting method that was among the first to be discovered. Published in 1959 [1], it saw
+ early interest due to its low memory requirements and simple implementation. Despite this, its analysis is difficult
+ and remains incomplete. The algorithm has found practical use today in memory-constrained environments,
+ embedded systems, and the bzip2 compressor. Recently it has also found use in data-oblivious sorting [2] and in
+ fully homomorphic encryption [3].
+ Shellsort is an in-place comparison sort and can be viewed as a generalization of insertion sort. For a data array A of
+ size N, Shellsort operates using a predetermined gap sequence 1 = k1 < · · · < km < N. The algorithm performs m
+ passes over A, starting with the largest gap km and ending with k1. During pass j, for the gap km−j, insertion sort is
+ applied to the km−j subarrays consisting of the data elements A(i), A(i + km−j), A(i + 2 · km−j), . . . for
+ i = 0, . . . , km−j − 1. We say a k-inversion is a pair (i, i + k) such that the inequality A(i) > A(i + k) holds. After pass
+ j, all km−j-inversions originally present in A have been resolved. We say an array with no k-inversions is k-sorted.
+ Note that the final pass with k1 is equivalent to insertion sort and is necessary to guarantee sortedness. Therefore, the
+ purpose of the gap sequence is to presort A as much as possible before the expensive final insertion-sort pass.
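+ For reference, a straightforward Python sketch of the algorithm described above is:
+ def shellsort(a, gaps):
+     # gaps: increasing gap sequence 1 = k1 < ... < km; largest gap first.
+     for gap in reversed(gaps):
+         for i in range(gap, len(a)):       # gap-insertion-sort each subarray
+             tmp, j = a[i], i
+             while j >= gap and a[j - gap] > tmp:
+                 a[j] = a[j - gap]          # shift to fix a gap-inversion
+                 j -= gap
+             a[j] = tmp
+     return a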
+ The main results of this paper are:
+ 1. New efficient Shellsort sequences derived from experimentally optimizing sequence-generating functions. For
+ prescribed array sizes, these sequences outperform well-known efficient sequences (e.g., Tokuda, Ciura) with respect
+ to the number of comparisons. These sequences also outperform the running time of the Tokuda sequence, making
+ them the fastest function-based sequences on the tested array sizes.
+ 2. We demonstrate the results of an experimental analysis comparing our proposed approaches with well-known
+ sequences by measuring the number of comparisons, exchange operations, and running time needed to sort
+ randomized permutations.
+ Traditionally, improvements for Shellsort have come from finding gap sequences with theoretical properties. We
+ discuss some particularly important sequences in Section 2. Then, in Section 3, we introduce parameterized functions
+ that generate a Shellsort sequence. The parameters are then optimized in a grid-search, finding the best possible
+ sequence that can be produced from each function for a chosen array size. In Section 4 we discuss our experimental
+ methodology for comparing the performance of the optimized template sequences to the baseline sequences
+ mentioned in Section 2.
+ 2 Background
+ The selection of a good gap sequence is critical to the performance of Shellsort, and there has been a plethora of
+ work focused on selecting good sequences [4, 5, 6]. Some of the earliest proposed sequences were based on powers
+ of 2 [7, 1]. Pratt then showed that the sequence of all products 2^p 3^q yields a number of inversions that is
+ Θ(N log² N) in the worst case [8]. We call this sequence Pratt-23 in Table 1. It still has the best known asymptotic
+ time complexity of any Shellsort sequence; however, it has a very large constant factor, which spurred the
+ development of new sequences.
+ The proof technique used to show the time complexity of Pratt-23 was based on counting the inversions of a
+ sequence that has already been 2-sorted and 3-sorted. A natural extension of this is to apply the Frobenius problem
+ to place bounds on what has already been sorted in prior passes. A typical formulation of the Frobenius problem is
+ as follows: suppose that you have k coins of denominations u1, u2, . . . , uk; what is the largest value that cannot be
+ made with a nonnegative linear combination of these coins? This largest value is known as the Frobenius number [9];
+ for two coprime denominations u1 and u2, for instance, it equals u1·u2 − u1 − u2. In the context of Shellsort, the
+ coins correspond to gap sizes and the Frobenius number to the largest remaining inversion after sorting with those
+ gaps. Using the Frobenius problem, several sequences were proposed that had a lower constant factor than Pratt-23
+ despite having a worse time complexity [10, 11].
+ Following those sequences, the focus of gap sequence selection shifted from finding theoretically good sequences to
+ finding experimentally good ones. For example, one property that was observed is that a geometric sequence with a
+ growth factor of 2.25 often performs well in practice. This observation was the basis of the Tokuda sequence [6];
+ see Table 1 for a functional form. To the best of our knowledge, this remains the most competitive function in the
+ literature.
+ The next improvement came from Ciura in 2002 [4]. The Ciura sequence disregarded the idea of a function-based
+ sequence and instead searched for the best set of gap elements themselves. Ciura found the best sequence for array
+ sizes of 128 and 1000, as well as a sequence conjectured to perform better for much larger array sizes. We call these
+ Ciura-128, Ciura-1000, and Ciura-Large in Table 1.
+ The Ciura sequences also marked a transition in how Shellsort performance is measured. Previously, most works
+ counted the number of exchanges used by the algorithm [8], partly because some proof techniques relied on counting
+ the number of inversions. Ciura instead focused on optimizing the number of comparisons, which was found to be
+ more directly related to the computation time of Shellsort. A comparison is defined as checking whether a pair of
+ array elements is inverted; an exchange is typically defined as the variable swap used to fix an inversion. Minimizing
+ the number of comparisons is especially beneficial when comparisons are expensive to make, such as when sorting
+ large satellite data. Similarly, minimizing exchanges is beneficial in memory-constrained systems. In this work, we
+ make clear distinctions in Section 4 about which measurement we are optimizing for. For a full treatment of the
+ history of gap sequences, we point the reader to [5].
+ 3 Parameterized Template Functions
+ The approach of directly optimizing the gap sequence, as in [4], grows in computational cost very quickly as N
+ increases. This growth is due to the fact that, as N increases, both the expected number of sequence elements and
+ their possible range of values increase. To help alleviate this, the authors of [4] found a suitable sequence prefix and
+ optimized its extension by a few values. However, at very large N even finding this prefix would be very costly.
+ Here, we formulate the problem of finding the optimal sequence as optimizing the parameters of a pre-defined
+ sequence-generating function. Guided by principles that we have seen in sequences that perform well, we define two
+ functions as follows:
+ kA(i) = ⌊(a^⌊i/b⌋ · c^⌊i/d⌋)^f + e⌋        (1)
+ kB(i) = ⌊a · b^⌊i/c⌋ + d⌋        (2)
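+ A small Python sketch of these generators (ours; with the Ours-A128-Comp parameters from Table 2 it should
+ reproduce the initial terms 1 4 9 24 85 of Table 1 for N = 128) is:
+ import math
+
+ def ours_a(i, a, b, c, d, e, f):
+     # k_A(i) = floor((a^floor(i/b) * c^floor(i/d))^f + e), cf. eq. (1)
+     return math.floor((a ** math.floor(i / b) * c ** math.floor(i / d)) ** f + e)
+
+ def ours_b(i, a, b, c, d):
+     # k_B(i) = floor(a * b^floor(i/c) + d), cf. eq. (2)
+     return math.floor(a * b ** math.floor(i / c) + d)
+
+ def gap_sequence(k, n, max_terms=64):
+     # Evaluate k(0), k(1), ... until the gaps reach the array size N;
+     # 1 is always included so the final pass is a plain insertion sort.
+     gaps = {1}
+     for i in range(max_terms):  # cap guards against non-growing parameters
+         g = k(i)
+         if g >= n:
+             break
+         gaps.add(g)
+     return sorted(gaps)
+
+ # gap_sequence(lambda i: ours_a(i, 2.6321, 1.6841, 2.1570, 0.7360, 3, 0.7630), 128)
+ # should yield [1, 4, 9, 24, 85].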
+ We refer to (1) as Ours-A and (2) as Ours-B in Table 1. Both formats contain floor functions in the exponents. We
+ found that this allows the functions to express more "chaotic" sequences, which helped improve performance. The
+ unique characteristic of Ours-A is the parameter f, an exponent that helps regulate growth. The parameter a of
+ Ours-B was
+ Sequence Name | Function                     | Optimized for N | Parameters | Initial Terms
+ Ciura [4]     | -                            | 128             | -          | 1 4 9 24 85 126
+               | -                            | 1000            | -          | 1 4 10 23 57 156 409 995
+               | -                            | Large           | -          | 1 4 10 23 57 132 301 701 1750
+ Tokuda [6]    | ⌈((9/4)^k − 1)/((9/4) − 1)⌉  | -               | -          | 1 4 9 20 46 103 233 525 . . .
+ Ours A        | ⌊(a^⌊i/b⌋ · c^⌊i/d⌋)^f + e⌋  | 128 (Comp)      | Table 2    | 1 4 9 24 85 150 . . .
+               |                              | 1000 (Comp)     | Table 2    | 1 4 10 23 57 153 400 . . .
+               |                              | 1000 (Time)     | Table 2    | 1 3 7 16 33 85 179 472 . . .
+ Ours B        | ⌊a · b^⌊i/c⌋ + d⌋            | 10000 (Comp)    | Table 2    | 1 4 10 27 72 187 488 . . .
+ Pratt-23 [8]  | Ordered 2^p · 3^q            | -               | -          | 1 2 3 4 6 8 9 . . .
+ Pratt-25      | Ordered 2^p · 5^q            | -               | -          | 1 2 4 5 8 10 15 16 . . .
+ Pratt-34      | Ordered 3^p · 4^q            | -               | -          | 1 3 4 9 12 16 24 . . .
+ Table 1: Gap sequences that are compared during experiments
+ Template          | a      | b      | c      | d      | e | f
+ Ours-A128-Comp    | 2.6321 | 1.6841 | 2.1570 | 0.7360 | 3 | 0.7630
+ Ours-A1000-Comp   | 3.5789 | 2.6316 | 3.8158 | 2.1579 | 3 | 0.7632
+ Ours-A1000-Time   | 2.75   | 2.75   | 3.7142 | 2.4286 | 2 | 0.7429
+ Ours-B10000-Comp  | 4.0816 | 8.5714 | 2.2449 | 0      | - | -
+ Table 2: Optimized parameters for template functions
206
+ quicker optimization. For conciseness, we only optimize Ours-A for array sizes 128 and 1000, and Ours-B for 10000.
207
+ Furthermore, we denote as Ours-A1000-Comp if optimizing Format A for number of comparisons on arrays of size
208
+ 1000. In the following section, we discuss the experimental procedure for optimizing (1) and (2), as well as for
209
+ comparing them to other baseline sequences.
210
+ 4
211
+ Experimental Procedure
212
+ Because the function is highly non-convex, it is difficult to utilize efficient techniques such as gradient descent. In-
213
+ stead, we employ a grid-search approach. One benefit of using a function grid-search, as opposed to direct sequence
214
+ optimization, is that the size of the search space has no relation to N. It depends only on the granularity and bounds
215
+ of the search. This is in constrast to the methodology used by Ciura, in which the number of tested sequences grows
216
+ with N [4].
217
+ For Ours-A, we define the grid for parameters a, b, c, d, f as 20 linearly spaced values between 0.5 and 5. We also
218
+ allow e to be an integer value between 0 and 10, including both endpoints. Because Ours-B has fewer parameters,
219
+ we can take a more fine-grained approach. For parameters a, b, c, we test 50 linearly spaced values from 0 to 10. For
220
+ parameter d, we constrain it to be the same as e in Ours-A.
221
+ The data array that we test on contains N distinct values 1 through N, and we shuffle it with the Fischer-Yates shuffle.
222
+ For each set of parameters in the grid-space, we compute the mean cost over 1000 iterations. We then take the set of
223
+ parameters producing the lowest mean cost as optimal. This cost can be defined as number of comparisons, number
224
+ of exchanges, or time.
225
+ Because Ciura sequences are optimized for specific array sizes with no means of extending them, it would be inap-
226
+ propriate to directly compare them with any function-based sequence at large array sizes. One method of extending a
227
+ Ciura sequence is by starting a geometric series on its last term with a ratio of 2.25. We adopt this method of extension
228
+ when measuring the performance of Ciura sequences.
229
+ 3
230
+
231
+ Sequence
232
+ N=20
233
+ N=128
234
+ N=200
235
+ µCO
236
+ µEX
237
+ µCO
238
+ µEX
239
+ µCO
240
+ µEX
241
+ Ours-A128-Comp
242
+ 76 ± 6
243
+ 38 ± 6
244
+ 998 ± 33
245
+ 531 ± 33
246
+ 1786 ± 46
247
+ 948 ± 48
248
+ Ours-A1000-Comp
249
+ 76 ± 6
250
+ 39 ± 7
251
+ 1004 ± 32
252
+ 516 ± 31
253
+ 1787 ± 44
254
+ 919 ± 45
255
+ Ours-A1000-Time
256
+ 79 ± 5
257
+ 39 ± 7
258
+ 1035 ± 26
259
+ 468 ± 27
260
+ 1832 ± 38
261
+ 846 ± 39
262
+ Ours-B10000-Comp
263
+ 76 ± 7
264
+ 33 ± 5
265
+ 1096 ± 52
266
+ 535 ± 36
267
+ 1775 ± 49
268
+ 960 ± 49
269
+ Ciura-128
270
+ 76 ± 6
271
+ 37 ± 6
272
+ 998 ± 32
273
+ 531 ± 33
274
+ 1800 ± 46
275
+ 970 ± 49
276
+ Ciura-1000
277
+ 76 ± 7
278
+ 39 ± 7
279
+ 1006 ± 31
280
+ 519 ± 34
281
+ 1787 ± 45
282
+ 920 ± 44
283
+ Ciura-Long
284
+ 76 ± 7
285
+ 39 ± 7
286
+ 1004 ± 32
287
+ 516 ± 32
288
+ 1794 ± 44
289
+ 907 ± 42
290
+ Tokuda
291
+ 76 ± 6
292
+ 37 ± 6
293
+ 1020 ± 28
294
+ 490 ± 28
295
+ 1808 ± 42
296
+ 891 ± 43
297
+ Pratt-25
298
+ 111 ± 4
299
+ 27 ± 4
300
+ 1732 ± 16
301
+ 345 ± 17
302
+ 3207 ± 21
303
+ 610 ± 24
304
+ Pratt-23
305
+ 136 ± 3
306
+ 25 ± 4
307
+ 2209 ± 13
308
+ 333 ± 15
309
+ 4095 ± 19
310
+ 589 ± 21
311
+ Pratt-34
312
+ 95 ± 4
313
+ 29 ± 4
314
+ 1424 ± 16
315
+ 374 ± 19
316
+ 2593 ± 25
317
+ 660 ± 26
318
+ Table 3: Number of operations to sort small arrays averaged over 1000 random array permutations. µCO denotes the
319
+ number of comparisons, µEX denotes the number of exchanges.
320
+ Sequence             N=1000                   N=2000                    N=5000
+                      µCO          µEX         µCO          µEX          µCO           µEX
+ Ours-A128-Comp       13250 ± 203  7847 ± 199  30530 ± 378  18611 ± 384  91122 ± 973   57728 ± 904
+ Ours-A1000-Comp      12941 ± 167  7004 ± 155  29596 ± 293  16234 ± 282  86821 ± 768   50349 ± 770
+ Ours-A1000-Time      13193 ± 144  6461 ± 146  30120 ± 263  14913 ± 257  87455 ± 548   44305 ± 552
+ Ours-B10000-Comp     12980 ± 186  7245 ± 177  29643 ± 305  17241 ± 325  86514 ± 617   57388 ± 817
+ Ciura-128            13300 ± 166  7003 ± 168  30359 ± 318  15987 ± 310  88193 ± 629   46689 ± 627
+ Ciura-1000           12918 ± 161  7002 ± 155  29534 ± 282  16138 ± 274  86641 ± 757   47852 ± 751
+ Ciura-Long           13035 ± 142  6701 ± 149  29567 ± 246  15427 ± 261  86232 ± 502   45347 ± 496
+ Tokuda               13116 ± 143  6556 ± 142  29888 ± 241  14952 ± 228  86838 ± 454   44116 ± 472
+ Pratt-25             26211 ± 68   4318 ± 72   62722 ± 122  9755 ± 131   194196 ± 263  28195 ± 278
+ Pratt-23             34380 ± 64   4253 ± 69   82785 ± 106  9669 ± 116   259088 ± 242  28354 ± 257
+ Pratt-34             20974 ± 89   4671 ± 87   50038 ± 153  10543 ± 160  154298 ± 372  30448 ± 372
+ Table 4: Number of operations to sort medium-sized arrays, averaged over 1000 random array permutations.
+ 4.1 Filtering
+ There are two techniques we employ to reduce our grid-search space.
+ First, we notice that different sets of parameters can produce the same sequence. For example, for the template
+ function Ours-A, the ordering of (a, b) and (c, d) does not matter. We precalculate all of the sequences produced in
+ the grid and only experiment on the unique ones. For N = 10000, this reduces the grid-search space from over 1.5
+ million sets of parameters to about 1 million.
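+ A sketch of this deduplication (ours; sequence_of is a hypothetical helper that evaluates template (1) or (2) for a
+ parameter set and truncates the sequence below N):
+
+ def unique_parameter_sets(grid, sequence_of, N):
+     """Keep one representative parameter set per distinct gap sequence."""
+     seen = {}
+     for params in grid:
+         key = tuple(sequence_of(params, N))  # tuples of gaps are hashable
+         seen.setdefault(key, params)         # first parameter set wins
+     return list(seen.values())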
+ Second, we use sequential analysis as a low-pass filter for screening out obviously poor sequences. This statistical
+ approach was first applied to Shellsort in [4]. Given bounds for the mean and an upper bound for the variance,
+ sequential analysis can tell in just a few repetitions, with a certain confidence, whether or not a sample mean falls
+ below the mean bounds. This allows us to quickly accept good gap sequences that have a low mean number of
+ comparisons. Any sequence that is accepted by the filter is then run for the full 1000 iterations to obtain a more
+ accurate estimate of the mean. We adopt the same setup as in [4].
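+ The sketch below is a simplified early-stopping screen in the same spirit, not the exact sequential-analysis test of
+ [4]; it reuses shellsort_cost from the earlier sketch, and threshold is a hypothetical bound on the mean comparison
+ count.
+
+ import random
+
+ def passes_filter(gaps, N, threshold, reps=20):
+     # Reject a candidate as soon as its running mean comparison count
+     # exceeds the threshold; survivors go on to the full 1000 iterations.
+     data = list(range(1, N + 1))
+     total = 0
+     for rep in range(1, reps + 1):
+         random.shuffle(data)
+         comparisons, _ = shellsort_cost(data, gaps)  # defined in the earlier sketch
+         total += comparisons
+         if total / rep > threshold:
+             return False
+     return True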
+ The Ciura sequences were optimized with respect to the number of comparisons, and because they are well known
+ to be some of the most practical sequences, we optimize our template functions with respect to the number of
+ comparisons as well.
+ 4.2 Hardware
+ The experiments were performed on an Ubuntu machine with an 8-core Intel Xeon W-3225. Experiments counting
+ the number of comparisons and exchanges were multithreaded. Experiments involving measurement of time were
+ run single-threaded, as any multithreaded applications could cause discrepancies in time measurement. All code was
+ written in Python.
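+ A minimal sketch of a single-threaded timing measurement in this style (ours; perf_counter is Python's standard
+ monotonic timer, and the counters in shellsort_cost add a little overhead that a pure sort would not have):
+
+ import random
+ import time
+
+ def time_sort_ms(gaps, N):
+     data = list(range(1, N + 1))
+     random.shuffle(data)
+     start = time.perf_counter()
+     shellsort_cost(data, gaps)   # from the earlier sketch; the sort dominates the cost
+     return (time.perf_counter() - start) * 1000.0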
+ Sequence             N=10000
+                      µCO            µEX
+ Ours-A128-Comp       206356 ± 1796  132351 ± 1797
+ Ours-A1000-Comp      196336 ± 1707  119012 ± 1710
+ Ours-A1000-Time      194052 ± 879   98952 ± 883
+ Ours-B10000-Comp     192029 ± 992   209292 ± 1293
+ Ciura-128            195256 ± 1106  105544 ± 1109
+ Ciura-1000           193778 ± 1895  111338 ± 1897
+ Ciura-Long           191435 ± 892   101680 ± 897
+ Tokuda               192574 ± 795   98071 ± 796
+ Pratt-25             450131 ± 516   62191 ± 526
+ Pratt-23             604502 ± 451   66923 ± 725
+ Pratt-34             355382 ± 723   63272 ± 462
+ Table 5: Number of operations to sort an array of size 10000, averaged over 1000 random array permutations.
+ Sequence             Running Time (ms)
+ Ours-A128-Comp       3.15 ± 0.08
+ Ours-A1000-Comp      3.02 ± 0.06
+ Ours-A1000-Time      3.01 ± 0.06
+ Ours-B10000-Comp     3.04 ± 0.07
+ Ciura-128            3.07 ± 0.06
+ Ciura-1000           3.01 ± 0.06
+ Ciura-Long           3.04 ± 0.07
+ Tokuda               3.06 ± 0.08
+ Pratt-25             5.00 ± 0.09
+ Pratt-23             6.35 ± 0.11
+ Pratt-34             4.17 ± 0.08
+ Table 6: Time to sort an array of size 1000, averaged over 1000 random array permutations.
+ Figure 1: (Left) For varying array sizes, the difference in number of comparisons between the baseline sequences
+ and Ours-A128; a positive value means Ours-A128 uses fewer comparisons. (Right) Number of comparisons for
+ varying array sizes larger than what Ours-A128 was optimized for.
+ [Figure 1 charts: the left panel plots the difference in mean number of comparisons (0-35) against array size (25-200)
+ for Ciura-128, Ciura-Large, and Tokuda relative to Ours-A128; the right panel plots the mean number of comparisons
+ (1200-1800) against array size (150-200) for Ours-A128, Ciura-128, Ciura-Large, and Tokuda.]
+ 4.3 Results
+ The best parameters that we found for Ours-A and Ours-B are given in Table 2, and the first few terms of the
+ resulting sequences are shown in Table 1.
+ For array size 200, we found that Ours-B10000-Comp outperforms all other tested sequences in terms of the number
+ of comparisons, as shown in Table 3. Figure 1 is a graphical aid to this table. These graphs show that sequences
+ generated by template functions can still perform well for array sizes larger than, though not substantially larger
+ than, the sizes they were optimized for. Furthermore, we found that both Ours-A128-Comp and Ours-A1000-Comp
+ matched, but did not surpass, the comparison counts of the Ciura and Tokuda sequences. It is interesting to note
+ that several of the initial terms of Ciura-128 and Ours-A128-Comp are identical.
+ We also test medium and large arrays, with results shown in Tables 4 and 5. For a graphical representation of Table 5,
+ see Figure 1 in the Appendix. Our new sequences approach the performance of the Ciura sequences without
+ surpassing them. However, Ours-B10000-Comp surpasses the Tokuda sequence for all array sizes that we have
+ tested here. Recall that the Tokuda sequence is currently the best known function-generated sequence. We have
+ therefore shown a new function-based sequence that outperforms other function-based sequences in terms of the
+ number of comparisons. As mentioned previously, this is particularly useful when comparisons are a dominant
+ operation, such as when sorting large satellite data.
+ On the other hand, our experiments with optimizing sequences to minimize running time are shown in Table 6. The
+ relevant sequence, Ours-A1000-Time, has a running time similar to that of the Ciura-1000 sequence, and both are
+ faster than any other tested sequence. The sequence Ours-A1000-Time is particularly interesting because its first few
+ elements (1 3 7) differ from those of most other fast sequences, which typically start with (1 4 9). This difference
+ suggests that while sequences beginning with (1 4 9) may be very good at minimizing the number of comparisons,
+ this does not guarantee a good overall running time.
+ 5 Conclusion
+ Improvements for Shellsort traditionally come from finding gap sequences with better theoretical properties. Here
+ we introduced an experimental framework to find improved gap sequences, following in the footsteps of [4]. Our
+ generated gap sequences outperformed all well-known gap sequences in terms of the number of comparisons on the
+ prescribed array sizes. Furthermore, the sequence Ours-A1000-Time is, to our knowledge, the function-based
+ sequence with the quickest running time, although it matches the performance of the Ciura sequence without
+ surpassing it. This may be improved with different sequence-generating functions or a different experimental setup,
+ which we leave to future work. While the sequences presented here were optimized for chosen array sizes, the
+ optimization may be repeated for any array size of interest.
+ References
+ [1] D. L. Shell, “A high-speed sorting procedure,” Communications of the ACM, vol. 2, no. 7, pp. 30–32, 1959.
+ [2] M. T. Goodrich, “Randomized shellsort: A simple data-oblivious sorting algorithm,” Journal of the ACM (JACM), vol. 58, no. 6, pp. 1–26, 2011.
+ [3] J.-W. Lee, Y.-S. Kim, and J.-S. No, “Analysis of modified shell sort for fully homomorphic encryption,” IEEE Access, vol. 9, pp. 126198–126215, 2021.
+ [4] M. Ciura, “Best increments for the average case of shellsort,” in International Symposium on Fundamentals of Computation Theory, pp. 106–117, Springer, 2001.
+ [5] R. Sedgewick, “Analysis of shellsort and related algorithms,” in European Symposium on Algorithms, pp. 1–11, Springer, 1996.
+ [6] N. Tokuda, “An improved shellsort,” in Proceedings of the IFIP 12th World Computer Congress on Algorithms, Software, Architecture – Information Processing ’92, Volume I, pp. 449–457, 1992.
+ [7] T. N. Hibbard, “An empirical study of minimal storage sorting,” Communications of the ACM, vol. 6, no. 5, pp. 206–213, 1963.
+ [8] V. R. Pratt, “Shellsort and sorting networks,” Tech. Rep., Stanford University, Department of Computer Science, 1972.
+ [9] E. S. Selmer, “On the linear Diophantine problem of Frobenius,” 1977.
+ [10] J. Incerpi and R. Sedgewick, “Improved upper bounds on shellsort,” Journal of Computer and System Sciences, vol. 31, no. 2, pp. 210–224, 1985.
+ [11] E. S. Selmer, “On shellsort and the Frobenius problem,” 1987.
BtAyT4oBgHgl3EQfePgk/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,508 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf,len=507
2
+ page_content='OPTIMIZATION PERSPECTIVES ON SHELLSORT Oscar Skean Department of Computer Science University of Kentucky oscar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
3
+ page_content='skean@uky.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
4
+ page_content='edu Richard Ehrenborg Department of Mathematics University of Kentucky Jerzy W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
5
+ page_content=' Jaromczyk Department of Computer Science University of Kentucky ABSTRACT Shellsort is a sorting method that is attractive due to its simplicity, yet it takes effort to analyze its efficiency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
6
+ page_content=' The heart of the algorithm is the gap sequence chosen a priori and used during sorting.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
7
+ page_content=' The selection of this gap sequence affects the efficiency of Shellsort, and thus drives both its theo- retical and experimental analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
8
+ page_content=' We contribute to Shellsort by identifying efficient gap sequences based on new parameterized functions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
9
+ page_content=' Specifically, a parameter grid-search identifies optimal pa- rameters for different input sizes for sorting by observing minimal overhead in three categories: number of comparisons, number of exchanges, and running time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
10
+ page_content=' We report that our method finds sequences that outperform state-of-the-art gap sequences concerning the number of comparisons for chosen small array sizes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
11
+ page_content=' Additionally, our function-based sequences outperform the running time of the Tokuda sequences for chosen large array sizes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
12
+ page_content=' However, no substantial improvements were observed when minimizing the number of exchanges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
13
+ page_content=' 1 Introduction The Shellsort algorithm is a sorting method that was among the first to be discovered.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
14
+ page_content=' Published in 1959 [1], it saw early interest due to its low memory requirements and simple implementation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
15
+ page_content=' Despite this, its analysis is difficult and remains incomplete.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
16
+ page_content=' The algorithm has found practical use today in memory-constrained environments, embedded systems, and the bzip2 compressor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
17
+ page_content=' Recently it has also found use in data-oblivious sorting [2] and in fully homomor- phic encryption [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
18
+ page_content=' Shellsort is an in-place comparison sort and can be viewed as a generalization of insertion sort.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
19
+ page_content=' For a data array A of size N, Shellsort operates using a predetermined gap sequence 1 = k1 < · · · < km < N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
20
+ page_content=' The algorithm performs m passes over A: starting with the largest km and ending with k1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
21
+ page_content=' During pass j, for a given gap km−j, insertion sort occurs for the km−j subarrays consisting of the data elements A(i), A(i + km−j), A(i + 2 · km−j), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
22
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
23
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
24
+ page_content=' for i = 0, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
25
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
26
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
27
+ page_content=' , km−j − 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
28
+ page_content=' We say a k-inversion is a pair (i, i + k) such that the inequality A(i) > A(i + k) holds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
29
+ page_content=' After pass j, all km−j-inversions that were originally in A have been solved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
30
+ page_content=' We say an array with no k-inversions is k-sorted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
31
+ page_content=' Note that the final pass with k1 is equivalent to insertion sort and is necessary to guarantee sortedness.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
32
+ page_content=' Therefore, the purpose of the gap sequence is to presort A as much as possible before the expensive final insertion sort pass.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
33
+ page_content=' The main results of this paper are: 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
34
+ page_content=' New efficient Shellsort sequences derived from experimentally optimizing sequence-generating functions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
35
+ page_content=' For prescribed array sizes, these sequences outperform well-known efficient sequences (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
36
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
37
+ page_content=' Tokuda, Ciura) with respect to the number of comparisons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
38
+ page_content=' These sequences also outperform the running time of the Tokuda sequence, making them the fastest function-based sequence on the tested array sizes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
39
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
40
+ page_content=' We demonstrate results of experimental analysis comparing our proposed approaches with well-known se- quences by measuring the number of comparisons, exchange operations, and running time needed to sort randomized permutations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
41
+ page_content=' Traditionally, improvements for Shellsort have come from finding gap sequences with theoretical properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
42
+ page_content=' We discuss some particularly important sequences in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
43
+ page_content=' Then in Section 3, we introduce parameterized sequence- arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
44
+ page_content='00316v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
45
+ page_content='DS] 1 Jan 2023 generating functions that generate a Shellsort sequence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
46
+ page_content=' The parameters are then optimized in a grid-search finding the best possible sequence that can be produced from that function for a chosen array size.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
47
+ page_content=' In Section 4 we discuss our experimental methodology to compare the performance of the optimized template sequences to the baseline sequences mentioned in Section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
48
+ page_content=' 2 Background The selection of a good gap sequence is critical to the performance of Shellsort.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
49
+ page_content=' There has been a plethora of work focused on selecting good sequences [4, 5, 6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
50
+ page_content=' Some of the earliest proposed sequences were based on powers of 2 [7, 1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
51
+ page_content=' Then Pratt showed that the sequence of 2p3q obtains a number of inversions that is Θ(Nlog2N) in the worst case [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
52
+ page_content=' We call this sequence Pratt-23 in Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
53
+ page_content=' This sequence still has the best known asymptotic time complexity for any Shellsort sequence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
54
+ page_content=' However, it has a very large constant factor which spurred the development of new sequences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
55
+ page_content=' The proof technique used to show the time complexity of Pratt-23 was based on counting the inversions of a sequence that has already been 2-sorted and 3-sorted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
56
+ page_content=' A natural extension of this is to apply the Frobenius problem to place bounds on what has already been sorted in prior passes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
57
+ page_content=' A typical formulation of the Frobenius problem is as follows: Suppose that you have k coins of denominations u1, u2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
58
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
59
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
60
+ page_content=' , uk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
61
+ page_content=' What is the largest value which cannot be made with a nonnegative linear combination of these coins?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
62
+ page_content=' This largest value is known as the Frobenius number [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
63
+ page_content=' In the context of Shellsort, the coins can be equated to gap size and the Frobenius number can be equated to the largest remaining inversion after sorting with the gaps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
64
+ page_content=' Using the Frobenius problem, several sequences were proposed that had a lower constant factor than Pratt-23 despite having a worse time-complexity [10, 11].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
65
+ page_content=' Following those sequences, the focus of gap sequence selection shifted from finding theoretically good sequences to finding experimentally good ones.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
66
+ page_content=' For example, one property that was observed was that a geometric sequence with a growth of 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
67
+ page_content='25 often performed well in practice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
68
+ page_content=' This observation was the basis of the Tokuda sequence [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
69
+ page_content=' See Table 1 for a functional form.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
70
+ page_content=' To the best of our knowledge, this remains the most competitive function in the literature.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
71
+ page_content=' The next improvement came from Ciura in 2002 [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
72
+ page_content=' The Ciura sequence disregarded the idea of a function-based sequence, and instead searched for the best set of gap elements themselves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
73
+ page_content=' Ciura found the best sequence for array sizes of 128 and 1000, as well as a sequence that was conjectured to perform better for much larger array sizes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
74
+ page_content=' We call these Ciura-128, Ciura-1000, and Ciura-Large in Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
75
+ page_content=' The Ciura sequences also marked a transition in how Shellsort performance was measured.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
76
+ page_content=' Previously, most works counted the number of exchanges used by the algorithm [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
77
+ page_content=' This partly because some proof techniques relied on counting the number of inversions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
78
+ page_content=' Ciura instead focused on optimizing the number of comparisons, which was found to be more directly related to the computation time of Shellsort.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
79
+ page_content=' A comparison is defined as checking if a pair of array elements are inverted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
80
+ page_content=' An exchange is typically defined as the variable swap used to fix an inversion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
81
+ page_content=' Minimizing the number of comparisons is especially beneficial when comparisons are expensive to make, such as when sorting large satellite data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
82
+ page_content=' Similarly, minimizing exchanges is beneficial in memory-constrained systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
83
+ page_content=' In this work, we make clear distinctions in Section 4 about what measurement we’re optimizing for.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
84
+ page_content=' For a full treatment of the history of gap sequences, we point the reader to [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
85
+ page_content=' 3 Parameterized Template Functions The approach of directly optimizing the gap sequence, as in [4], grows in computational cost very quickly as N increases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
86
+ page_content=' This growth is due the fact that as N increases, both the expected number of sequence elements and their possible range of values increases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
87
+ page_content=' To help alleviate this, the authors of [4] found a suitable sequence prefix and optimize the extension of it by a few values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
88
+ page_content=' However, at very large N even finding this prefix would be very costly.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
89
+ page_content=' Here, we formulate the problem of finding the optimal sequence as optimizing the parameters of a pre-defined sequence-generating function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
90
+ page_content=' Guided by principles that we have seen in sequences that perform well, we define two functions as follows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
91
+ page_content=' kA(i) = ⌊(a⌊ i b ⌋ · c⌊ i d ⌋)f + e⌋ (1) kB(i) = ⌊(a · b⌊ i c ⌋) + d⌋ (2) We refer to (1) as Ours-A and (2) as Ours-B in Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
92
+ page_content=' Both formats contain the floor function in exponents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
93
+ page_content=' We found this to allow the function to express a more ”chaotic” sequence, which helped improve performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
94
+ page_content=' The unique characteristic of Ours-A is the parameter f, an exponent that helps regulate growth.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
95
+ page_content=' The parameter a of Ours-B was 2 Sequence Name Function Optimized for N Parameters Initial Terms Ciura [4] 128 1 4 9 24 85 126 1000 1 4 10 23 57 156 409 995 Large 1 4 10 23 57 132 301 701 1750 Tokuda [6] ⌈ (9/4)k−1 (9/4)−1 ⌉ 1 4 9 20 46 103 233 525 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
96
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
97
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
98
+ page_content=' Ours A ⌊(a⌊ i b ⌋ · c⌊ i d ⌋)f + e⌋ 128 (Comp) Table 2 1 4 9 24 85 150 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
99
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
100
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
101
+ page_content=' 1000 (Comp) Table 2 1 4 10 23 57 153 400 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
102
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
103
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
104
+ page_content=' 1000 (Time) Table 2 1 3 7 16 33 85 179 472 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
105
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
106
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
107
+ page_content=' Ours B ⌊(a · b⌊ i c ⌋) + d⌋ 10000 (Comp) Table 2 1 4 10 27 72 187 488 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
108
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
109
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
110
+ page_content=' Pratt-23 [8] Ordered 2p · 3q 1 2 3 4 6 8 9 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
111
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
112
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
113
+ page_content=' Pratt-25 Ordered 2p · 5q 1 2 4 5 8 10 15 16 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
114
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
115
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
116
+ page_content=' Pratt-34 Ordered 3p · 4q 1 3 4 9 12 16 24 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
117
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
118
+ page_content=' .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
119
+ page_content=' Table 1: Gap sequences that are compared during experiments Template a b c d e f Ours-A128-Comp 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
120
+ page_content='6321 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
121
+ page_content='6841 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
122
+ page_content='1570 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
123
+ page_content='7360 3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
124
+ page_content='7630 Ours-A1000-Comp 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
125
+ page_content='5789 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
126
+ page_content='6316 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
127
+ page_content='8158 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
128
+ page_content='1579 3 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
129
+ page_content='7632 Ours-A1000-Time 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
130
+ page_content='75 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
131
+ page_content='75 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
132
+ page_content='7142 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
133
+ page_content='4286 2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
134
+ page_content='7429 Ours-B10000-Comp 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
135
+ page_content='0816 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
136
+ page_content='5714 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
137
+ page_content='2449 0 Table 2: Optimized parameters for template functions designed to have a similar purpose, albeit via multiplication.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
138
+ page_content=' Ours-B contains fewer parameters which allows for quicker optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
139
+ page_content=' For conciseness, we only optimize Ours-A for array sizes 128 and 1000, and Ours-B for 10000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
140
+ page_content=' Furthermore, we denote as Ours-A1000-Comp if optimizing Format A for number of comparisons on arrays of size 1000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
141
+ page_content=' In the following section, we discuss the experimental procedure for optimizing (1) and (2), as well as for comparing them to other baseline sequences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
142
+ page_content=' 4 Experimental Procedure Because the function is highly non-convex, it is difficult to utilize efficient techniques such as gradient descent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
143
+ page_content=' In- stead, we employ a grid-search approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
144
+ page_content=' One benefit of using a function grid-search, as opposed to direct sequence optimization, is that the size of the search space has no relation to N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
145
+ page_content=' It depends only on the granularity and bounds of the search.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
146
+ page_content=' This is in constrast to the methodology used by Ciura, in which the number of tested sequences grows with N [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
147
+ page_content=' For Ours-A, we define the grid for parameters a, b, c, d, f as 20 linearly spaced values between 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
148
+ page_content='5 and 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
149
+ page_content=' We also allow e to be an integer value between 0 and 10, including both endpoints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
150
+ page_content=' Because Ours-B has fewer parameters, we can take a more fine-grained approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
151
+ page_content=' For parameters a, b, c, we test 50 linearly spaced values from 0 to 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
152
+ page_content=' For parameter d, we constrain it to be the same as e in Ours-A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
153
+ page_content=' The data array that we test on contains N distinct values 1 through N, and we shuffle it with the Fischer-Yates shuffle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
154
+ page_content=' For each set of parameters in the grid-space, we compute the mean cost over 1000 iterations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
155
+ page_content=' We then take the set of parameters producing the lowest mean cost as optimal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
156
+ page_content=' This cost can be defined as number of comparisons, number of exchanges, or time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
157
+ page_content=' Because Ciura sequences are optimized for specific array sizes with no means of extending them, it would be inap- propriate to directly compare them with any function-based sequence at large array sizes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
158
+ page_content=' One method of extending a Ciura sequence is by starting a geometric series on its last term with a ratio of 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
159
+ page_content='25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
160
+ page_content=' We adopt this method of extension when measuring the performance of Ciura sequences.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
161
+ page_content=' ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
162
+ page_content='3 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
163
+ page_content='Sequence ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
164
+ page_content='N=20 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
165
+ page_content='N=128 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
166
+ page_content='N=200 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
167
+ page_content='µCO ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
168
+ page_content='µEX ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
169
+ page_content='µCO ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
170
+ page_content='µEX ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
171
+ page_content='µCO ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
172
+ page_content='µEX ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
173
+ page_content='Ours-A128-Comp ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
174
+ page_content='76 ± 6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
175
+ page_content='38 ± 6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
176
+ page_content='998 ± 33 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
177
+ page_content='531 ± 33 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
178
+ page_content='1786 ± 46 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
179
+ page_content='948 ± 48 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
180
+ page_content='Ours-A1000-Comp ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
181
+ page_content='76 ± 6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
182
+ page_content='39 ± 7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
183
+ page_content='1004 ± 32 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
184
+ page_content='516 ± 31 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
185
+ page_content='1787 ± 44 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
186
+ page_content='919 ± 45 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
187
+ page_content='Ours-A1000-Time ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
188
+ page_content='79 ± 5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
189
+ page_content='39 ± 7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
190
+ page_content='1035 ± 26 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
191
+ page_content='468 ± 27 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
192
+ page_content='1832 ± 38 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
193
+ page_content='846 ± 39 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
194
+ page_content='Ours-B10000-Comp ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
195
+ page_content='76 ± 7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
196
+ page_content='33 ± 5 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
197
+ page_content='1096 ± 52 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
198
+ page_content='535 ± 36 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
199
+ page_content='1775 ± 49 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
200
+ page_content='960 ± 49 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
201
+ page_content='Ciura-128 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
202
+ page_content='76 ± 6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
203
+ page_content='37 ± 6 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
204
+ page_content='998 ± 32 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
205
+ page_content='531 ± 33 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
206
+ page_content='1800 ± 46 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
207
+ page_content='970 ± 49 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
208
+ page_content='Ciura-1000 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
209
+ page_content='76 ± 7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
210
+ page_content='39 ± 7 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
211
+ page_content='1006 ± 31 ' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/BtAyT4oBgHgl3EQfePgk/content/2301.00316v1.pdf'}
212
(Table 3, continued)
                     µCO       µEX       µCO         µEX         µCO         µEX
(previous row, cont.)                    …           519 ± 34    1787 ± 45   920 ± 44
Ciura-Long           76 ± 7    39 ± 7    1004 ± 32   516 ± 32    1794 ± 44   907 ± 42
Tokuda               76 ± 6    37 ± 6    1020 ± 28   490 ± 28    1808 ± 42   891 ± 43
Pratt-25             111 ± 4   27 ± 4    1732 ± 16   345 ± 17    3207 ± 21   610 ± 24
Pratt-23             136 ± 3   25 ± 4    2209 ± 13   333 ± 15    4095 ± 19   589 ± 21
Pratt-34             95 ± 4    29 ± 4    1424 ± 16   374 ± 19    2593 ± 25   660 ± 26

Table 3: Number of operations to sort small arrays, averaged over 1000 random array permutations. µCO denotes the number of comparisons, µEX denotes the number of exchanges.
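The µCO and µEX entries in these tables are comparison and exchange counts from instrumented Shellsort runs. The sketch below is a minimal illustration, not the paper's harness: it uses one common counting convention (each inner-loop key comparison counts toward µCO, each element shift toward µEX), which may differ from the paper's instrumentation by small constants.

```python
def shellsort_counts(a, gaps):
    """Sort list `a` in place with the descending gap sequence `gaps`
    (which must end in 1); return (comparisons, exchanges)."""
    comparisons = exchanges = 0
    for gap in gaps:
        for i in range(gap, len(a)):
            x, j = a[i], i
            while j >= gap:
                comparisons += 1          # one key comparison per probe
                if a[j - gap] <= x:
                    break
                a[j] = a[j - gap]         # shift one slot toward its place,
                exchanges += 1            # counted here as an exchange
                j -= gap
            a[j] = x
    return comparisons, exchanges

# Example with Ciura's increments, descending, largest gap below the array size:
# shellsort_counts(list(range(200, 0, -1)), [132, 57, 23, 10, 4, 1])
```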
Sequence            N=1000                   N=2000                    N=5000
                    µCO          µEX         µCO          µEX          µCO           µEX
Ours-A128-Comp      13250 ± 203  7847 ± 199  30530 ± 378  18611 ± 384  91122 ± 973   57728 ± 904
Ours-A1000-Comp     12941 ± 167  7004 ± 155  29596 ± 293  16234 ± 282  86821 ± 768   50349 ± 770
Ours-A1000-Time     13193 ± 144  6461 ± 146  30120 ± 263  14913 ± 257  87455 ± 548   44305 ± 552
Ours-B10000-Comp    12980 ± 186  7245 ± 177  29643 ± 305  17241 ± 325  86514 ± 617   57388 ± 817
Ciura-128           13300 ± 166  7003 ± 168  30359 ± 318  15987 ± 310  88193 ± 629   46689 ± 627
Ciura-1000          12918 ± 161  7002 ± 155  29534 ± 282  16138 ± 274  86641 ± 757   47852 ± 751
Ciura-Long          13035 ± 142  6701 ± 149  29567 ± 246  15427 ± 261  86232 ± 502   45347 ± 496
Tokuda              13116 ± 143  6556 ± 142  29888 ± 241  14952 ± 228  86838 ± 454   44116 ± 472
Pratt-25            26211 ± 68   4318 ± 72   62722 ± 122  9755 ± 131   194196 ± 263  28195 ± 278
Pratt-23            34380 ± 64   4253 ± 69   82785 ± 106  9669 ± 116   259088 ± 242  28354 ± 257
Pratt-34            20974 ± 89   4671 ± 87   50038 ± 153  10543 ± 160  154298 ± 372  30448 ± 372

Table 4: Number of operations to sort medium-sized arrays, averaged over 1000 random array permutations.
4.1 Filtering

There are two techniques we employ to reduce our grid-search space. First, we notice that different sets of parameters can produce the same sequence. For example, for the template function Ours-A, the ordering of (a, b) and (c, d) does not matter. We precalculate all of the sequences produced in the grid and experiment only on the unique ones. For N = 10000, for example, this reduces the grid-search space from over 1.5 million sets of parameters to about 1 million.

Second, we use sequential analysis as a low-pass filter for screening out obviously poor sequences. This statistical approach was first applied to Shellsort in [4]. Given bounds for the mean and an upper bound for the variance, sequential analysis can tell in just a few repetitions whether or not a sample mean falls below the mean bound with a certain confidence. Sequential analysis thus allows us to quickly accept good gap sequences that have a low mean number of comparisons; any sequence accepted by the filter is then run for the full 1000 iterations to obtain a more accurate estimate of the mean. We adopt the same setup as in [4]. The Ciura sequences were optimized with respect to the number of comparisons, and because they are well known to be some of the most practical sequences, we optimize our template functions with respect to the number of comparisons as well. A sketch of both filtering steps is given below.
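Both reductions are straightforward to express in code. The sketch below is illustrative only: `make_sequence` is a hypothetical stand-in for the template functions (the actual Ours-A and Ours-B templates are defined in Table 2, not reproduced here; only the (a, b) and (c, d) symmetry is mirrored), and the accept/reject test is a simplified confidence-interval screen in the spirit of the sequential analysis of [4], not Ciura's exact procedure.

```python
import math

def make_sequence(a, b, c, d, n):
    """HYPOTHETICAL template for illustration; assumes b, d > 1 so gaps grow.
    Swapping the pairs (a, b) and (c, d) leaves the output unchanged."""
    gaps, k = [1], 1
    while gaps[-1] < n:
        k += 1
        gaps.append(round(a * b ** k + c * d ** k))
    return tuple(g for g in gaps if g < n)

def unique_sequences(grid, n):
    """Keep one parameter set per distinct generated sequence."""
    seen = {}
    for params in grid:
        seen.setdefault(make_sequence(*params, n), params)
    return seen

def sequential_filter(run_trial, mean_bound, var_upper, z=2.0, max_trials=100):
    """Accept a candidate as soon as its running mean comparison count is
    confidently below mean_bound; reject once it is confidently above.
    run_trial() returns the comparison count for one random permutation."""
    total = 0.0
    for t in range(1, max_trials + 1):
        total += run_trial()
        margin = z * math.sqrt(var_upper / t)  # confidence half-width
        if total / t + margin < mean_bound:
            return True     # promising: run the full 1000 iterations
        if total / t - margin > mean_bound:
            return False    # obviously poor: discard
    return False            # undecided after max_trials: treat as poor
```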
4.2 Hardware

The experiments were performed on an Ubuntu machine with an 8-core Intel Xeon W-3225. Experiments counting the number of comparisons and exchanges were multithreaded. Experiments involving the measurement of time were done single-threaded, as any multithreaded applications could cause discrepancies in time measurement. All code was written in Python.

Sequence            N=10000
                    µCO             µEX
Ours-A128-Comp      206356 ± 1796   132351 ± 1797
Ours-A1000-Comp     196336 ± 1707   119012 ± 1710
Ours-A1000-Time     194052 ± 879    98952 ± 883
Ours-B10000-Comp    192029 ± 992    209292 ± 1293
Ciura-128           195256 ± 1106   105544 ± 1109
Ciura-1000          193778 ± 1895   111338 ± 1897
Ciura-Long          191435 ± 892    101680 ± 897
Tokuda              192574 ± 795    98071 ± 796
Pratt-25            450131 ± 516    62191 ± 526
Pratt-23            604502 ± 451    66923 ± 725
Pratt-34            355382 ± 723    63272 ± 462

Table 5: Number of operations to sort an array of size 10000, averaged over 1000 random array permutations.

Sequence            Running Time (ms)
Ours-A128-Comp      3.15 ± 0.08
Ours-A1000-Comp     3.02 ± 0.06
Ours-A1000-Time     3.01 ± 0.06
Ours-B10000-Comp    3.04 ± 0.07
Ciura-128           3.07 ± 0.06
Ciura-1000          3.01 ± 0.06
Ciura-Long          3.04 ± 0.07
Tokuda              3.06 ± 0.08
Pratt-25            5.00 ± 0.09
Pratt-23            6.35 ± 0.11
Pratt-34            4.17 ± 0.08

Table 6: Time to sort an array of size 1000, averaged over 1000 random array permutations.

Figure 1: (Left) For varying array sizes, the difference in number of comparisons between baseline sequences and Ours-A128; a positive value means Ours-A128 uses fewer comparisons. (Right) Number of comparisons for varying array sizes larger than what Ours-A128 was optimized for. (The panels plot "Difference in Mean Number of Comparisons" and "Mean Number of Comparisons vs Array Size" against array size for Ours-A128, Ciura-128, Ciura-Large, and Tokuda.)
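The measurement protocol is easy to reproduce. The sketch below is a minimal serial illustration rather than the authors' (multithreaded) harness; it assumes the ± entries in Tables 3-5 are standard deviations across trials, which the paper does not state explicitly, and uses a monotonic clock for the single-threaded timing runs.

```python
import random
import statistics
import time

def benchmark_ops(count_ops, gaps, n, trials=1000):
    """Mean and standard deviation of (comparisons, exchanges) over `trials`
    random permutations of size n. `count_ops(a, gaps)` can be the
    instrumented Shellsort sketched after Table 3."""
    cos, exs = [], []
    for _ in range(trials):
        co, ex = count_ops(random.sample(range(n), n), gaps)
        cos.append(co)
        exs.append(ex)
    return (statistics.mean(cos), statistics.stdev(cos),
            statistics.mean(exs), statistics.stdev(exs))

def time_sort(sort, gaps, n, trials=1000):
    """Single-threaded wall-clock timing, in milliseconds per sort."""
    times = []
    for _ in range(trials):
        a = random.sample(range(n), n)
        t0 = time.perf_counter()
        sort(a, gaps)
        times.append((time.perf_counter() - t0) * 1e3)
    return statistics.mean(times), statistics.stdev(times)
```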
4.3 Results

The best parameters that we found for Ours-A and Ours-B are given in Table 2, and the first few terms of the sequences are shown in Table 1. For array size 200, we found that Ours-B10000-Comp outperforms all other tested sequences in terms of the number of comparisons, as shown in Table 3. Figure 1 is a graphical aid to this table. These graphs show that sequences generated by template functions can still perform well for array sizes somewhat larger than those they were optimized for. Furthermore, we found that both Ours-A128-Comp and Ours-A1000-Comp matched, but did not surpass, the number of comparisons of the Ciura and Tokuda sequences. It is interesting to note that several of the initial terms of Ciura-128 and Ours-A128-Comp are identical.

We also test medium and large arrays, with results shown in Tables 4 and 5. For a graphical representation of Table 5, see Figure 1 in the Appendix. Our new sequences approach the performance of the Ciura sequences without surpassing them. However, Ours-B10000 still surpasses the Tokuda sequence for all array sizes that we have tested here. Recall that the Tokuda sequence is currently the best known sequence generated from a function. We have therefore shown a new function-based sequence that outperforms other function-based sequences in terms of the number of comparisons. As mentioned previously, this is particularly useful when comparisons are the dominant operation, such as when sorting large satellite data.

Our experiments with optimizing sequences to minimize running time, on the other hand, are shown in Table 6. The relevant sequence, Ours-A1000-Time, takes a running time similar to that of the Ciura-1000 sequence; both are faster than any other tested sequence. The sequence Ours-A1000-Time is particularly interesting because its first few elements (1, 3, 7) differ from those of most other fast sequences, which typically start with (1, 4, 9). This difference may imply that while sequences beginning with (1, 4, 9) are very good at minimizing the number of comparisons, that does not guarantee a good overall running time.
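For reference, the Tokuda sequence mentioned above is itself generated from a closed form, h_k = ⌈(9(9/4)^(k-1) - 4)/5⌉, which yields 1, 4, 9, 20, 46, 103, 233, 525, .... A short generator:

```python
import math

def tokuda_gaps(n):
    """Tokuda's function-based gap sequence, descending, with all gaps < n."""
    gaps, k = [], 1
    while True:
        h = math.ceil((9 * (9 / 4) ** (k - 1) - 4) / 5)
        if h >= n:
            return gaps[::-1]
        gaps.append(h)
        k += 1

# tokuda_gaps(1000) -> [525, 233, 103, 46, 20, 9, 4, 1]
```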
5 Conclusion

Improvements to Shellsort traditionally come from finding gap sequences with better theoretical properties. Here we introduced an experimental framework for finding improved gap sequences, following in the footsteps of [4]. Our generated gap sequences outperformed all well-known gap sequences in terms of the number of comparisons on the prescribed array sizes. Furthermore, the sequence Ours-A1000-Time is, to our knowledge, the function-based sequence with the quickest running time; it meets the performance of the Ciura sequence but does not surpass it. This may be improved with different sequence-generating functions or experimental setups, which we leave to future work. While the sequences presented here were optimized for chosen array sizes, the optimization may be repeated for any array size of interest.
References

[1] D. L. Shell, "A high-speed sorting procedure," Communications of the ACM, vol. 2, no. 7, pp. 30–32, 1959.
[2] M. T. Goodrich, "Randomized shellsort: A simple data-oblivious sorting algorithm," Journal of the ACM (JACM), vol. 58, no. 6, pp. 1–26, 2011.
[3] J.-W. Lee, Y.-S. Kim, and J.-S. No, "Analysis of modified shell sort for fully homomorphic encryption," IEEE Access, vol. 9, pp. 126198–126215, 2021.
[4] M. Ciura, "Best increments for the average case of shellsort," in International Symposium on Fundamentals of Computation Theory, pp. 106–117, Springer, 2001.
[5] R. Sedgewick, "Analysis of shellsort and related algorithms," in European Symposium on Algorithms, pp. 1–11, Springer, 1996.
[6] N. Tokuda, "An improved shellsort," in Proceedings of the IFIP 12th World Computer Congress on Algorithms, Software, Architecture: Information Processing '92, Volume 1, pp. 449–457, 1992.
[7] T. N. Hibbard, "An empirical study of minimal storage sorting," Communications of the ACM, vol. 6, no. 5, pp. 206–213, 1963.
[8] V. R. Pratt, "Shellsort and sorting networks," tech. rep., Stanford Univ. CA Dept. of Computer Science, 1972.
[9] E. S. Selmer, "On the linear diophantine problem of Frobenius," 1977.
[10] J. Incerpi and R. Sedgewick, "Improved upper bounds on shellsort," Journal of Computer and System Sciences, vol. 31, no. 2, pp. 210–224, 1985.
[11] E. S. Selmer, "On shellsort and the Frobenius problem," 1987.
C9FQT4oBgHgl3EQf_jdA/content/2301.13458v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:103334c00d19b4563067b6dbb44e6be9afa0f6ab5ad0ecc337e4d7bd384913dd
3
+ size 5835964
EtE5T4oBgHgl3EQfUw_x/content/2301.05547v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b8fc12a1300659a8c71a0701e19d313b9d88f01a452b6655400e9e85a4bf691f
3
+ size 627874
EtE5T4oBgHgl3EQfUw_x/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:141f9e522ab6e6a9779f157147a031a087489a7f56f8b6ac67edffa977380cb8
3
+ size 3080237
EtE5T4oBgHgl3EQfUw_x/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f07257ee0bac28da5fdc17bf98037b6537a344688d486849feb5316bc07a655d
3
+ size 142538
FdAzT4oBgHgl3EQfi_0V/content/tmp_files/2301.01507v1.pdf.txt ADDED
@@ -0,0 +1,1706 @@
Bifurcation instructed design of multistate machines

Teaya Yang,1 David Hathcock,1 Yuchao Chen,1 Paul McEuen,2 James P. Sethna,1 Itai Cohen,2 and Itay Griniasty1
1 Laboratory of Atomic and Solid State Physics, Cornell University, Ithaca, New York 14853-2501, USA
2 Laboratory of Atomic and Solid State Physics, Cornell University, Ithaca, New York 14853-2501, USA and Kavli Institute at Cornell for Nanoscale Science, Cornell University, Ithaca, NY, USA
(Dated: January 5, 2023)
arXiv:2301.01507v1 [cond-mat.soft] 4 Jan 2023

We propose a novel design paradigm for multi-state machines where transitions from one state to another are organized by bifurcations of multiple equilibria of the energy landscape describing the collective interactions of the machine components. This design paradigm is attractive since, near bifurcations, small variations in a few control parameters can result in large changes to the system's state, providing an emergent lever mechanism. Further, the topological configuration of transitions between states near such bifurcations ensures robust operation, making the machine less sensitive to fabrication errors and noise. To design such machines, we develop and implement a new efficient algorithm that searches for interactions between the machine components that give rise to energy landscapes with these bifurcation structures. We demonstrate a proof of concept for this approach by designing magneto elastic machines whose motions are primarily guided by their magnetic energy landscapes and show that by operating near bifurcations we can achieve multiple transition pathways between states. This proof-of-concept demonstration illustrates the power of this approach, which could be especially useful for soft robotics and at the microscale, where typical macroscale designs are difficult to implement.
Systems composed of a large number of interacting elements, such as meta-materials, elastic membranes, and proteins, can exhibit emergent behaviors that arise from the collaborative interaction of the system components. Designing functionality in such systems is a formidable task that requires searches in a high-dimensional parameter space of the system components and their interactions. Developing organizing principles for effectively designing such systems remains an outstanding problem in the field [1–6]. Here, we propose that designing multi-state machines around bifurcations of multiple equilibria is a powerful paradigm that can be used to systematically organize such searches.
Bifurcations, in which a single equilibrium configuration splits into multiple equilibria as a function of a control parameter, are a canonical dynamical systems structure that has been used to explain various natural phenomena ranging from phase transitions [7] to the operation of simple machines. Examples of simple machines include Venus flytraps and hummingbird beaks, which have been shown to open smoothly and then snap shut by operating about a cusp bifurcation where three equilibria converge [8]. Designing systems to operate near such bifurcations provides several advantages. Since the splitting of the equilibria has a power-law dependence on the control parameters [4, 7], operating near bifurcations automatically provides a lever mechanism by which small variations in the control parameters lead to large changes in the system state [12, 13]. In the case of the Venus flytrap, slight changes in hydrostatic pressure can drive large motions of the trap. Similarly, in hummingbirds, slight twisting of the jaw bones enables rapid closing of a wide-open beak. Further, such bifurcations organize a topologically protected structure of saddle node manifolds. As such, provided that the system trajectory encircles the cusp bifurcation where the saddle node manifolds meet, the system is guaranteed to exhibit a smooth change in state followed by a snap. In the Venus flytrap and hummingbird examples, this topological protection guarantees that the opening and snapping of the trap or beak is robust against variations in the applied hydrostatic or muscle forces driving the transitions in the system state. Here, we propose that moving beyond cusp bifurcations to design systems that operate near bifurcations of arbitrarily many equilibria preserves the lever advantage and topological protection of cusp bifurcations. Such systems can be driven by only a few control parameters to undergo snapping transitions between multiple states, making the design of machines near such bifurcations a powerful paradigm for organizing complex functions. To develop and demonstrate this paradigm, we experimentally investigate increasingly sophisticated magneto elastic machines whose function is organized by such bifurcations.
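The cusp geometry invoked here is easy to explore numerically. The sketch below is a minimal illustration using the cusp normal form V(θ) = θ⁴/4 + aθ²/2 + bθ, a stand-in for the paper's magneto elastic potential near the bifurcation: it finds the equilibria, the fold (saddle node) curves 4a³ + 27b² = 0 that meet at the cusp a = b = 0, and the snap that occurs when a tracked minimum disappears.

```python
import numpy as np

def equilibria(a, b):
    """Real equilibria of the cusp normal form V(t) = t**4/4 + a*t**2/2 + b*t,
    i.e. real roots of V'(t) = t**3 + a*t + b."""
    r = np.roots([1.0, 0.0, a, b])
    return np.sort(r[np.abs(r.imag) < 1e-9].real)

def n_equilibria(a, b):
    """Three equilibria inside the fold curves (discriminant > 0), one outside;
    the saddle node curves 4*a**3 + 27*b**2 = 0 meet at the cusp a = b = 0."""
    return 3 if -4 * a**3 - 27 * b**2 > 0 else 1

def follow_minimum(path):
    """Track the local minimum nearest the current state along a parameter
    path [(a, b), ...]. A jump in the returned states is a snap-through at a
    saddle node; paths encircling the cusp can change state smoothly."""
    state, states = None, []
    for a, b in path:
        mins = [t for t in equilibria(a, b) if 3 * t**2 + a > 0]  # V'' > 0
        if mins:
            state = mins[0] if state is None else min(mins, key=lambda t: abs(t - state))
        states.append(state)
    return states
```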
87
+ We start by constructing a simple magneto elas-
88
+ tic machine consisting of a control panel that can
89
+ be translated in the x − y plane and a second panel
90
+ arXiv:2301.01507v1 [cond-mat.soft] 4 Jan 2023
91
+
92
+ 2
93
+ FIG. 1. Magneto-elastic machine capable of adopting multiple configurations due to operating near a cusp bifurcation. (a) System: Panels P1 and P2 are decorated with identical magnets. Panel P1 is actuated externally to translate in the x and y directions; in response, Panel P2 rotates about a hinge. The dynamics are over-damped. (b) Magnetic potential energy landscapes: We plot the potential for dx = −0.09 and dy ∈ [0.28, 0.17, 0.02], where dx and dy are deviations from the cusp’s position. Varying y, we cross two saddle node bifurcations where the number of extrema of the magneto elastic landscape changes. (c) Equilibrium manifold: The system’s equilibria θ(dx, dy) are plotted as a function of the deviation of the parameters; color signifies the value of θ. The brown curve marks saddle node bifurcations where the number of equilibria changes, and the light red curve denotes the experimental trajectory. (d) Experimentally observed snap-through transition: The system follows a parametric trajectory, marked by a red curve and colored tube whose color denotes the predicted state θ, around a cusp bifurcation. The colored disks represent the experimentally measured state θ. As expected, a single snap-through transition at a saddle node bifurcation (curves colored according to the bifurcating state θ and converging at the cusp) is observed.
+ that is free to rotate about a hinge connecting the two panels (Fig. 1a and experimental apparatus schematic Fig. S1). The state of the system is given by the angle θ between the panels. By decorating the panels with magnets, we are able to design a magneto elastic landscape with different numbers of minima as a function of the parameters x and y (Fig. 1b). Transitions between these minima correspond to changes in the state of the system. To understand the various pathways for making such transitions we construct the manifold defined by the local equilibria as a function of the parameters x and y. For this particular arrangement of magnets, we calculate (see SI) that the resulting manifold has a domain with multiple solutions delineated by saddle node bifurcation curves (brown). These curves intersect and terminate at a cusp bifurcation beyond which there is only a single equilibrium state. By translating the control panel in the x-y plane, the system can undergo either smooth or abrupt changes in θ. For example, starting the system at point (i) and moving through points (ii-v), the hinge angle increases smoothly. A further slight increase in the control parameter y, however, leads to an abrupt transition from a high to a low angle, corresponding to points (v) and (vi) respectively. These predictions are borne out by the experiments (Fig. 1d), which also show a smooth increase in θ for a pathway that encircles the cusp (i-v) and an abrupt transition in θ when crossing a saddle node curve (v-vi). In this 2D representation the system makes a transition when the color of the path (yellow) matches the color denoting the state associated with the saddle node curve (yellow). This magneto elastic mechanism is reminiscent of the cocking and snapping of a Venus flytrap or a hummingbird’s beak.
+ In addition to providing a mechanism for abrupt transitions, operating near a cusp bifurcation creates a lever mechanism where small variations in the control parameters lead to large variations in the system state. This mechanism resolves the generic problem that creating large variations in the system state often requires unfeasibly large variations in the control parameters. Lever mechanisms are generic near bifurcations of equilibria since the magnitude of the transition in the system state is typically proportional to the square root of the parameter distance from the bifurcation.
+ To characterize this lever mechanism in our experiment, we map the snapping transition curves associated with the saddle node bifurcations. Specifically, for a given value of y (or x) we toggle x (y) so that the system snaps back and forth, and record the values of the control parameters x and y, and θ immediately after each transition (Fig. 2a).
+
+ [Figure 2 image: panels (a)–(d) showing the hinge system, the measured snap-through transition curves, and the cusp point; axes θ [rad] (system state) vs. dx, dy [cm] (control parameters). See the caption below.]
+ FIG. 2. Parametric levers. The change in the state of the system after a snap-through transition near a cusp bifurcation scales sublinearly with the normal form parameters. This sublinear scaling leads to large variation of the state in response to small variation of the system parameters. (a) Measurements of snap-through transitions near a cusp: The blue points mark the state of the magneto elastic system of Fig. 1 after a snap-through transition. The dashed curve is a fit of snap-through transitions near a cusp bifurcation to the data, derived from the normal form potential Ṽ = δθ⁴ + a₂δθ² + a₁δθ. The normal form parameters a₁ and a₂ are locally given by re-scaled rotations of dx and dy, which are the deviations of the parameters away from the cusp. (b) Scaling laws near a cusp: The predicted scaling laws are demonstrated by projecting the measurements and fit onto log-log plots. Near the cusp the system response to a₁ acts as a giant lever, ∂δθ/∂a₁ ∼ 50.
+ To test the scaling relations, we define the normal form parameters a₁ and a₂ as rotations of the displacement of the parameters x and y from the cusp. We then fit the predicted scaling forms ∆θ ∝ a₂^(1/2) and a₁ ∝ a₂^(3/2) near a cusp to determine the cusp’s position and the rotation of the normal form parameters. The fitted model then predicts that ∆θ ∝ a₁^(1/3) (see SI). Because the scaling exponents for ∆θ are fractions of unity, small variations of the parameters along a₁ and a₂ lead to large variations of the system’s state. For example, in our experiments the range of actuation for panel 1’s position is approximately 1 cm and the range of angles accessible to panel 2 is 180° or π radians. Near the bifurcation a translation along a₁ of 0.1% of its range (∼ 10 µm) leads to a snap that changes θ by ∼ 5% of its range (∼ 0.1 rad), providing a lever advantage of ∼ 50 (Fig. 2b).
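+ To make these scaling relations concrete, the following minimal Python sketch (our illustration, not part of the original analysis) locates the saddle node of the quartic normal form Ṽ = δθ⁴ + a₂δθ² + a₁δθ and checks that the fold sits at a₁* ∝ |a₂|^(3/2) while the snap size grows as |a₂|^(1/2):
+ import numpy as np
+
+ def equilibria(a1, a2):
+     """Real roots of dV/dθ = 4δθ³ + 2·a2·δθ + a1 = 0."""
+     r = np.roots([4.0, 0.0, 2.0 * a2, a1])
+     return np.sort(r[np.abs(r.imag) < 1e-7].real)
+
+ for a2 in (-0.01, -0.04, -0.16):
+     a1_fold = (4 * abs(a2) / 3) * np.sqrt(abs(a2) / 6)  # fold: V' = V'' = 0
+     eq = equilibria(0.999 * a1_fold, a2)                # just inside the bistable region
+     jump = eq[-1] - eq[0]                               # snap size between outer equilibria
+     print(f"a2={a2:6.2f}  a1*={a1_fold:.5f}  jump={jump:.3f}  jump/|a2|^0.5={jump/abs(a2)**0.5:.2f}")
+ Doubling the distance |a₂| from the cusp multiplies a₁* by 2^(3/2) ≈ 2.8 while the printed ratio stays constant, which is the fractional-exponent lever discussed above.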
+ The complexity of the actions achieved by such magneto elastic mechanisms is dictated by the range and number of stable states that the system can access. This complexity can be achieved by designing the magneto elastic potentials such that the system operates near bifurcations between multiple states. For example, working near a hypothetical symmetric butterfly bifurcation associated with the potential V = θ⁶ + a₄θ⁴ + a₂θ² + a₁θ should enable smooth and abrupt transitions between three stable states in any order, depending on the chosen trajectory for the control parameters. In Fig. 3a we show a cut through parameter space of the saddle node surfaces near this butterfly bifurcation. If the system starts in the S (Small) state and moves along the depicted trajectory (black arrows), it would first snap to the M (Medium) state when the system crosses the purple saddle node bifurcation and then the L (Large) state when it crosses the green curve. For the return path, however, the system would transition from the L minimum directly to the S minimum when it crosses the yellow saddle node bifurcation curve. Moreover, by working near the bifurcation, the lever mechanism should allow for transitioning between these distinct states within an accessible range of experimental control parameters.
+ SEARCH ALGORITHM FOR BIFURCATIONS OF MULTIPLE EQUILIBRIA
+ To design parametric configurations corresponding to bifurcations of multiple equilibria we develop a gradient continuation search algorithm that takes advantage of their nested structure. Bifurcations associated with k equilibria (minima plus maxima) are degenerate singularities where the first k derivatives of the potential vanish. Thus they can be found iteratively by searching for singularities of the potential with increasing order, solving for one constraint at a time. We find that this method is especially efficient in finding experimentally realizable parametric configurations corresponding to bifurcations of multiple equilibria. Moreover, this method naturally extends to searching for bifurcations with desired properties by introducing further constraints, for example optimizing the robustness of the bifurcation’s associated states to external noise.
+ For ease of illustration we describe how to use this approach to find the symmetrized butterfly bifurcation described above with parameters a₁, a₂, a₄ and variable θ. For a random combination of parameters we find an equilibrium angle where dV/dθ = 0. Generically, this point is part of a smooth manifold over which this constraint holds. We then vary a₁, a₂, a₄ and θ within this manifold to minimize the next constraint |d²V/dθ²|. The trajectory follows the gradient of the second constraint as closely as possible while maintaining the first constraint dV/dθ = 0 until we reach a point on a saddle node surface, which is a manifold where both the first and second constraints hold.¹
+
+ [Figure image: scaling plots of the system state θ [rad] against the normal form parameters a₁, a₂ [cm] and the control parameters dx, dy [cm].]
+ Minimizing the third derivative within the saddle node manifold maintains the first two constraints and allows for finding a cusp bifurcation associated with two stable equilibria. Successive iterations allow for identifying bifurcations between an increasing number of equilibria and eventually the butterfly bifurcation.
+ Our gradient continuation algorithm adapts standard algorithms from the dynamical systems literature [1, 2, 15] and retools them to locally follow the gradient of the unsatisfied constraint (see SI for further details). We depict the resulting search path in Fig. 3b, which highlights the fact that, independent of the number of parameters, the search algorithm follows a 1D trajectory, which is organized by the nested structure of the intermediate bifurcations. These properties enable the algorithm to find realizable bifurcations for systems with hundreds of parameters.
+ THREE STATES AND THE BUTTERFLY BIFURCATION
+ As a proof of concept for our approach we demonstrate the construction and operation of a magneto elastic machine with 3 stable states operating near a bifurcation of multiple equilibria. The first step in designing such a machine is to implement our gradient continuation algorithm to design a magneto elastic potential with a butterfly bifurcation between three stable states. To realize a system operating near such a bifurcation where only three control parameters (the x, y, z positions of panel 1) are actively varied, we allowed the algorithm to also determine the x, y positions of two of the nine magnets on panel 1.² With these seven parameters, the algorithm was able to identify multiple butterfly bifurcations that satisfied these criteria (see SI for details).
+ Having found an appropriate butterfly bifurcation, we use standard dynamical systems continuation algorithms [1, 17] to compute and plot the saddle node surfaces in the control parameter space (x, y, z) near the bifurcation (Fig. 4). We find multiple distinct surfaces where the color denotes the angle θ
+ ¹ A local minimum of |∂²V/∂θ²| with respect to variation of all parameters that lies on the fixed point manifold will throw the algorithm off, but this is a co-dimension m point for a system with m parameters, and so highly unlikely.
+ ² Typically, a butterfly bifurcation requires four control parameters to navigate between all of the stable states. Here, we have identified a nonlinear mapping of the three active control parameters (x, y, z) onto the four dimensional space, which enables transitions between arbitrary minima.
+ FIG. 3. Bifurcations of multiple equilibria. (a) Work cycle near a butterfly: A system operating near a hypothetical symmetrized butterfly bifurcation can cycle between three states. The bifurcation is associated with a potential V = θ⁶ + a₄θ⁴ + a₂θ² + a₁θ and three accessible states denoted by large (L), medium (M) and small (S). As the system follows the trajectory denoted by black arrows with colored background marking its state θ, it cycles between the three states, snapping from S to M to L and back to S by changing a₂ and a₁ while a₄ = 0.1. The snaps occur at saddle node bifurcations (colored curves) whose color signifies the state θ of the minimum that is annihilated at each boundary. (b) Gradient continuation algorithm: The search algorithm finds bifurcations of multiple equilibria by following a one dimensional curve. Starting from a bifurcation of k equilibria the algorithm searches for a bifurcation of k + 1 equilibria by following a curve in the augmented parameter space, tangent to the gradient of |V^(k+1)| in the kth bifurcation manifold. We draw a search for a butterfly bifurcation in its symmetric normal form potential. The entire volume denotes the equilibrium manifold. Starting from a fixed point, the algorithm finds a saddle node bifurcation (along the white curve). Parameters are then varied on the saddle node surface (yellow) and cusp surface (thin lines) to respectively find a cusp bifurcation (along the gray curve) and a swallowtail bifurcation (along the black curve) near a butterfly bifurcation (black point).
+ at which the saddle node bifurcation occurs³. Instructed by these surfaces, we design a cyclic path
+ ³ There is further local data in the potential at a saddle node surface that can instruct the design of a trajectory. For example, the sign of the third derivative of the potential signals whether the state’s angle will increase or decrease as it bifurcates. Moreover, the merging of saddle node surfaces can also be delineated by plotting the cusp bifurcations. Here, we do not include this additional information for ease of viewing.
+
+ [Figure 3 image: (a) regions S, M, L, SM, SL, SML in the (a₁, a₂) plane bounded by colored saddle node curves; (b) the equilibrium manifold over (a₁, a₂, a₄) with the butterfly point and the saddle node and cusp sets marked.]
+ through the parameter space such that the system snaps between the large, medium, and small minima. The path color at each point denotes the system state, θ. As with the cusp and symmetrized butterfly bifurcation depictions in Figs. 1d and 3a, transitions occur at intersections of the path and saddle node surfaces where their colors match. We note that for the generic butterfly bifurcation, the surface structure can be quite complicated, as shown by the two projections in Fig. 4a,b. In contrast to the symmetrized butterfly bifurcation structure (Fig. 2a), this complicated structure necessitates using all three control parameters x, y, and z to design a pathway that cycles between the three states. Importantly, despite the surface complexity the design is robust. Specifically, since the trajectory crosses surfaces, slight deviations in the control parameters should still lead to similar snaps, snap sequences, and ultimately the resulting complex actions of the entire magneto elastic machine.
+ Using the design parameters determined by our search algorithm, we built a magneto elastic machine similar to that depicted in Fig. 1a, but with a different magnetic dipole pattern and with two of the magnets in panel 1 displaced in the panel plane (see SI). By following the theoretically predicted path, we found three snap-through transitions from small to large, large to medium, and medium to small (Fig. 4c and Movie S1). Two of the transitions occurred at the predicted locations, while the large to medium transition was displaced by 0.4 cm from its predicted location. In addition, we found excellent fidelity between the predicted and measured angles θ for the equilibrium states. Using the same magneto elastic machine, we also designed and demonstrated cyclical paths with two transitions (see Fig. S3 and Movie S3). Finally, when the system was taken apart and reassembled, we were able to reliably reproduce the transitions associated with the designed trajectories.
+ DISCUSSION
+ The experimental validation of this design paradigm with a butterfly bifurcation of 5 equilibria strongly supports the conjecture that this framework could be extended to design systems performing increasingly sophisticated functions by operating near bifurcations with a growing number of equilibria. Potential energies with these increasingly rare bifurcations can be found efficiently, because the gradient continuation algorithm follows a one dimensional search path. Moreover, the associated lever mechanisms provide a design feature where the operation of the machine will likely be confined to a small parameter volume, enabling the execution of these actions by realizable machines.
+ Microscopic magneto-elastic machines could prove to be a useful instance of design instructed by bifurcations of multiple equilibria: An important emerging strategy for manufacturing microscopic and soft machines is fabricating them using two dimensional lithographic and printing techniques [18–22]. Such fabrication techniques, however, restrict the implementation of compound mechanisms composed of springs, cogs, screws, etc. that are used to achieve complex actions in traditional macroscale machines. These lever mechanisms could be replaced with magneto elastic mechanisms with lever advantages induced by bifurcations. Magnetic interactions are especially well suited for this purpose since they are long ranged and not easily screened. This long range allows for global changes to the conformation in response to local actuation of system components.
+ Importantly, since bifurcations of multiple equilibria are notoriously sensitive to variations of parameters, there is a concern that a machine operating near such bifurcations will be very sensitive to environmental noise, such as thermal vibrations, as well as to fabrication precision. Indeed, close to a bifurcation the sensitivity of the system to variations of certain combinations of the system parameters grows exponentially as the number of associated equilibria increases. Mathematically this is captured by mapping the potential to a canonical normal form via a change of coordinates [4, 5, Sec. 36.6] (see SI for derivation). Practically, however, this increased sensitivity is often blunted outside of the infinitesimal environment of the bifurcation. At a finite distance from the bifurcation the mapping to the normal form or its linearization will often cease to be valid because of other singularities of the potential or the nonlinear fall-off in the potential. This non-linearity is especially pronounced in Keplerian potentials such as that of magnetic interactions. Critically, the saddle node manifolds coalescing at the bifurcation are generically preserved outside this radius of convergence as they are topologically protected and can only annihilate at a cusp or a bifurcation of more equilibria. Thus, operating a machine near a bifurcation of multiple equilibria, but at a finite distance from it, allows the design of trajectories that take advantage of the multiple saddle node transitions associated with it, and their lever advantages, while avoiding the local exponential sensitivity.
+
+ FIG. 4. 3-state cycle near a butterfly bifurcation point. (a) Theory: The saddle node surfaces of a magneto-elastic system with three active control parameters x, y and z are plotted; their color denotes the angle θ at which the snap occurs. The system’s magnetic pattern is designed using the gradient continuation algorithm such that it operates near a butterfly bifurcation where multiple saddle node surfaces coalesce, enabling multiple snap-through transitions at the surfaces. A trajectory (colored tube with white arrows) is chosen such that the system snaps in cycles between three states of Large (L), Medium (M) and Small (S) angles. The system’s predicted state is denoted by the tube’s color. At intersections of the trajectory with a surface where their colors match, the system is predicted to snap to a new state. (b) Experimental demonstration: The colored dots mark the experimental value of the system’s state as it follows the designed trajectory. We observe three distinct transitions as predicted.
+ Similarly, the sensitivity of a system designed near a bifurcation of multiple equilibria to external noise grows exponentially with the number of associated states. This growth in sensitivity arises from the decrease in the potential barriers between adjacent states. For example, in a potential with k equilibria where all the potential barriers are of equal height and the minima are equally deep (such a potential is proportional to a Chebyshev polynomial of the first kind of order k + 1), the barrier heights decay as 2⁻ᵏ.
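+ This 2⁻ᵏ decay is easy to check numerically. The sketch below is our own illustration; it assumes the standard monic normalization Tₖ₊₁/2ᵏ of the Chebyshev polynomial, whose k interior extrema on [−1, 1] all have magnitude 2⁻ᵏ:
+ import numpy as np
+ from numpy.polynomial import chebyshev as C
+
+ # Monic degree-(k+1) potential with k equal-height extrema: V_k(x) = T_{k+1}(x)/2^k
+ for k in (3, 5, 7, 9):
+     coef = np.zeros(k + 2)
+     coef[-1] = 1.0                      # T_{k+1} in the Chebyshev basis
+     x = np.linspace(-1.0, 1.0, 20001)
+     v = C.Chebyshev(coef)(x) / 2**k
+     print(k, v.max() - v.min(), 2 * 2.0**-k)   # peak-to-trough equals 2·2^{-k}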
610
+ This sensitivity seems prohibitive as we imagine im-
611
+ plementing this design principle to create systems
612
+ cycling between multiple states.
613
+ Despite this in-
614
+ creased sensitivity, however, we estimate that the
615
+ strength of magnetic interactions assures that mag-
616
+ neto elastic systems are robust to thermal noise at
617
+ the microscale. Specifically, in magneto elastic sys-
618
+ tems the potential is proportional to the dipole-
619
+ dipole interaction strength µ0µ2L6/R3 of two mag-
620
+ nets with magnetic dipole densities µ panel size L
621
+ and typical distance between dipoles R.
622
+ Thermal
623
+ noise is then comparable to the magneto elastic po-
624
+ tential barrier height when the number of equilibria
625
+ k ∼ log2
626
+
627
+ µ0µ2L3/(R/L)3
628
+ kbT
629
+
630
+ . The magnetic dipole den-
631
+ sities µ are of order 106A/m at the microscale [24].
632
+ The smallest two state door (equivalent to the de-
633
+ vice in Fig. 1a) that is robust to thermal noise is
634
+ then ∼ .1µm in size, approaching the size limit of
635
+ 30nm for fabricating stable magnetic domains [25].
636
+ Conversely, a 100 µm machine will become sensitive
637
+ to thermal noise near a bifurcation of ∼ 40 equilib-
638
+ ria, that is 20 distinct states compressed in a span
639
+ of 100 degrees.
640
+ Finally, the designs that we have implemented in this paper assume operation in a low Reynolds number regime where inertia can be neglected. In the macroscale implementation this was achieved by attaching a damping panel immersed in a solution of glycerol. We expect our designs to work even better as these machines are implemented at smaller scales since the importance of inertia drops quadratically with the system size. Operation of a 100 µm scale machine in water, for example, would enable the system to be in the low Re regime while operating at rates that are 1000-fold faster than those in the macroscale experiment.
+ CONCLUSIONS
+ We have shown that the operation of multi-parameter machines near bifurcations of multiple equilibria allows them to efficiently and robustly cycle between multiple conformations. Moreover, we developed a generic step-by-step framework to design and implement systems that operate near such bifurcations. Specifically, we: 1) created a search algorithm that optimizes over fabrication and other system parameters to enable operation near such bifurcations; 2) mapped the manifold of saddle node bifurcations to determine a useful trajectory for the machine operation; and 3) demonstrated the robustness of this approach by constructing and operating a magneto elastic machine that can cycle and robustly snap between multiple distinct configurations in response to small variations of a few control parameters. Importantly, this design approach and step-by-step implementation is generic and could be applied to many complex systems with
+
+ [Figure 4 image: saddle node surfaces and the cyclic trajectory in the (x, y, z) control parameter space; axes x, y, z [cm], color bar θ [rad], with the L, M, S states marked.]
+ multiple interacting components ranging from artificial proteins, where the interactions are electrostatic, to neural networks (both biological and synthetic) where the interactions are governed by network topology.
+ Cycling between transitions in mechanical implementations of such systems can generate work or locomotion. If the system is over-damped, as is often the case in microscopic systems operating in fluids, work and locomotion can be achieved by coupling the system to mechanisms that break time reversal symmetry. These mechanisms include ratchets or cilia-like flexible rods [26]. In the case of the magneto elastic hinge described here, time reversal symmetry is broken by combining the smooth translations of the control panel with abrupt transitions in the state of the dynamic panel. In systems where the control variable is not a mechanical parameter, time reversal symmetry can be broken by using the angle as an effective dynamical variable governing a system with multiple degrees of freedom, such as is often used to parameterize robot locomotion.
+ More broadly, it is interesting to consider the extension of our work to systems with a larger number of dynamical variables (θ₁, θ₂, . . .). Here, we envision that by working near bifurcations of multiple variables (e.g. elliptic umbilic bifurcations) one could organize snaps between states separated along multiple variables. Such designs require extending our search algorithm to multiple variables while maintaining its low dimensional search path. Alternatively, one could design mechanisms based on multiple local bifurcations that are weakly coupled across the machine. For example, one bifurcation of n states could be used to control θ₁ while a second bifurcation of m states organizes the dynamics of the variable θ₂. By weakly coupling the panels, and hence the variables θ₁ and θ₂, the machine can transform between n × m states in a coordinated fashion. Indeed this approach is already being implemented for bifurcations with two states [27–29]. Increasing the number of states associated with each variable would enable a similarly rich landscape for machine design with far fewer mechanical elements or panels.
+ Finally, it is interesting to consider whether this design paradigm can be used to understand natural systems beyond the Venus flytrap and hummingbird beak. For example, molecular machines such as proteins often transition between different configurations. It is interesting to consider whether such transitions can be thought of as snaps organized by bifurcations of many states [3, 6]. As another example, bifurcation theory has been implemented to identify and explain epigenetic dynamics of cell differentiation [30–32]. These approaches often focus on consecutive 2-state bifurcations. The results presented here, however, suggest that a comparably simple evolutionary pathway could entail development of multi-state bifurcations. Such structures could allow the addition of new states while maintaining the existing configuration through an evolutionary process, similar to the path taken by the gradient continuation algorithm.
+ I. MATERIALS AND METHODS
+ Construction of experimental hinge system
+ Panel P1 is constrained to a set of linear translation stages that allow its position to be adjusted manually to any x or y coordinates near the cusp. For experiments near the butterfly bifurcation point, an extra translation stage is attached to Panel P1 to allow adjustment of its z coordinate. Panel P2 is attached to an OVA friction-less thrust air bushing with a 13 mm shaft. The air bushing is attached to a fixed metal housing to limit Panel P2 to its rotational degree of freedom. A T-shaped paddle is attached to the bottom of the shaft and immersed in glycerol to introduce damping to the system. Additionally, we position a Basler Ace acA3088-57um area scan camera above the center of the air bushing to take top-view images of the air bushing, which are then used to calculate the angle response of Panel P2 to high precision.
+ A. Panels for experiments near cusp point
+ Each magnetic panel is constructed using two 1/16 in thick laser-cut acrylic pieces and nine grade N48 neodymium magnets of diameter 1/16 in and height 1/8 in. Magnets are arranged in a 3-by-3 square lattice with lattice constant of 2.5 cm.
+ B. Panels for experiments near butterfly point
+ Each magnetic panel is constructed using two 1/16 in thick laser-cut acrylic pieces and nine grade N48 neodymium magnets of diameter 1/8 in and height 1/8 in. Magnets are arranged in a 3-by-3 square lattice with lattice constant of 2.5 cm. In panel P1 the x, y position of two of the magnets is displaced according to the design determined by the search algorithm. The two magnets whose position is offset are the magnet in the bottom row on the right column, whose offsets are dx1 = 1.418 cm, dy1 = −0.273 cm, and the magnet in the middle row on the left column, with offsets dx2 = −0.826 cm, dy2 = −0.986 cm. A technical drawing illustrating the panels used for the butterfly experiment is included in the SI.
+ FIG. 5. Experimental setup. Sketch of the experimental system used for demonstration of cycles and angle measurements. Panel P1 is attached to a set of translation stages which allows us to implement the spatial control parameters in all experiments. Panel P2 is attached to an air bushing that is fixed in space. An attachment submerged in glycerol is added to the base of Panel P2 to introduce damping to the system.
+ Angle measurements
+ A marker is attached to the top of the air bushing, and a camera records the location of the marker during the experiment. At each given time, the measured angle is determined by three points: the current marker location, the location of the center of rotation, and the marker location at θ = 0. We calibrate the system by recording the location of the pixel at θ = 0 and several other distinct angles. The pixel location corresponding to the center of rotation is obtained using a fitted circle through the calibration data points. The resulting angle is then deduced from the three measured points. This data collection process is conducted in MATLAB.
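+ For concreteness, a minimal Python version of this procedure (the original pipeline is in MATLAB; the function names here are our own) fits the rotation center by least squares and converts a marker pixel into an angle:
+ import numpy as np
+
+ def fit_circle(pts):
+     """Least-squares circle through calibration pixels; returns the center."""
+     x, y = pts[:, 0], pts[:, 1]
+     A = np.column_stack([2 * x, 2 * y, np.ones(len(x))])
+     cx, cy, _ = np.linalg.lstsq(A, x**2 + y**2, rcond=None)[0]
+     return np.array([cx, cy])
+
+ def marker_angle(marker, center, marker_at_zero):
+     """Signed angle between the marker and its θ = 0 reference, about the center."""
+     u, v = marker_at_zero - center, marker - center
+     return np.arctan2(u[0] * v[1] - u[1] * v[0], u @ v)
+
+ # Self-check with synthetic calibration points at known angles
+ center_true, R = np.array([320.0, 240.0]), 100.0
+ ang = np.deg2rad([0, 30, 75, 120, 160])
+ cal = center_true + R * np.column_stack([np.cos(ang), np.sin(ang)])
+ print(np.rad2deg(marker_angle(cal[3], fit_circle(cal), cal[0])))  # ≈ 120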
+ Acknowledgments: We thank Michael Brenner, Chrisy Xiyu Du, Yan Yang, Robert Distasio, and John Guckenheimer for inspiring discussions. This work was financially supported primarily by NSF Grant DMREF-89228, NSF Grant EFRI-1935252, NSF Grant CBET-2010118, Cornell Center for Materials Research DMR-1719875, and by Air Force Office of Scientific Research Grant MURI: FA9550-16-1-0031. I.G. was also supported by the Cornell Laboratory of Atomic and Solid State Physics. D.H. was supported by an NSF Graduate Research Fellowship, Grant No. DGE-2139899.
+ [1] O. Sigmund and K. Maute, Structural and Multidisciplinary Optimization 48, 1031 (2013).
+ [2] C. P. Goodrich, A. J. Liu, and S. R. Nagel, Phys. Rev. Lett. 114, 225501 (2015).
+ [3] P.-S. Huang, S. E. Boyken, and D. Baker, Nature 537, 320 (2016).
+ [4] J. W. Rocks, N. Pashine, I. Bischofberger, C. P. Goodrich, A. J. Liu, and S. R. Nagel, Proceedings of the National Academy of Sciences 114, 2520 (2017).
+ [5] V. F. Hagh, S. R. Nagel, A. J. Liu, M. L. Manning, and E. I. Corwin, Proceedings of the National Academy of Sciences 119, e2117622119 (2022).
+ [6] J.-P. Eckmann, J. Rougemont, and T. Tlusty, Rev. Mod. Phys. 91, 031001 (2019).
+ [7] R. K. Pathria, Statistical Mechanics (Elsevier, 2016).
+ [8] Y. Forterre, J. M. Skotheim, J. Dumais, and L. Mahadevan, Nature 433, 421 (2005).
+ [8] M. Smith, G. Yanega, and A. Ruina, Journal of Theoretical Biology 282, 41 (2011).
+ [7] V. I. Arnol’d, “Bifurcations of equilibria,” in Dynamical Systems V, edited by V. I. Arnol’d (Springer Berlin Heidelberg, Berlin, Heidelberg, 1994) pp. 10–38.
+ [4] M. V. Berry, Journal of Physics A: Mathematical and General 10, 2061 (1977).
+ [12] J. T. Overvelde, T. Kloek, J. J. D’haen, and K. Bertoldi, Proceedings of the National Academy of Sciences 112, 10863 (2015).
+ [13] Y. Chi, Y. Hong, Y. Zhao, Y. Li, and J. Yin, Science Advances 8, eadd3788 (2022).
+ [1] Y. A. Kuznetsov, “Topological equivalence, bifurcations, and structural stability of dynamical systems,” in Elements of Applied Bifurcation Theory (Springer New York, New York, NY, 2004).
+ [15] R. H. Clewley, W. E. Sherwood, M. D. LaMar, and J. M. Guckenheimer, “PyDSTool, a software environment for dynamical systems modeling,” (2007).
+ [2] J. Guckenheimer and P. Holmes, Nonlinear Oscillations, Dynamical Systems, and Bifurcations of Vector Fields (Springer New York, New York, NY, 1983).
+ [17] J. Guckenheimer and Y. A. Kuznetsov, Scholarpedia 2, 1852 (2007).
+ [18] J. Kim, J. A. Hanna, M. Byun, C. D. Santangelo, and R. C. Hayward, Science 335, 1201 (2012).
+ [19] T. H. Ware, M. E. McConney, J. J. Wie, V. P. Tondiglia, and T. J. White, Science 347, 982 (2015).
+ [20] J.-H. Na, A. A. Evans, J. Bae, M. C. Chiappelli, C. D. Santangelo, R. J. Lang, T. C. Hull, and R. C. Hayward, Advanced Materials 27, 79 (2015).
+ [21] A. Sydney Gladman, E. A. Matsumoto, R. G. Nuzzo, L. Mahadevan, and J. A. Lewis, Nature Materials 15, 413 (2016).
+ [22] M. Z. Miskin, A. J. Cortese, K. Dorsey, E. P. Esposito, M. F. Reynolds, Q. Liu, M. Cao, D. A. Muller, P. L. McEuen, and I. Cohen, Nature 584, 557 (2020).
+ [5] DLMF, “NIST Digital Library of Mathematical Functions,” http://dlmf.nist.gov/, Release 1.1.4 of 2022-01-15 (2022), F. W. J. Olver, A. B. Olde Daalhuis, D. W. Lozier, B. I. Schneider, R. F. Boisvert, C. W. Clark, B. R. Miller, B. V. Saunders, H. S. Cohl, and M. A. McClain, eds.
+ [24] J. Cui, T.-Y. Huang, Z. Luo, P. Testa, H. Gu, X.-Z. Chen, B. J. Nelson, and L. J. Heyderman, Nature 575, 164 (2019).
+ [25] R. Niu, C. X. Du, E. Esposito, J. Ng, M. P. Brenner, P. L. McEuen, and I. Cohen, Proceedings of the National Academy of Sciences 116, 24402 (2019).
+ [26] E. Lauga, Soft Matter 7, 3060 (2011).
+ [27] C. Coulais, A. Sabbadini, F. Vink, and M. van Hecke, Nature 561, 512 (2018).
+ [28] H. Bense and M. van Hecke, Proceedings of the National Academy of Sciences 118, e2111436118 (2021).
+ [29] D. Shohat, D. Hexner, and Y. Lahini, Proceedings of the National Academy of Sciences 119, e2200028119 (2022).
+ [30] D. A. Rand, A. Raju, M. Sáez, F. Corson, and E. D. Siggia, Proceedings of the National Academy of Sciences 118 (2021), 10.1073/pnas.2109729118.
+ [31] E. Marco, R. L. Karp, G. Guo, P. Robson, A. H. Hart, L. Trippa, and G.-C. Yuan, Proceedings of the National Academy of Sciences 111, E5643 (2014).
+ [32] M. Setty, M. D. Tadmor, S. Reich-Zeliger, O. Angel, T. M. Salame, P. Kathail, K. Choi, S. Bendall, N. Friedman, and D. Pe’er, Nature Biotechnology 34, 637 (2016).
+ [3] J. W. Bruce and P. J. Giblin, Curves and Singularities: A Geometrical Introduction to Singularity Theory, 2nd ed. (Cambridge University Press, 1992).
+ [6] W. H. Press, S. A. Teukolsky, W. T. Vetterling, and B. P. Flannery, Numerical Recipes 3rd Edition: The Art of Scientific Computing (Cambridge University Press, 2007).
+ Supplemental material - Bifurcation instructed design of multistate machines
+ II. CALCULATION OF THE POTENTIAL ENERGY LANDSCAPE
+ To model the dynamics of our experimental hinge system, we compute the potential energy landscape arising from the dipole-dipole interactions between the magnets embedded in each panel. The magnets used in our experiments are well approximated by perfect dipoles. Therefore, the potential energy for the system is a sum of dipole-dipole interaction energies
+ V = − Σ_{i∈P1} Σ_{j∈P2} (µ₀m² / 4π|r_ij|³) [3(m̂_i · r̂_ij)(m̂_j · r̂_ij) − m̂_i · m̂_j],   (S1)
+ where µ₀ is the vacuum permeability, m is the dipole strength (identical for all magnets), m̂_i is the orientation of dipole i, and r_ij is the separation vector between magnets i and j. Note that the interaction energy for dipoles in the same panel is constant, so we can restrict the sum to pairs of dipoles in different panels.
+ To derive the θ dependence of the energy landscape, we must write the dipole orientations and positions in terms of our control parameters x, y, and z and the dynamical variable θ. The dipoles on P1 are always oriented in the z-direction, while the dipoles on the rotating panel P2 have an orientation that changes with θ:
+ m̂_i = δ_i ẑ,   m̂_j = δ_j {sin θ, 0, −cos θ},   (S2)
+ where δ_i = ±1 is the orientation of magnet i with respect to panel P1 (similarly for δ_j). The positions of individual dipoles are given by
+ r_i = {x_i, y_i, 0} + {x, y, z},   r_j = R_θ {x_j, y_j, 0},   (S3)
+ leading to the interdipole separation r_ij = r_i − r_j. Here x_i and y_i are the x − y positions of dipole i in panel P1 (similarly for x_j, y_j); x, y, and z are the coordinates of the control panel; and R_θ is the rotation matrix corresponding to a rotation by angle θ about the y-axis.
+ Together, Eqs. (S1)-(S3) give the potential energy in terms of the hinge angle θ, our control parameters x, y, and z, and the design parameters x_i, y_i, δ_i, x_j, y_j, and δ_j. Since the hinge experiment is heavily damped, θ follows gradient dynamics θ̇ = −∂V/∂θ and the stable equilibrium angles are given by the local minima of the potential landscape V.
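+ As a concrete illustration, the following Python sketch (our own; the array layout and names are assumptions, not code from the paper) evaluates V(θ) directly from Eqs. (S1)-(S3):
+ import numpy as np
+
+ MU0 = 4 * np.pi * 1e-7  # vacuum permeability [T·m/A]
+
+ def potential(theta, xyz, pos1, delta1, pos2, delta2, m=1.0):
+     """Magneto elastic potential of Eqs. (S1)-(S3). pos1, pos2 are (N, 2) arrays of
+     in-panel (x_i, y_i) dipole positions; delta1, delta2 hold the ±1 orientations;
+     xyz = (x, y, z) is the control panel displacement."""
+     x, y, z = xyz
+     mi = np.array([0.0, 0.0, 1.0])                        # P1 dipoles, along z (S2)
+     mj = np.array([np.sin(theta), 0.0, -np.cos(theta)])   # P2 dipoles, rotated (S2)
+     R = np.array([[np.cos(theta), 0.0, np.sin(theta)],    # rotation about y (S3)
+                   [0.0, 1.0, 0.0],
+                   [-np.sin(theta), 0.0, np.cos(theta)]])
+     V = 0.0
+     for (xi, yi), di in zip(pos1, delta1):
+         ri = np.array([xi, yi, 0.0]) + np.array([x, y, z])
+         for (xj, yj), dj in zip(pos2, delta2):
+             rij = ri - R @ np.array([xj, yj, 0.0])
+             r = np.linalg.norm(rij)
+             rh = rij / r
+             a, b = di * mi, dj * mj
+             V -= MU0 * m**2 / (4 * np.pi * r**3) * (3 * (a @ rh) * (b @ rh) - a @ b)  # (S1)
+     return V
+ The stable states at a given (x, y, z) are then the local minima of this function over θ, which can be located with any one dimensional minimizer.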
+ III. CUSP EXPERIMENTS
+ In the cusp experiments, Panel P1’s x and y positions are measured as displacements from their value when the panels are 180° open and are aligned along z and y such that the panels’ backs and bottoms are parallel. The magnets closest to the hinge axis are removed from it by 0.75 cm on both panels. The backs of the cylindrical magnets are aligned with the panels’ backs. The damping paddle used in the cusp experiments has dimensions 1.5 cm by 3.0 cm.
+ A. Experimental estimation of the cusp point
+ We estimate the location of the cusp point as the bifurcation of the two measured saddle node curves. We map the saddle node curve by toggling x (y), for a given value of y (or x), so that the system snaps back and forth, and record the values of the control parameters x and y, and θ immediately after each transition (Fig. S2). Moreover, to verify the position of the cusp we record the angle θ of the system before and after snapping, and observe that the change in angle upon snapping disappears at the cusp point.
+ Finally, we inspect all data collected along the bifurcation curves as shown in Fig. 2a in the main text, and use a spline fit for the saddle-node bifurcations from L to S and the saddle-node bifurcations from S to L. We define the cusp point as the intersection of the two splines.
+ B. Single snap experiment
+ The magneto elastic potential calculated for the experiment predicts a cusp at a slightly removed parametric position. The discrepancy between the experimentally measured and theoretically predicted cusps could be due to fabrication errors. To effectively compare theory and experiment in this section only, we parameterize the system as a function of its displacement from the cusp for both theory and experiment using dx and dy. We then follow the predicted path by controlling panel P1’s x and y positions using the translation stages. We begin the experiment by letting the system maintain its equilibrium at the initial dx, dy position. We then change the position of Panel P1 at a slow and steady rate. Angle measurements are recorded at various locations in the loop as shown in Fig. S1(a) (see also Fig. 1 in the main text), and the change in position is paused once the transition happens at point vi in order to let the system settle down and obtain an accurate angle measurement. We confirm that the system returns to its original state once we return to the starting dx, dy position.
+ C. Scaling experiment
+ To fit the scaling relations, we use the same section of the data set used for determining the location of the experimental cusp point. We neglect data in the nonlinear region of the saddle-node curves far away from the cusp point, as well as data too close to the cusp point, where the errors due to measurement noise are comparable to the distance to the cusp. The data points used for the scaling relations are highlighted in Fig. S2(a). The state parameter values used in the scaling analysis correspond to the angle measurements obtained at the points right after the snap-through transitions.
+ IV. ONE DIMENSIONAL BIFURCATIONS OF EQUILIBRIA: NORMAL FORM AND SCALING
+ The ability to design magneto elastic machines and control parameter pathways that robustly lead to complex actions corroborates the validity of a new design paradigm: operation near bifurcations of multiple equilibria. The demonstrated trajectories take advantage of the structure of available dynamics near bifurcations of equilibria. These bifurcations are the loci of multiple distinct coalescing saddle node manifolds, as illustrated for the idealized symmetric butterfly bifurcation (Fig. 3b in the main text). By weaving a trajectory that crosses and avoids chosen saddle node bifurcations we design a pathway that leads to complex actions. The system then cycles through multiple states via small variations of the control parameters, taking advantage of the multiple accessible lever mechanisms associated with these saddle node surfaces. The sensitivity of the realized design increases as the number of equilibria associated with the bifurcation grows.
+ Butterfly, cusp and saddle node bifurcations are the first in a series of bifurcations of equilibria in one-dimensional gradient systems. More generally, in systems with a single degree of freedom x, bifurcations of k equilibria are points in parameter space where the first k derivatives of the potential vanish, {dV/dx, d²V/dx², . . . , dᵏV/dxᵏ} = 0⃗. That is, they are equilibrium points satisfying k − 1 equations beyond that of mechanical equilibrium dV/dx = 0 and therefore lie on a manifold of co-dimension k − 1 within the equilibrium manifold. The sensitivity of a bifurcation of k equilibria to variation in its parameters can be estimated through the topological equivalence of the dynamics near it to those in a normal form potential
+ Ṽ = ϕ^(k+1) + Σ_{i=1}^{k−1} a_i ϕ^i,   (S4)
+ where the variable ϕ(θ) and normal form parameters a_i(p) are coordinate transformations of the angle θ and parameters p respectively. The normal form describes the unfolding of the Taylor expansion of the potential at the bifurcation, V ∼ x^(k+1), by variations of the parameters [S1–S3]. The unfolded normal form potential demonstrates that the parametric environment of a codimension k bifurcation includes domains with 1 to ⌈(k + 1)/2⌉ minima delineated by k saddle-node manifolds which coalesce at the bifurcation. Moreover, it implies scaling relations between the variation in the system’s state upon a snap-through transition induced by crossing a saddle node bifurcation associated with a codimension k − 1 bifurcation and the variation of a normal form parameter that causes the snap:
+ δϕ ∝ a_m^(1/(k−m+1)),   m < k.   (S5)
+ Heuristically, the scaling can be derived from the normal form by noting that near the bifurcation the kth derivative of the potential must still vanish, and so δϕ² ∼ a_k. Similarly, the next k − 1 derivatives must progressively vanish, setting the scaling of a_m. An explicit proof is given in [S4] and summarized in [S5, Sec. 36.6]. These scaling relations carry over to the original variable and parameters near the bifurcation where the maps ϕ(x) and a_m(p) are approximately linear. Indeed, the scaling relations we experimentally observed near the cusp are those of the system’s state with the normal form parameters near a bifurcation of three equilibria, i.e., a cusp [S4, S5].
+ These scaling relations imply that the sensitivity of the system to variations of parameters grows exponentially with the number of associated equilibria. A system designed near a bifurcation of k equilibria can toggle its state between order-unity separated states, δϕ ∼ 1/2, in response to variations of the linear normal form coefficient a₁ of order 1/2ᵏ. That is, both the potential lever advantage and the sensitivity to noise in the parameters grow as the number of associated equilibria grows. However, the parametric domain in which the mapping to the normal form is linear is often very small. The nonlinearity of the mapping often blunts the sensitivity of the response. Thus, the increased lever advantage near bifurcations of multiple equilibria is often not experimentally accessible. Conversely, the system is not so sensitive to parametric noise when operated at a small parametric distance from the bifurcation about which it is designed, as demonstrated by the reproducibility of the experimental three state system, which was easily constructed twice.
+ V. CONTINUATION ALGORITHMS
+ To find bifurcations of multiple equilibria in the dynamics of our model system and to map out the saddle node structure in the vicinity of the high-order point, we use a series of continuation algorithms. In one dimension, a codimension k bifurcation point is defined by the vanishing of the first k derivatives of the potential: ∂_θ^j V(θ∗, {ξ_i}) = 0 for j = 1, 2, . . . , k. These constraints define a codimension k manifold in the space of dynamical variables and parameters (θ∗, {ξ_i}).
+ space of dynamical variables and parameters (θ∗, {ξi}).
1155
+ A.
1156
+ Traditional continuation
1157
+ Standard continuation algorithms compute bifurcation curves by varying a small number of parameters,
1158
+ and then projecting onto the bifurcation manifold [S1]. For example, suppose we have found a co-dimension
1159
+
1160
+ 4
1161
+ k bifurcation. This requires the first k derivatives of the potential vanish, fixing θ∗ and k − 1 parame-
1162
+ ters ξ1, ξ2, . . . , ξk−1. Varying an additional parameter ξk produces a line emanating from our initial point
1163
+ (θ∗, {ξ}) = p. The continuation algorithm maps out this line by (i) taking a step along the tangent vec-
1164
+ tor Tk(p) to the curve, which is the null-vector of the gradient of the first k derivatives of the potential
1165
+ Tk(p) ≡
1166
+
1167
+ ⃗v ∈ Rk+1 | ∀j ∈ (1, 2, . . . , k), ⃗v · ∇θ,ξ1,ξ2,...,ξk∂j
1168
+ θV = 0
1169
+
1170
+ and (ii) correcting this step using a Newton-
1171
+ Raphson algorithm4 to search perpendicular to the step for a point where the first k derivatives of the po-
1172
+ tential vanish. This approach can be used to progressively search for higher order bifurcation points. For
1173
+ example, a fixed-point can be continued until ∂2V (θ∗, {ξi})/∂θ2 vanishes, indicating a saddle node bifurca-
1174
+ tion. Continuing the saddle-node can lead to a cusp bifurcation, which in turn might lead to a swallowtail
1175
+ bifurcation. In this way, progressively adding parameters and performing continuations of one-dimensional
1176
+ curves can lead toward high-codimension bifurcation points. Once we have found a high-order bifurcation
1177
+ point, we use this algorithm to map out the saddle node surfaces nearby. The surfaces can in turn be used
+ The standard continuation approach, however, has limitations for microscopic machine design. In particular, it has limited utility for finding the high-order bifurcation points near which our machine will operate. In our model system we have many free parameters, including the positions of each of the magnets embedded in the panels. Varying a given experimental parameter does not guarantee we will find the next order bifurcation point. Instead we want to vary many parameters simultaneously, which greatly improves the likelihood that a higher-order bifurcation point is contained within the search space and allows for a more efficient approach toward that point. We have developed a gradient continuation algorithm to carry out this multi-parameter search.
+ multi-parameter search.
1187
+ B.
1188
+ Design algorithm: Gradient continuation
1189
+ The gradient continuation algorithm works as follows. Suppose we have N parameters ξi in our system,
1190
+ plus the degree-of-freedom θ.
1191
+ A point, p, where the first k derivatives of the potential vanish belongs
1192
+ to a co-dimension k manifold in the full (N + 1)-dimensional augmented parameter space, composed of
1193
+ the equilibrium state and control parameters, (θ∗, {ξi}). Starting from the point p, take a step along the
1194
+ gradient of the k+1 derivative of the potential ∇θ,ξ1,ξ2,...,ξN ∂k+1
1195
+ θ
1196
+ V , projected onto the tangent surface to the
1197
+ manifold at p. The tangent surface is the null-space of the gradient of the first k derivatives of the potential5,
1198
+ Tk,N(p) ≡
1199
+
1200
+ ⃗v ∈ RN+1 | ∀j ∈ (1, 2, . . . , k), ⃗v · ∇θ,ξ1,ξ2,...,ξN ∂j
1201
+ θV = 0
1202
+
1203
+ . This procedure finds the step within the
1204
+ co-dimension k manifold that maximizes the change in ∂k+1
1205
+ θ
1206
+ V , which we need to vanish in order to find the
1207
+ next order bifurcation. After the step, the algorithm performs a corrective Newton-Raphson search [S6],
1208
+ constrained to the hyperplane T ⊥
1209
+ k,N(p) perpendicular to the null-space, which returns to the codimension
1210
+ k manifold on which the first k derivatives of the potential vanish. As in the standard continuation, this
1211
+ approach is repeated to progressively find higher order bifurcation points. A visualization of the gradient
1212
+ search algorithm, applied to the potential V = θ6 + a4θ4 + a2θ2 + a1θ, is shown in Fig. 3b in the main text.
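+ A compact Python implementation of this search on the symmetric butterfly normal form (a sketch under our own conventions, using sympy for the derivatives; it is not the authors’ code, and a production version would use adaptive steps and more careful handling of singular Jacobians) reads:
+ import numpy as np
+ import sympy as sp
+
+ th, a1, a2, a4 = sp.symbols('theta a1 a2 a4')
+ V = th**6 + a4*th**4 + a2*th**2 + a1*th
+ X = [th, a1, a2, a4]
+
+ def fn(e): return sp.lambdify(X, e, 'numpy')
+ def grad(e): return [fn(sp.diff(e, v)) for v in X]
+
+ def continue_to_next(u, k, step=1e-2, tol=1e-8, max_iter=100000):
+     """On the manifold where the first k θ-derivatives of V vanish, follow the
+     projected gradient of ∂_θ^{k+1}V until it vanishes as well."""
+     cons = [sp.diff(V, th, j) for j in range(1, k + 1)]
+     fc, Jrows = [fn(c) for c in cons], [grad(c) for c in cons]
+     ft, gt = fn(sp.diff(V, th, k + 1)), grad(sp.diff(V, th, k + 1))
+     u = np.asarray(u, float)
+     for _ in range(max_iter):
+         ftv = ft(*u)
+         if abs(ftv) < tol:
+             return u
+         J = np.array([[df(*u) for df in row] for row in Jrows])
+         g = np.array([df(*u) for df in gt])
+         t = g - J.T @ np.linalg.lstsq(J @ J.T, J @ g, rcond=None)[0]  # tangent projection
+         h = min(step, abs(ftv) / np.linalg.norm(t))
+         u = u - h * np.sign(ftv) * t / np.linalg.norm(t)              # predictor
+         for _ in range(5):                                            # Newton corrector
+             c = np.array([f(*u) for f in fc])
+             J = np.array([[df(*u) for df in row] for row in Jrows])
+             u = u - J.T @ np.linalg.lstsq(J @ J.T, c, rcond=None)[0]
+     raise RuntimeError("did not reach the next bifurcation")
+
+ u = np.array([0.7, 0.0, -1.0, 0.5])                 # assumed starting guess
+ u[1] = -(6*u[0]**5 + 4*u[3]*u[0]**3 + 2*u[2]*u[0])  # enforce dV/dθ = 0
+ for k in range(1, 5):                               # saddle node → cusp → swallowtail → butterfly
+     u = continue_to_next(u, k)
+ print(u)  # approaches the butterfly point θ = a1 = a2 = a4 = 0
+ Each stage follows a one dimensional curve regardless of how many parameters are searched, which is what keeps the method tractable when N is large.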
+ VI. BUTTERFLY EXPERIMENTS
+ A. Butterfly panels
+ In the butterfly experiments, Panel P1’s x, y and z positions are measured as displacements from their value when the panels are 180° open, the magnets closest to the hinge axis are removed from it by 2.5 cm on both panels, the panels are aligned vertically, and the backs of the cylindrical magnets on Panel P1 are aligned with the centers of the magnets on Panel P2. This small change in magnet alignment (compared to the single snap experiment) is found to reduce the discrepancy between experiment and prediction. An illustration of the panels is shown in Fig. S3. The damping paddle has dimensions 8.0 cm by 2.5 cm for the butterfly experiments. The position of the magnets on panel P1 was changed such that the system operates next to a butterfly bifurcation, as specified in the main text and in the following sections.
+ B.
1235
+ Application of the continuation algorithm
1236
+ To find an experimentally feasible path and magnetic pattern, we implement the continuation algorithm
1237
+ by first finding a butterfly point in parameter space, then validating the resulting pattern against known
1238
+ experimental constraints (e.g. we require physically realizable panel angles and magnet positions). Before
1239
+ each search using the continuation algorithm, we first randomly generate orientations of the 18 magnetic
1240
+ dipoles on the two panels. We also randomly select two magnets on Panel P1 to be displaced from their
1241
+ lattice positions, by (dx1, dy1) and (dx2, dy2) respectively. The search algorithm is always initialized with
1242
+ the values {θ, dx, dy, dz, dx1, dy1, dx2, dy2} = {1.1rad, 0.5cm, −0.25cm, 0, 0, 0, 0, 0}.
1243
+ Next, we let the algorithm try to find a butterfly bifurcation point. If no butterfly point can be found, we
+ repeat the initialization process and repeat the search with a new randomly generated magnetic pattern. The
+ butterfly point corresponding to the pattern we used in our experiments is located at {θ, dx, dy, dz, dx1, dy1, dx2, dy2}
+ = {2.131 rad, −0.355 cm, −0.304 cm, −0.824 cm, 0.918 cm, −0.698 cm, −0.326 cm, −0.486 cm}.
1297
+ If the butterfly point is found, we investigate the potential plots at various points in parameter space near
+ the bifurcation point. Specifically, we offset one or more of the 6 search parameters by ±0.2 and count the
+ number of minima that exist between 0 and 180 degrees at each of these locations. The potential plots at
+ locations with three minima are then inspected to assess the experimental feasibility of the pattern. Ideally,
+ all three minima are at least 5 degrees apart, and the smallest minimum sits at an angle of at least 5 degrees
+ (for z = 0) to prevent the panels from touching during the experiment. We also look for patterns with large
+ triple-minima regions, for example those where three visibly deep minima can still be observed when at least
+ one parameter is changed by ±0.5 cm.
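+ As a hedged illustration of this minima-counting step, the sketch below counts the minima of a model
+ potential on (0, π) (i.e., 0 to 180 degrees, in radians) by locating sign changes of the numerical derivative;
+ the grid resolution and the toy potential are assumptions for illustration only.
+ import numpy as np
+
+ def count_minima(V, theta_min=0.0, theta_max=np.pi, n=2000):
+     # Local minima of V(theta): points where dV/dtheta crosses zero from - to +.
+     theta = np.linspace(theta_min, theta_max, n)
+     dV = np.gradient(V(theta), theta)
+     crossings = (dV[:-1] < 0) & (dV[1:] >= 0)
+     return int(np.sum(crossings)), theta[:-1][crossings]
+
+ # Toy multi-well potential standing in for the magneto-elastic energy.
+ V = lambda th: np.cos(3 * th) + 0.3 * np.cos(th)
+ n_min, locations = count_minima(V)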
1305
+ After an experimentally feasible pattern is discovered, we manipulate the three experimentally controllable
1306
+ parameters (x,y,z) continuously around the point with deepest triple minima and observe changes in our
1307
+ model of the potential landscape. The design of the control path is guided by visualization of the saddle-node
1308
+ surfaces mapped out using the standard continuation algorithm detailed above. Several paths are tested in
1309
+ the model to obtain the desired sequence of bifurcations and to optimize various properties of the transitions
1310
+ (e.g. the magnitude of the snaps and depth of the minima).
1311
+ C.
1312
+ Experiments for trajectories near a butterfly point
1313
+ We set up the experiment by laser-cutting the holes for magnets at the exact locations corresponding to the
1314
+ found dx1, dy1, dx2, dy2 values, which were 1.418cm, −0.273cm, −0.826cm, and −0.986cm respectively. We
1315
+ also add a translation stage to control Panel P1’s z position. We begin the experiment by following the exact
1316
+ coordinates provided by the theoretically designed path. In the event that a predicted transition cannot be
1317
+ seen using the predicted path coordinates (due to fabrication or calibration errors shifting the surface), we
1318
+ translate the system further from the original predicted path to determine a more robust path that may
1319
+ account for some shifting in coordinates due to experimental errors (for example see Fig. 4b in the main
1320
+ text). Once an experimental path is shown to demonstrate the predicted behavior with the desired number
1321
+ of state transitions, we record the locations for state transitions in experiment, and repeat the experiment
1322
+ while slowing down the rate of change in x,y positions near the transitions to give the system enough time to
1323
+ respond in the presence of large damping. Those experiments show excellent qualitative agreement with the
1324
+ theoretically designed paths, although the locations at which transitions happen and the equilibrium angle
1325
+ of the panel are often shifted by a small amount due to experimental error.
1326
+
1327
1328
+ D.
1329
+ Additional operation mode: double-snap trajectories
1330
+ The intricate saddle-node surface structure near the butterfly bifurcation enables a variety of snapping
1331
+ behaviors with the same panel design, beyond the 3-state cycle presented in the main text. Here we present
1332
+ a second snapping sequence that was measured experimentally.
1333
+ Using the same trajectory in parameter space as the three-snap sequence in the main text, but traversing
+ the path in the reverse direction, we observe a two-snap sequence between small (S) and large (L) angles.
1335
+ Fig. S4a shows this trajectory together with the same saddle surfaces from Fig. 4a in the main text. The
1336
+ experimentally measured angles along this backward cycle are shown in Fig. S4b (see also Movie S2). Besides
1337
+ a minor systematic shift in the angles of the L state, we find excellent fidelity between the predicted and
1338
+ measured angles. The snapping transitions occur almost exactly at the predicted locations.
1339
+ Our example trajectories demonstrate that the saddle-node structure in the vicinity of a butterfly bifurca-
+ tion enables a great deal of flexibility in controlling state transitions of a mechanical system. For practical
+ applications, further fine-tuning of the control trajectory can be used to optimize features of the system's
+ behavior (e.g., the positions of the steady states and their lifetimes in the presence of environmental noise).
1343
+ VII.
1344
+ GENERALIZATIONS: MULTIDIMENSIONAL BIFURCATIONS AND SUPPLEMENTAL
1345
+ SCALING BEHAVIOURS
1346
+ A.
1347
+ Stopping conditions in higher dimensions
1348
+ While our proof-of-concept experiment is limited to a hinge with a single degree of freedom (the opening
1349
+ angle), our approach and gradient continuation algorithm are straightforward to apply to systems with
1350
+ multiple degrees of freedom, e.g. a microscopic robot with multiple panels connected by elastic hinges. The
1351
+ cuspoidal bifurcations discussed in this paper also naturally appear in higher-dimensional gradient systems.
1352
+ However, the analytic criteria to classify them are somewhat more complicated: we cannot simply search
1353
+ for points where higher order derivatives of the potential vanish. In this section we will discuss stopping
1354
+ criteria in higher dimensions, i.e. what quantities should we follow with the gradient continuation algorithm
1355
+ to search for bifurcations of increasing order?
1356
+ With two or more degrees of freedom, a saddle-node bifurcation occurs when a fixed-point (stable or
1357
+ unstable) collides with a saddle point, resulting in mutual annihilation. This occurs when an eigenvalue of
1358
+ the Hessian of the potential Aij = −∂θi∂θjV crosses 0 (here θi are the dynamical variables). For the purposes
1359
+ of applying gradient continuation starting from a fixed point, it is therefore convenient to use det A as the
1360
+ stopping criterion, since the determinant vanishes when an eigenvalue does.
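+ As a minimal sketch of this stopping criterion (the potential is assumed to be supplied as a Python callable,
+ and the finite-difference step is an illustrative choice), one can monitor the determinant of a finite-difference
+ Hessian along the continuation:
+ import numpy as np
+
+ def hessian(V, theta, h=1e-4):
+     # Central finite-difference Hessian of the potential V at the state theta.
+     n = len(theta)
+     H = np.zeros((n, n))
+     for i in range(n):
+         for j in range(n):
+             ei = np.zeros(n); ei[i] = h
+             ej = np.zeros(n); ej[j] = h
+             H[i, j] = (V(theta + ei + ej) - V(theta + ei - ej)
+                        - V(theta - ei + ej) + V(theta - ei - ej)) / (4 * h * h)
+     return H
+
+ def saddle_node_indicator(V, theta):
+     # det A with A = -Hessian; det(A) and det(Hessian) vanish together, so
+     # either sign convention serves as the stopping criterion.
+     return np.linalg.det(-hessian(V, theta))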
1361
+ Near a saddle-node bifurcation, the state space can be decomposed (by the Center Manifold Theorem)
1362
+ into (i) the invariant center manifold emanating from the fixed point along the direction of the critical
1363
+ eigenvector (with eigenvalue 0) and (ii) a stable/unstable manifold on which the flows exponentially grow or
1364
+ decay (for the purposes of machine design we generally want only stable directions). Due to the vanishing
1365
+ eigenvalue, the dynamics on the center manifold are nonlinear at lowest order.
1366
+ These dynamics can be
1367
+ determined perturbatively by expanding the gradient of the potential, projecting onto the center manifold
1368
+ and enforcing the invariance of the center manifold [S1]. Higher-order bifurcations occur when the center
1369
+ manifold expansion coefficients vanish. For example, a vanishing quadratic term indicates a cusp bifurcation,
+ a vanishing cubic term indicates a swallowtail, and so on. Thus these coefficients replace the higher-order
1371
+ derivatives of the potential as the stopping criteria in the gradient continuation algorithm. Below we give
1372
+ explicit expressions for these expansion coefficients.
1373
+ Suppose we have an n-dimensional system θ ∈ Rn that undergoes a saddle node bifurcation at θ = 0.
1374
+ Near this point, the dynamics can be expanded as follows,
1375
+ ˙θ = A θ + F(θ),    (S6)
1377
+ where A is the linearization Aij = −∂θi∂θjV introduced above (which has a zero eigenvalue) and F(θ) collects all quadratic and
1378
+ higher-order terms in multilinear forms,
+ F(θ) = (1/2) B(θ, θ) + (1/6) C(θ, θ, θ) + (1/24) D(θ, θ, θ, θ) + O(||θ||⁵)
+ = (1/2) Σi,j ∂²F(φ)/∂φi∂φj |φ=0 θiθj + (1/6) Σi,j,k ∂³F(φ)/∂φi∂φj∂φk |φ=0 θiθjθk
+ + (1/24) Σi,j,k,l ∂⁴F(φ)/∂φi∂φj∂φk∂φl |φ=0 θiθjθkθl + O(||θ||⁵),    (S7)
+ where all sums run from 1 to n.
1415
+ Let ψ and ϕ be the right and left eigenvectors corresponding to the zero eigenvalue: Aψ = 0 and Aᵀϕ = 0.
+ The projection of θ onto the center manifold, ϑ = ϕ · θ, has dynamics
+ ˙ϑ = a2ϑ² + a3ϑ³ + O(ϑ⁴).    (S8)
1419
+ Following Kuznetsov, we derive the coefficients up to fourth order (third order is given in Ref. [S1]),
1420
+ a2 = (1/2) ϕ · B(ψ, ψ),
+ a3 = (1/6) ϕ · C(ψ, ψ, ψ) + (1/2) ϕ · B(ψ, b2),
+ a4 = (1/24) ϕ · D(ψ, ψ, ψ, ψ) + (1/4) ϕ · C(ψ, ψ, b2) + (1/8) ϕ · B(b2, b2) + (1/6) ϕ · B(ψ, b3),    (S9)
1431
+ where
1432
+ b2 = A_su^(−1) ( ψ[ϕ · B(ψ, ψ)] − B(ψ, ψ) ),
+ b3 = A_su^(−1) ( ψ[ϕ · C(ψ, ψ, ψ) + 3ϕ · B(ψ, b2)] + 3b2[ϕ · B(ψ, ψ)] − C(ψ, ψ, ψ) − 3B(ψ, b2) ),    (S10)
1443
+ and A_su^(−1) is the inverse of A restricted to the stable/unstable subspace (which has no zero eigenvalues).
+ As mentioned above, a vanishing a2 indicates a cusp; if a3 also vanishes we have a swallowtail; and if all three
+ coefficients are zero we have a butterfly. The vectors b2 and b3 describe the curvature of the center manifold
+ in the full θ space, θ = ψϑ + b2ϑ²/2 + b3ϑ³/6. While these bifurcations are one-dimensional (they occur on
+ the one-dimensional invariant center manifold), the curvature of the center manifold as we move further from
+ the bifurcation point could allow snapping between states with reasonable separation in multiple dimensions.
1450
+ In principle, this would enable machines to carry out work cycles near a butterfly bifurcation.
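+ A hedged sketch of these formulas for the quadratic and cubic coefficients follows; it assumes the multilinear
+ forms B(x, y) and C(x, y, z) of Eq. (S7) are supplied as callables, and uses the Moore-Penrose pseudo-inverse
+ in place of A_su^(−1), which for the symmetric A of a gradient system inverts A away from the (numerically)
+ zero eigenvector.
+ import numpy as np
+
+ def center_manifold_coeffs(A, B, C, rcond=1e-8):
+     # a2 and a3 of Eq. (S8), following Eqs. (S9)-(S10).
+     w, vr = np.linalg.eig(A)
+     psi = np.real(vr[:, np.argmin(np.abs(w))])          # right null eigenvector
+     wl, vl = np.linalg.eig(A.T)
+     phi = np.real(vl[:, np.argmin(np.abs(wl))])         # left null eigenvector
+     phi = phi / (phi @ psi)                             # normalize so phi . psi = 1
+     a2 = 0.5 * phi @ B(psi, psi)
+     A_su_inv = np.linalg.pinv(A, rcond=rcond)           # stand-in for A_su^(-1)
+     b2 = A_su_inv @ (psi * (phi @ B(psi, psi)) - B(psi, psi))
+     a3 = (phi @ C(psi, psi, psi)) / 6.0 + 0.5 * phi @ B(psi, b2)
+     return a2, a3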
1451
+ B.
1452
+ Scaling for the Thom’s seven: hyperbolic and elliptic umbilics
1453
+ Beyond the quasi-one-dimensional bifurcations there are also cuspoidal bifurcations that are genuinely mul-
1454
+ tidimensional. In two dimensions, for example, we have elliptic umbilic, hyperbolic umbilic, and parabolic
1455
+ umbilic catastrophes (these together with the four one-dimensional bifurcations saddle-node, cusp, swallow-
1456
+ tail, and butterfly make up the Thom seven). Like the cusp and butterfly bifurcations, the unfolding of the
1457
+ normal form predicts an intricate saddle-surface structure describing how fixed points and saddle points
1458
+ come together and collide in the vicinity of the bifurcation point. These higher-dimensional bifurcations also
1459
+ obey advantageous scaling laws, relating the changes in state to the variation of control parameters. For
1460
+ example, the normal form potentials for the elliptic and hyperbolic umbilics are
1461
+ Velliptic = θ1³/3 − θ1θ2² + a(θ1² + θ2²) + bθ1 + cθ2,
+ Vhyperbolic = θ1³ + θ2³ + aθ1θ2 + bθ1 + cθ2,    (S11)
1471
+ from which the following scaling can be derived [S4]:
+ δθ1, δθ2 ∼ a,    b, c ∼ a².    (S12)
1475
+ Increasing the dimension further leads to even more cuspoidal bifurcations; these have been enumerated
1476
+ by Arnold using an ADE classification [S7]. While the search criteria for such bifurcations are increasingly
1477
+ complicated, they provide a rich design space for multi-component machines.
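+ As an illustration of these normal forms, the sketch below evaluates V_elliptic and numerically locates a
+ critical point for parameters chosen on the scaling surface b, c ∼ a²; the parameter values and the solver
+ seed are assumptions for illustration only.
+ import numpy as np
+ from scipy.optimize import fsolve
+
+ def V_elliptic(th, a, b, c):
+     # Elliptic umbilic normal form of Eq. (S11).
+     t1, t2 = th
+     return t1**3 / 3 - t1 * t2**2 + a * (t1**2 + t2**2) + b * t1 + c * t2
+
+ def grad_elliptic(th, a, b, c):
+     # Gradient of V_elliptic; its roots are the fixed points.
+     t1, t2 = th
+     return [t1**2 - t2**2 + 2 * a * t1 + b, -2 * t1 * t2 + 2 * a * t2 + c]
+
+ for a in [0.1, 0.05, 0.025]:
+     b = c = a**2                                   # control parameters scaled as a^2
+     crit = fsolve(grad_elliptic, x0=[a, a], args=(a, b, c))
+     print(a, crit)                                 # critical points shift ~ a (Eq. S12)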
1478
+
1479
1480
+ C.
1481
+ Reynolds number scaling
1482
+ The magnetic decorations in our experiments are arranged in each panel about a square lattice with
1483
+ unit separation of 2.5 cm. To explore over-damped gradient dynamics, which are ubiquitous in microscopic
1484
+ mechanisms, the rotating panel is attached to a paddle moving through a glycerol bath. The results of our
1485
+ experiments then hold also for smaller systems in fluid with comparable kinematic viscosity. If the system
1486
+ is smaller by a factor Ω ≪ 1, the time ∆t it takes our macroscopic over-damped system, of typical size L,
1487
+ to traverse an angular expanse ∆θ is equal to the time it takes a microscopic system, of size ΩL to traverse
1488
+ the same angular expanse in the same liquid. This comes about because both the viscous drag force and
1489
+ the magnetic force between dipoles of magnetization M1 and M2, FDrag ∼ L2 ˙γ, Fdipole ∼ M1M2/R4, are
1490
+ quadratic in the typical system sizes. For over-damped dynamics this results in a length-scale independent
1491
+ strain rate, ˙γ. The system is over-damped if its Reynolds number Re = L²˙γ/ν is smaller than 1, where ν
+ is the fluid's kinematic viscosity. The Reynolds number of a miniaturized system is therefore smaller by a
1493
+ factor of Ω2. Reducing the system’s size can compensate for changes in the system’s composition, such as
1494
+ embedding it in water rather than glycerol, or the growth of magnetic dipole strength density as the system
1495
+ size decreases.
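+ A small numerical illustration of this Ω² scaling follows; the panel size, strain rate, and glycerol viscosity
+ below are rough assumed values rather than measured ones.
+ def reynolds(L, gamma_dot, nu):
+     # Re = L^2 * gamma_dot / nu for size L [m], strain rate gamma_dot [1/s],
+     # and kinematic viscosity nu [m^2/s].
+     return L**2 * gamma_dot / nu
+
+ L, gamma_dot = 0.08, 1.0          # ~8 cm panel, order-one strain rate (assumed)
+ nu_glycerol = 1.2e-3              # kinematic viscosity of glycerol, approx. [m^2/s]
+ Omega = 1e-3                      # miniaturization factor
+ print(reynolds(L, gamma_dot, nu_glycerol))           # macroscopic Re
+ print(reynolds(Omega * L, gamma_dot, nu_glycerol))   # smaller by a factor Omega^2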
1496
+ [S1] Y. A. Kuznetsov, “Topological equivalence, bifurcations, and structural stability of dynamical systems,” in
+ Elements of Applied Bifurcation Theory (Springer New York, New York, NY, 2004).
+ [S2] J. Guckenheimer and P. Holmes, Nonlinear Oscillations, Dynamical Systems, and Bifurcations of Vector Fields
+ (Springer New York, New York, NY, 1983).
+ [S3] J. W. Bruce and P. J. Giblin, Curves and Singularities: A Geometrical Introduction to Singularity Theory, 2nd
+ ed. (Cambridge University Press, 1992).
+ [S4] M. V. Berry, Journal of Physics A: Mathematical and General 10, 2061 (1977).
+ [S5] DLMF, “NIST Digital Library of Mathematical Functions,” http://dlmf.nist.gov/, Release 1.1.4 of 2022-01-15
+ (2022), F. W. J. Olver, A. B. Olde Daalhuis, D. W. Lozier, B. I. Schneider, R. F. Boisvert, C. W. Clark, B. R.
+ Miller, B. V. Saunders, H. S. Cohl, and M. A. McClain, eds.
+ [S6] W. H. Press, S. A. Teukolsky, W. T. Vetterling, and B. P. Flannery, Numerical Recipes 3rd Edition: The Art
+ of Scientific Computing (Cambridge University Press, 2007).
+ [S7] V. I. Arnol'd, “Bifurcations of equilibria,” in Dynamical Systems V, edited by V. I. Arnol'd (Springer Berlin
+ Heidelberg, Berlin, Heidelberg, 1994) pp. 10–38.
+ [S8] M. Smith, G. Yanega, and A. Ruina, Journal of Theoretical Biology 282, 41 (2011).
1530
+
1531
1532
+ FIG. S1. Single snap-through mechanism (a.) As we vary the control parameters along a loop around the cusp
1533
+ point as shown, we expect to see a single snap-through buckling behavior (point v to point vi) for each cycle, akin
1534
+ to how hummingbirds use their beak to capture prey [S8]. (b.) The predicted potential energy curves for points
1535
+ labeled from i to vi are presented. The saddle-node bifurcation occurs between v and vi as indicated by the arrow
1536
+ in v. (c.) We experimentally observe the predicted snap-through behavior. Due to experimental errors, the location
1537
+ of the cusp point is shifted, but we see excellent agreement between the theory and measurements after shifting the
1538
+ coordinates to align the theoretical and experimental cusp points.
1539
+ FIG. S2. Snap Through transitions near a cusp. These plots show the equilibrium angle recorded in experiments
1540
+ following a snap-through transition. The corresponding (x, y) denote the values of the control parameters at which
1541
+ the snap-through occurred. (a.) Highlights the data points used to fit the cusp scaling. We exclude data far from
1542
+ the cusp, where higher order terms in the normal form are non-negligible, and close to the cusp, where measurement
1543
+ and fabrication error are comparable to the distance from the cusp. (b.) Highlights the data corresponding to the
1544
+ upper and lower saddle-node curves.
1545
+
1546
+ [Panels of Figs. S1 and S2: graphical content not recoverable from the text extraction. Recoverable annotations:
+ equilibrium angles i: θ = 2.606, ii: θ = 2.841, iii: θ = 2.854, iv: θ = 2.864, v: θ = 2.830, vi: θ = 2.653 (rad);
+ axis labels θ [rad] (system state) and dy, x, y [cm] (control parameters); legend entries “Snap-through”,
+ “Cusp Point”, “Scaling Data Points”, “Unused Data Points”, “Upper/Lower Saddle Node Curve”, “Estimated Cusp Point”.]
1642
+ FIG. S3. Butterfly panels: In the butterfly experiments, Panel P1’s x, y and z positions are measured as displacements
1643
+ from their value when the panels are 180◦ open, the magnets closest to the hinge axis are offset from it by 2.5 cm
1644
+ on both panels, the panels are aligned vertically and the back of the cylindrical magnets on Panel P1 are aligned with
1645
+ the center of the magnets on Panel P2. This small change in magnet alignment is found to reduce the discrepancy
1646
+ between experiment and prediction.
1647
+ FIG. S4.
1648
+ 2-state cycle near a butterfly bifurcation point. (a.) Theory: The saddle-node surfaces of a magneto-
+ elastic system with three active control parameters, x, y and z, are plotted; their color denotes the angle θ at which
1650
+ the snap occurs. The system’s magnetic pattern is designed using the gradient continuation algorithm such that it
1651
+ operates near a butterfly bifurcation where multiple saddle node surfaces coalesce, enabling multiple snap-through
1652
+ transitions at the surfaces. A trajectory (colored tube with white arrows) is chosen such that the system snaps back
1653
+ and forth between two states with Large (L) and Small (S) angles. This trajectory is identical to that for the 3-state
1654
+ cycle in Fig. 4 in the main text, but the path is traversed in the opposite direction. The system’s predicted state is
1655
+ denoted by the tube’s color. At intersections of the trajectory with a surface where their colors match the system is
1656
+ predicted to snap to a new state. (b.) Experimental demonstration: The colored dots mark the experimental value
1657
+ of the system’s state as it follows the designed trajectory. We observe two distinct transitions as predicted.
1658
+
1659
+ [Panels of Figs. S3 and S4: graphical content not recoverable from the text extraction. Recoverable annotations:
+ the hinge axis and magnet-offset label dx2 with a 0–7.5 cm scale (Fig. S3); control-parameter axes x, y [cm] and
+ system state θ [rad], with the states S and L marked along the trajectory (Fig. S4).]
FdAzT4oBgHgl3EQfi_0V/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
HdE4T4oBgHgl3EQfgg3J/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8bcfe5639a0337f804cc1aca29236b50d8ab3096d66eaac2e79737b6bef5f5d
3
+ size 7798829
J9E3T4oBgHgl3EQfvQsy/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:338e48f45103b0b901586522e962a3070ffb985a7016dabc2dedcf71a2ef5d7c
3
+ size 188562
JNE1T4oBgHgl3EQfGAOI/content/tmp_files/2301.02909v1.pdf.txt ADDED
@@ -0,0 +1,776 @@
1
+ How to Allocate your Label Budget? Choosing between
2
+ Active Learning and Learning to Reject in Anomaly Detection
3
+ Lorenzo Perini,¹ Daniele Giannuzzi, Jesse Davis¹
4
+ 1 KU Leuven, Department of Computer Science, DTAI & Leuven.AI, B-3000 Leuven, Belgium
5
+ lorenzo.perini@kuleuven.be, danielegiannuzzi1998@gmail.com, jesse.davis@kuleuven.be
6
+ Abstract
7
+ Anomaly detection attempts to find examples that deviate
8
+ from the expected behaviour. Usually, anomaly detection is
9
+ tackled from an unsupervised perspective because anomalous
10
+ labels are rare and difficult to acquire. However, the lack of
11
+ labels makes the anomaly detector have high uncertainty in
12
+ some regions, which usually results in poor predictive perfor-
13
+ mance or low user trust in the predictions. One can reduce
14
+ such uncertainty by collecting specific labels using Active
15
+ Learning (AL), which targets examples close to the detec-
16
+ tor’s decision boundary. Alternatively, one can increase the
17
+ user trust by allowing the detector to abstain from making
18
+ highly uncertain predictions, which is called Learning to Re-
19
+ ject (LR). One way to do this is by thresholding the detector’s
20
+ uncertainty based on where its performance is low, which re-
21
+ quires labels to be evaluated. Although both AL and LR need
22
+ labels, they work with different types of labels: AL seeks
23
+ strategic labels, which are evidently biased, while LR requires
24
+ i.i.d. labels to evaluate the detector’s performance and set the
25
+ rejection threshold. Because one usually has a unique label
26
+ budget, deciding how to optimally allocate it is challenging.
27
+ In this paper, we propose a mixed strategy that, given a budget
28
+ of labels, decides in multiple rounds whether to use the bud-
29
+ get to collect AL labels or LR labels. The strategy is based
30
+ on a reward function that measures the expected gain when
31
+ allocating the budget to either side. We evaluate our strategy
32
+ on 18 benchmark datasets and compare it to some baselines.
33
+ Introduction
34
+ Anomaly detection is the task of automatically detect-
35
+ ing examples that do not follow expected patterns (Chan-
36
+ dola, Banerjee, and Kumar 2009). These examples, named
37
+ anomalies, are usually indicative of critical events such as
38
+ water leaks in stores (Perini, Vercruyssen, and Davis 2022),
39
+ breakdowns in gas turbines (Zhao, Wen, and Li 2016), or
40
+ failures in the petroleum extraction (Mart´ı et al. 2015). Such
41
+ critical events usually come along with elevated (mainte-
42
+ nance) costs or with substantial natural damages (e.g., dis-
43
+ persion of petroleum or gas). Thus, detecting anomalies in
44
+ time is a relevant task that limits such resource waste.
45
+ Collecting labels, especially for anomalies, is often a
46
+ hard task because anomalies are costly events (e.g., ma-
47
+ chine failures cannot be voluntarily induced), or simply
48
+ time-consuming (e.g., you may need to label 100s of exam-
49
+ ples before getting an anomaly). Thus, anomaly detection is
50
+ often tackled from an unsupervised perspective. However,
51
+ the lack of labels usually forces the unsupervised detector
52
+ to have high uncertainty on specific regions of the example
53
+ space (Perini, Vercruyssen, and Davis 2020). High uncer-
54
+ tainty is undesirable because it is often associated with poor
55
+ predictive performance or reduced trust in the predictions.
56
+ This uncertainty can be tackled in two complementary
57
+ ways. On the one hand, one can try to learn a more accu-
58
+ rate detector by acquiring a limited number of labels using
59
+ Active Learning (AL) (Abe, Zadrozny, and Langford 2006).
60
+ On the other hand, it is possible to increase the user trust
61
+ in the detector’s outputs by allowing the detector to abstain
62
+ from making a prediction when it is highly uncertain, which
63
+ is called Learning to Reject (LR) (Hendrickx et al. 2021;
64
+ De Stefano, Sansone, and Vento 2000). One way to do this
65
+ is to set a rejection threshold on the detector’s uncertainty
66
+ based on where its performance is poor (Cortes, DeSalvo,
67
+ and Mohri 2016). However, evaluating the detector perfor-
68
+ mance requires labels.
69
+ Both of these approaches rely on labeled data. However,
70
+ the types of labels needed for each approach are quite differ-
71
+ ent. Many AL strategies rely on biased sampling strategies
72
+ such as explicitly targeting labels for examples on which
+ the detector is highly uncertain (i.e., near the detec-
74
+ tor’s current decision boundary) as these are known to yield
75
+ better performance (Pimentel et al. 2020; Culotta and Mc-
76
+ Callum 2005). Alas, using such labels to evaluate the detec-
77
+ tor’s performance, as required when setting the threshold in
78
+ LR, will yield a biased performance estimate and hence a
79
+ sub-optimal threshold (Marrocco, Molinara, and Tortorella
80
+ 2007). Thus, if a user has a fixed budget for acquiring la-
81
+ bels there is a tension between collecting (a) strategic labels
82
+ that can be used to train a better detector, or (b) i.i.d. labels
83
+ that can be used to evaluate performance and set a proper
84
+ rejection threshold. Therefore, a data scientist is confronted
85
+ with the challenging question of how they should optimally
86
+ allocate their label budget between these two purposes.
87
+ In this paper, we assume that the label budget can be
88
+ split and allocated in multiple rounds. We introduce BAL-
89
+ LAD (Budget allocation for Active Learning and Learning to
90
+ reject in Anomaly Detection) a novel adaptive strategy that,
91
+ in each allocation round, (1) measures the potential reward
92
+ obtained by assigning the budget to either AL or LR, and (2)
93
+ chooses the highest reward option to collect the labels.
94
+ arXiv:2301.02909v1 [cs.LG] 7 Jan 2023
95
+
96
+ Preliminaries and Related Work
97
+ Anomaly Detection.
98
+ Let X be a d-dimensional random
+ variable with unknown distribution p(X). We are given a dataset D =
+ {x1, . . . , xn} with n examples and d features, drawn i.i.d.
101
+ from p(X). Let V = {xn+1, . . . , xm} ∼i.i.d p(X), m >
102
+ n, be a validation set. Let Y be the label random variable,
103
+ such that Y |X = x indicates the class label (1 if anomaly,
104
+ 0 if normal) for x ∈ Rd. An anomaly detection problem
105
+ is the task of finding an anomaly score function h: Rd →
106
+ R and a threshold t ∈ R such that Y = ht(X), where
+ ht(x) = 1 if h(x) ≥ t and 0 otherwise. Usually, one sets t
108
+ based on the contamination factor γ, i.e. the proportion of
109
+ anomalies (Perini, Buerkner, and Klami 2022).
110
+ Pool-based Active Learning (AL).
111
+ The goal of pool-
112
+ based AL strategies is to reduce the detector’s uncertainty
113
+ by selecting the most informative training instances. The
114
+ AL approaches can be classified into 3 categories (Monarch
115
+ 2021): uncertainty-based sampling strategies aim to select
116
+ the unlabeled data samples with the highest uncertainty (Ha-
117
+ cohen, Dekel, and Weinshall 2022), diversity strategies cap-
118
+ ture the diversity among the training data (Abe, Zadrozny,
119
+ and Langford 2006; Dagan and Engelson 1995), combined
120
+ strategies integrate the advantages of uncertainty-based and
121
+ diversity-based criteria (Ebert, Fritz, and Schiele 2012).
122
+ Learning to Reject (LR).
123
+ The goal of a detector’s re-
124
+ ject option is to abstain from making a prediction when
125
+ a detector is too uncertain about predicting a test exam-
126
+ ple (Hendrickx et al. 2021; Cortes, DeSalvo, and Mohri
127
+ 2016). Our goal is to develop a detector-agnostic strategy
128
+ that does ambiguity rejection, as novelty rejection would
129
+ reject all anomalies. Thus, we use a dependent rejector ar-
130
+ chitecture (Chow 1970). We indicate by C(x) the detector’s
131
+ confidence for predicting x ∈ V , and with τ ∈ [0, 1] the re-
132
+ jection threshold. If the confidence is below τ, the prediction
133
+ is rejected: ht(x) = ®. Note that for appropriate inference,
134
+ we need to collect validation labels randomly (i.i.d.).
135
+ A strategy to allocate the label budget
136
+ This paper tackles the following problem:
137
+ Given: initially unlabeled training set D and validation set
138
+ V , the dataset’s contamination factor γ, an anomaly de-
139
+ tector h, and a label budget B;
140
+ Do: decide whether, in each allocation round k, to acquire
141
+ labels for D (AL) or for V (LR).
142
+ Both training the detector with more labels (AL) and learn-
143
+ ing a threshold using larger validation data (LR) improve the
144
+ detector’s performance. However, choosing the side to max-
145
+ imize such improvement is challenging for multiple reasons.
146
+ First, it requires measuring the reward of either side, i.e. the
147
+ expected gain in terms of the detector’s improvement. Sec-
148
+ ond, the rewards need to be on a similar scale such that nei-
149
+ ther side is privileged during the process. Third, comparing
150
+ a standard detector to one with the reject option is challeng-
151
+ ing because the latter needs ad-hoc metrics to overcome the
152
+ problem of predicting three classes (anomaly, normal, re-
153
+ ject) (Nadeem, Zucker, and Hanczar 2009).
154
+ In this paper, we introduce BALLAD, a strategy that mea-
155
+ sures the reward of allocating the budget for AL, i.e. collect-
156
+ ing strategic labels on the training set, and for LR, i.e., col-
157
+ lecting random labels on the validation set. Let B = k·b ∈ N
158
+ be our labelling budget. We perform k rounds and the la-
159
+ bels of b examples are queried in each round. We initialize
160
+ the problem by (1) training the detector with no labels and
161
+ setting a default rejection threshold, and (2) collecting b ran-
162
+ dom labels for V (LR) and for D (AL) for a total of 2b labels.
163
+ This allows us to compute the initial rewards by measuring
164
+ how the detector varies from (1) to (2): for LR, we mea-
165
+ sure the variation after re-setting the validation threshold;
166
+ for AL, we measure the variation after re-training the detec-
167
+ tor with the new labels. Then, we start the allocation loop. In
168
+ each round, we allocate the budget to the option (LR or AL)
169
+ with the highest reward, and we update the reward using the
170
+ new labels. We propose two alternative reward functions: 1)
171
+ the entropy reward looks at the detector’s probabilities, ei-
172
+ ther for prediction (AL), or for rejection (LR); 2) the cosine
173
+ reward considers the predicted class labels, either anomaly
174
+ yes/no (AL), or rejection yes/no (LR).
175
+ Measuring the reward
176
+ Because we do not know how beneficial the next label al-
177
+ location would be for the detector, we look at the past and
178
+ measure the effect of the last allocation round. Our challenge
179
+ is to design a reward function that reflects the gain when
180
+ querying the labels. We use the following methods to derive
181
+ the reward for both AL and LR, by using as detector’s prob-
182
+ abilities either the probability of predicting anomaly (AL),
183
+ or the probability of rejecting the example (LR). Similarly
184
+ to Vercruyssen et al. (2022), we consider two scenarios:
185
+ Entropy.
186
+ Adding more labels has the ability to decrease
187
+ the overall uncertainty of the anomaly detector. Thus, we
188
+ measure the variation of the detector’s probabilities as:
189
+ Re(k) = Ex∼X [ |H(hk(x)) − H(hk−1(x))| ],    (1)
191
+ where H(h(x)) = −p log2 p is the entropy of the detector’s
192
+ probabilities p, and the subscript indicates the query round
193
+ (for k > 2). A large difference in entropy means a large
194
+ detector variation, which indicates a large impact of the new
195
+ labels and, in turn, a large reward Re.
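+ A minimal sketch of Eq. (1) for arrays of per-example probabilities; the
+ clipping constant is an added numerical-safety assumption.
+ import numpy as np
+
+ def entropy_reward(p_new, p_old, eps=1e-12):
+     # Mean absolute change of H(p) = -p log2 p between consecutive rounds.
+     H = lambda p: -np.clip(p, eps, 1.0) * np.log2(np.clip(p, eps, 1.0))
+     return float(np.mean(np.abs(H(np.asarray(p_new)) - H(np.asarray(p_old)))))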
196
+ Cosine.
197
+ More directly, one can measure the impact of the
198
+ labels in terms of variation of class predictions. Given the
199
+ detector’s probabilities, we threshold them at 0.5 and assign
200
+ value 1 to higher probabilities and 0 to lower ones. Thus, we
201
+ measure the cosine similarity between different outputs as
202
+ Rc(k) = ED∼X [ 1 − (hk(D) · hk−1(D)) / (∥hk(D)∥ ∥hk−1(D)∥) ],    (2)
210
+ where h(D) is a vector containing the outputs (0 or 1) by the
211
+ detector h, and ∥·∥ is the Euclidean norm. This metric is less
212
+ sensitive to little variations in the detector and discriminates
213
+ more in case the new labels change the predicted class.
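+ A matching sketch of Eq. (2); the zero-denominator guard is an added
+ assumption for the edge case where a prediction vector is all zeros.
+ import numpy as np
+
+ def cosine_reward(p_new, p_old):
+     # 1 - cosine similarity between 0/1 prediction vectors (threshold at 0.5).
+     h_new = (np.asarray(p_new) >= 0.5).astype(float)
+     h_old = (np.asarray(p_old) >= 0.5).astype(float)
+     denom = np.linalg.norm(h_new) * np.linalg.norm(h_old)
+     return 1.0 if denom == 0 else float(1.0 - h_new @ h_old / denom)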
214
+
215
+ Deriving the detector’s probabilities
216
+ Measuring the reward needs some probabilities, which are
217
+ not easy to derive due to the partially supervised setting. For
218
+ both prediction and rejection, we exploit the squashing func-
219
+ tion: given a positive real score s ∈ R+ and a threshold
220
+ λ ∈ R+, the squashing function
221
+ Sλ : R+ → (0, 1),    Sλ(s) = 1 − 2^(−s²/λ²)
224
+ maps s to a probability > 0.5 if s > λ, and ≤ 0.5 other-
225
+ wise. Roughly speaking, Sλ calibrates the probabilities by
226
+ centering λ as the decision threshold.
227
+ Detector’s posterior probabilities.
228
+ Given the contamina-
229
+ tion factor γ, a common approach to set the threshold t is
230
+ by forcing the detector to have a training positive class rate
231
+ equal to γ. Thus, one can center the probabilities to t by
232
+ transforming the anomaly scores h(x) through the squash-
233
+ ing function:
234
+ P(ht(x) = 1) = St(h(x))  s.t.  t = Qh(1 − γ).
+ We set t as the (1 − γ)-th quantile of the score distribution such
236
+ that only a proportion of γ scores have P(ht(x) = 1) ≥ 0.5.
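+ A minimal sketch of the squashing function and this posterior construction
+ (scores are assumed positive, as required by the domain R+):
+ import numpy as np
+
+ def squash(s, lam):
+     # S_lambda(s) = 1 - 2^(-s^2 / lambda^2); equals 0.5 exactly at s = lambda.
+     return 1.0 - 2.0 ** (-(np.asarray(s) / lam) ** 2)
+
+ def posterior(scores, gamma):
+     # Center the squashing at the (1 - gamma)-quantile threshold t, so that
+     # a fraction ~gamma of the scores gets probability >= 0.5.
+     t = np.quantile(scores, 1.0 - gamma)
+     return squash(scores, t)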
237
+ Rejection probabilities.
238
+ Given a validation set with some
239
+ labels, we (1) set a specific detector confidence C(x), and (2)
240
+ set the rejection threshold τ ∈ [0, 1]. For the former, we use
241
+ the detector’s posterior probabilities:
242
+ C(x) = 2 × |P(ht(x) = 1) − 0.5| ∈ [0, 1].
245
+ Thus, the closer P(ht(x) = 1) is to 0.5 (high uncertainty),
246
+ the lower the detector confidence. For the latter, we opti-
247
+ mize the threshold τ over the validation set (only the la-
248
+ beled examples) by minimizing a cost function M(ht). Fi-
249
+ nally, we compute the rejection probabilities by centering the
+ (1 − confidence) values at the rejection threshold, i.e. by applying
251
+ the squashing function
252
+ P(ht(x) = ®) = Sτ(1 − C(x)).
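+ Continuing the sketch, the confidence and rejection probabilities can be
+ computed as follows (same assumptions as above):
+ import numpy as np
+
+ def confidence(p_anomaly):
+     # C(x) = 2 * |P(h_t(x) = 1) - 0.5|, in [0, 1].
+     return 2.0 * np.abs(np.asarray(p_anomaly) - 0.5)
+
+ def rejection_probability(p_anomaly, tau):
+     # P(h_t(x) = reject) = S_tau(1 - C(x)): low confidence squashed around tau.
+     s = 1.0 - confidence(p_anomaly)
+     return 1.0 - 2.0 ** (-(s / tau) ** 2)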
253
+ The cost-based evaluation metric
254
+ Given a detector with a reject option and a detector without
255
+ it, we cannot compare their performance on the non-rejected
256
+ examples, as they would have different test sets. Thus, we
257
+ introduce a cost-based evaluation metric. Formally, given a
258
+ rejection cost cr > 0, a false positive cost cfp > 0, and a
259
+ false negative cost cfn > 0, the detector is evaluated as:
260
+ Mh = cr · P(ht(X) = ®) + cfp · P(ht(X) = 1|Y = 0)
261
+ + cfn · P(ht(X) = 0|Y = 1).
262
+ Note that we assume cost null for the correct predictions,
263
+ while every misprediction as well as the rejection gets pe-
264
+ nalized. Because rejecting is assumed to be less costly than
265
+ mispredicting, the rejection cost needs to satisfy the inequal-
266
+ ity cr ≤ min{cfp × (1 − γ), cfn × γ}, otherwise one could
267
+ predict either always normal and pay an expected cost of
268
+ cfn × γ, or always anomaly and pay cfp × (1 − γ).
269
+ Experiments
270
+ We experimentally answer the following questions:
271
+ Q1. Does BALLAD result in lower costs when compared to
272
+ using only AL or LR?
273
+ Q2. Which reward metric is better?
274
+ Q3. Is the reward function on a similar scale for AL and LR?
275
+ Q4. How does our strategy behave when varying cfp, cfn ?
276
+ Experimental setup
277
+ Methods.
278
+ We compare BALLAD1 to two baselines: ALL-
279
+ IN-AL allocates all the budget for active learning and sets
280
+ the rejection threshold using the (biased) training labels; on
281
+ the contrary, ALL-IN-LR allocates all the budget for learn-
282
+ ing to reject and uses an unlabeled training set.
283
+ Table 1: Properties of the 18 datasets used.
284
+ Dataset             # Examples   # Features     γ
+ ALOI                   12384         27       0.0304
+ Annthyroid              7129         21       0.0749
+ Arrhythmia               271        259       0.0996
+ Cardiotocography        1734         21       0.0496
+ Glass                    214          7       0.0421
+ InternetAds             1682       1555       0.0499
+ KDDCup99               48113         40       0.0042
+ PageBlocks              5473         10       0.1023
+ PenDigits               9868         16       0.0020
+ Pima                     526          8       0.0494
+ Shuttle                 1013          9       0.0128
+ SpamBase                2661         57       0.0499
+ Stamps                   340          9       0.0912
+ WBC                      223          9       0.0448
+ WDBC                     367         30       0.0272
+ WPBC                     160         33       0.0562
+ Waveform                3443         21       0.0290
+ Wilt                    4655          5       0.0199
360
+ Data.
361
+ We carry out our study on 18 publicly available
362
+ benchmark datasets, which are widely used in the litera-
363
+ ture (Campos et al. 2016). See Table 1 for the properties.
364
+ Setup.
365
+ For each of the 18 benchmark datasets, we go as
366
+ follows: (i) we split the dataset into training, validation and
367
+ test sets using the proportions 40 − 40 − 20 (we have a large
368
+ validation set to better measure the impact of rejection); (ii)
369
+ we fit the anomaly detector on the unlabeled dataset and set
370
+ the rejection threshold to the default value of 0.1; (iii) we
+ allocate a budget b to LR and AL by randomly selecting
+ the initial examples; (iv) we optimize the rejection thresh-
+ old and measure the LR reward; (v) we train the anomaly
+ detector on the partially labeled training set and measure the
+ AL reward; (vi) we allocate the next-round budget b to the
+ option with the highest reward and repeat (iv) or (v) until
377
+ the whole budget B is used. During each of the steps, we
378
+ measure the detector performance on the test set using our
379
+ 1Code available at https://github.com/Lorenzo-Perini/Ballad
380
+
381
+ cost function. We set B to 30% of the training set's size,
382
+ and b to 2% of it, such that we run 15 allocation rounds. We
383
+ repeat (i–vi) 10 times and report the average results. In total
384
+ we run 18 × 15 × 10 = 2700 experiments.
385
+ Costs and hyperparameters.
386
+ We set cfp = cfn = 1 and
387
+ cr = γ, following the cost inequality. SSDO (Vercruyssen
388
+ et al. 2018) with its default parameters is used as the semi-
389
+ supervised anomaly detector (Soenen et al. 2021). We use
390
+ IFOREST (Liu, Ting, and Zhou 2008) as its unsupervised
391
+ prior. We use Uncertainty Sampling as the active learning
392
+ strategy (Zhan et al. 2021), and the entropy as default re-
393
+ ward. For setting the rejection threshold, we use Bayesian
394
+ Optimization (GP MINIMIZE implemented in SKOPT) with
395
+ 20 calls (Frazier 2018) and limit the rejection rate on the
396
+ validation set to 50%.
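+ A minimal sketch of this threshold search with SKOPT's gp_minimize; the
+ val_cost objective is an assumed helper that returns the cost metric on the
+ labeled validation examples (with a large penalty whenever the rejection
+ rate exceeds 50%).
+ from skopt import gp_minimize
+
+ def fit_rejection_threshold(val_cost, n_calls=20):
+     # Bayesian optimization of tau in [0, 1] with 20 objective evaluations.
+     result = gp_minimize(lambda tau: val_cost(tau[0]), [(0.0, 1.0)], n_calls=n_calls)
+     return result.x[0]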
397
+ Experimental results
398
+ Q1. Comparing BALLAD to the ALL-IN strategies. Fig-
399
+ ure 1 shows the comparison between BALLAD with the en-
400
+ tropy reward and the ALL-IN strategies on the 18 bench-
401
+ mark datasets. On 8 datasets (Arrhythmia, Glass, Kdd-
402
+ Cup99, Pima, SpamBase, Wbc, Wdbc, Wpbc), BALLAD re-
403
+ sults in evidently lower costs, although sometimes the differ-
404
+ ence is small. On 5 datasets (Cardiotocography, InternetAds,
405
+ PageBlocks, Stamps, Waveform) BALLAD performs simi-
+ larly to or worse than ALL-IN-AL. This happens because SSDO has
407
+ an overall high performance and a contained uncertainty in
408
+ the predictions. On the other hand, in 3 cases (Aloi, Annthy-
409
+ roid, Wilt), allocating all the budget for LR has a lower cost.
410
+ This is due to the detector being inaccurate and unable to
411
+ learn from the training labels, which makes learning an opti-
412
+ mal threshold more convenient. As support for this intuition,
413
+ we analyze the plain test AUC of SSDO on the whole test
414
+ set (no rejection) for each of the three previous cases. By ag-
415
+ gregating over the rounds, SSDO obtains an average AUC
416
+ equal to 0.86, 0.88, and 0.57 when the best strategy is, re-
417
+ spectively, BALLAD, ALL-IN-AL, and ALL-IN-LR. Finally,
418
+ BALLAD obtains an overall average cost of 0.043, which is
419
+ ≈ 20% lower than the baselines’ average cost (0.055 for
420
+ ALL-IN-AL, 0.054 for ALL-IN-LR).
421
+ Q2. Which reward function works better? We analyze
422
+ both types of reward functions that we introduced in Eq. 1
423
+ and Eq. 2. Table 2 shows the mean and standard deviation
424
+ of the cost, divided by allocation round. Overall, using the
425
+ cosine reward builds a strategy that produces on average low
426
+ costs for little budget (≤ 10%), whereas, for a higher bud-
427
+ get, the entropy reward obtains better average costs. This is
428
+ due to the highly imbalanced choices made by the cosine re-
429
+ ward: the strategy opts for AL in 93% of the cases, which
430
+ usually improves a lot the detector’s performance with few
431
+ labels but tends to produce little effect when enough labels
432
+ are given. On the other hand, the entropy reward is more bal-
433
+ anced and opts for AL in 63% of the cases. This allows the
434
+ detector to keep decreasing the costs while learning during
435
+ the allocation rounds and obtain more steady performance.
436
+ Q3. Is the entropy reward balanced for AL and LR? Fig-
437
+ ure 2 shows the distribution of the difference between AL
438
+ and LR entropy rewards over all the 2700 experiments. Neg-
439
+ Budget    Entropy Re         Cosine Rc
+ 2%        0.0536 ± 0.0401    0.0399 ± 0.0303
+ 4%        0.0465 ± 0.0330    0.0411 ± 0.0334
+ 6%        0.0443 ± 0.0284    0.0398 ± 0.0321
+ 8%        0.0436 ± 0.0303    0.0399 ± 0.0306
+ 10%       0.0420 ± 0.0299    0.0411 ± 0.0325
+ 12%       0.0416 ± 0.0303    0.0433 ± 0.0347
+ 14%       0.0413 ± 0.0306    0.0448 ± 0.0367
+ 16%       0.0408 ± 0.0288    0.0456 ± 0.0372
+ 18%       0.0403 ± 0.0290    0.0457 ± 0.0355
+ 20%       0.0412 ± 0.0297    0.0451 ± 0.0363
+ 22%       0.0407 ± 0.0301    0.0451 ± 0.0359
+ 24%       0.0421 ± 0.0325    0.0438 ± 0.0361
+ 26%       0.0417 ± 0.0345    0.0438 ± 0.0363
+ 28%       0.0416 ± 0.0332    0.0438 ± 0.0380
+ 30%       0.0418 ± 0.0345    0.0427 ± 0.0354
487
+ Table 2: Average (± std) cost per test example over the
488
+ datasets grouped by allocation round for each of the two re-
489
+ ward functions. For low budgets, the cosine reward obtains
490
+ lower costs, while not being competitive for high budgets.
491
+ ative values indicate that the LR reward is higher than the
+ AL reward, while the opposite holds for positive values. Overall,
493
+ the median is close to 0, which means that there is no clearly
494
+ predominant strategy. Because the left tail of the density is
495
+ larger than the right one, we conclude that LR rewards have
496
+ higher variability (std = 0.07 vs 0.03).
497
+ Q4. The impact of varying cfp and cfn. In this experi-
498
+ ment, we penalize more false positives and false negatives
499
+ by setting, one at a time, cfp and cfn to 10. We compare
500
+ BALLAD to the two ALL-IN baselines. For cfp = 10, our
501
+ strategy is still the best for low budgets (< 15%), reduc-
502
+ ing the relative cost by between 5% and 25% with respect
503
+ to the runner-up ALL-IN-LR. However, for higher budgets
504
+ (> 15%), ALL-IN-LR becomes the best strategy as it re-
505
+ duces BALLAD’s cost by around 20% and ALL-IN-AL’s cost
506
+ by more than 40%. This happens because the anomaly de-
507
+ tector produces too many false positives, which, if rejected,
508
+ allow us to reduce the cost. For cfn = 10, BALLAD performs
509
+ much better than the baselines, reducing their cost by around
510
+ 20% (vs ALL-IN-LR) and 24% (vs ALL-IN-AL).
511
+ Conclusion
512
+ We proposed BALLAD, a novel strategy to decide whether
513
+ to allocate the budget for Active Learning (AL), i.e. labeling
514
+ strategic training instances, or for Learning to Reject (LR),
515
+ i.e. labeling a random validation set. Our key insight is that
516
+ we can measure the expected reward when labeling either set
517
+ and allocate the label in the next round to the option with the
518
+ highest reward. We proposed two reward functions (entropy
519
+ and cosine similarity based). Experimentally, we evaluated
520
+ BALLAD on 18 datasets, and show that it performs better
521
+ than simply allocating all the labels to either AL or LR.
522
+
523
+ [Figure 1 panels: 18 per-dataset subplots (Aloi, Ann, Arr, Car, Glass, Int, Kdd, Page, Pen, Pima, Shu, Spam,
+ Stam, Wbc, Wdbc, Wpbc, Wave, Wilt); y-axis: cost per test example (0.03–0.12); x-axis: allocated budget
+ (4–28% of labeled examples); legend: AL-LR (BALLAD), All-in AL, All-in LR. Curves not recoverable from the
+ text extraction.]
660
+ Figure 1: Comparison between BALLAD and the ALL-IN strategies on the 18 benchmarks. The x-axis reports the 15 rounds of
661
+ 2% labels each. The y-axis shows the average cost per test example. BALLAD obtains lower costs in the majority of cases.
662
+ [Figure 2 panel: density of (AL reward − LR reward) over the 2700 experiments, x-axis from −0.3 to 0.3,
+ with the median marked. Curve not recoverable from the text extraction.]
672
+ Figure 2: Distribution of the difference between AL’s and
673
+ LR’s entropy reward. The median close to 0 indicates the
674
+ absence of a predominant strategy.
675
+ Acknowledgements.
676
+ This work was presented at the 1st
677
+ AAAI Workshop on Uncertainty Reasoning and Quantifica-
678
+ tion in Decision Making (UDM23).
679
+ This research is supported by an FB Ph.D. fellowship by
680
+ FWO-Vlaanderen (grant 1166222N) [LP], the Flemish Gov-
681
+ ernment under the “Onderzoeksprogramma Artificiële Intel-
682
+ ligentie (AI) Vlaanderen” programme [JD], and KUL Re-
683
+ search Fund iBOF/21/075 [JD].
684
+ References
685
+ Abe, N.; Zadrozny, B.; and Langford, J. 2006. Outlier detec-
686
+ tion by active learning. In Proceedings of ACM SIGKDD.
687
+ Campos, G. O.; Zimek, A.; Sander, J.; Campello, R. J.; Mi-
688
+ cenková, B.; Schubert, E.; Assent, I.; and Houle, M. E. 2016.
689
+ On the evaluation of unsupervised outlier detection: mea-
690
+ sures, datasets, and an empirical study. Data mining and
691
+ knowledge discovery.
692
+ Chandola, V.; Banerjee, A.; and Kumar, V. 2009. Anomaly
693
+ detection: A survey. ACM computing surveys (CSUR).
694
+ Chow, C. 1970. On optimum recognition error and reject
695
+ tradeoff. IEEE Transactions on information theory.
696
+ Cortes, C.; DeSalvo, G.; and Mohri, M. 2016.
697
+ Learning
698
+ with rejection. In International Conference on Algorithmic
699
+ Learning Theory. Springer.
700
+ Culotta, A.; and McCallum, A. 2005. Reducing labeling ef-
701
+ fort for structured prediction tasks. In AAAI.
702
+ Dagan, I.; and Engelson, S. P. 1995. Committee-based sam-
703
+ pling for training probabilistic classifiers. In Machine Learn-
704
+ ing Proceedings 1995. Elsevier.
705
+ De Stefano, C.; Sansone, C.; and Vento, M. 2000. To reject
706
+ or not to reject: that is the question-an answer in case of
707
+ neural classifiers. IEEE Transactions on Systems, Man, and
708
+ Cybernetics, Part C (Applications and Reviews).
709
+ Ebert, S.; Fritz, M.; and Schiele, B. 2012.
710
+ Ralf: A rein-
711
+ forced active learning formulation for object class recogni-
712
+ tion.
713
+ In 2012 IEEE Conference on Computer Vision and
714
+ Pattern Recognition. IEEE.
715
+ Frazier, P. I. 2018. Bayesian optimization. In Recent ad-
716
+ vances in optimization and modeling of contemporary prob-
717
+ lems. Informs.
718
+ Hacohen, G.; Dekel, A.; and Weinshall, D. 2022. Active
719
+ learning on a budget: Opposite strategies suit high and low
720
+ budgets. arXiv preprint arXiv:2202.02794.
721
+ Hendrickx, K.; Perini, L.; Van der Plas, D.; Meert, W.; and
722
+ Davis, J. 2021. Machine learning with a reject option: A
723
+ survey. arXiv preprint arXiv:2107.11277.
724
+ Liu, F. T.; Ting, K. M.; and Zhou, Z.-H. 2008.
725
+ Isolation
726
+ forest. In 8th IEEE international conference on data mining.
727
+ Marrocco, C.; Molinara, M.; and Tortorella, F. 2007. An em-
728
+ pirical comparison of ideal and empirical ROC-based reject
729
+
730
+ rules. In International Workshop on Machine Learning and
731
+ Data Mining in Pattern Recognition. Springer.
732
+ Martí, L.; Sanchez-Pi, N.; Molina, J. M.; and Garcia, A.
733
+ C. B. 2015.
734
+ Anomaly detection based on sensor data in
735
+ petroleum industry applications. Sensors.
736
+ Monarch, R. M. 2021. Human-in-the-Loop Machine Learn-
737
+ ing: Active learning and annotation for human-centered AI.
738
+ Simon and Schuster.
739
+ Nadeem, M. S. A.; Zucker, J.-D.; and Hanczar, B. 2009.
740
+ Accuracy-rejection curves (ARCs) for comparing classifica-
741
+ tion methods with a reject option. In Machine Learning in
742
+ Systems Biology. PMLR.
743
+ Perini, L.; Buerkner, P.; and Klami, A. 2022.
744
+ Estimat-
745
+ ing the Contamination Factor’s Distribution in Unsupervised
746
+ Anomaly Detection. arXiv preprint arXiv:2210.10487.
747
+ Perini, L.; Vercruyssen, V.; and Davis, J. 2020. Quantify-
748
+ ing the confidence of anomaly detectors in their example-
749
+ wise predictions. In Joint European Conference on Machine
750
+ Learning and Knowledge Discovery in Databases. Springer.
751
+ Perini, L.; Vercruyssen, V.; and Davis, J. 2022. Transfer-
752
+ ring the Contamination Factor between Anomaly Detection
753
+ Domains by Shape Similarity. In Proceedings of the AAAI
754
+ Conference on Artificial Intelligence.
755
+ Pimentel, T.; Monteiro, M.; Veloso, A.; and Ziviani, N.
756
+ 2020. Deep active learning for anomaly detection. In 2020
757
+ International Joint Conference on Neural Networks. IEEE.
758
+ Soenen, J.; Van Wolputte, E.; Perini, L.; Vercruyssen, V.;
759
+ Meert, W.; Davis, J.; and Blockeel, H. 2021. The effect of
760
+ hyperparameter tuning on the comparative evaluation of un-
761
+ supervised anomaly detection methods. In Proceedings of
762
+ the KDD.
763
+ Vercruyssen, V.; Meert, W.; Verbruggen, G.; Maes, K.;
764
+ Baumer, R.; and Davis, J. 2018. Semi-supervised anomaly
765
+ detection with an application to water analytics. In 2018
766
+ IEEE international conference on data mining (icdm). IEEE.
767
+ Vercruyssen, V.; Perini, L.; Meert, W.; and Davis, J.
768
+ 2022. Multi-domain Active Learning for Semi-supervised
769
+ Anomaly Detection. ECML 2022 published proceedings.
770
+ Zhan, X.; Liu, H.; Li, Q.; and Chan, A. B. 2021. A Compara-
771
+ tive Survey: Benchmarking for Pool-based Active Learning.
772
+ In IJCAI.
773
+ Zhao, N.; Wen, X.; and Li, S. 2016. A review on gas tur-
774
+ bine anomaly detection for implementing health manage-
775
+ ment. Turbo Expo: Power for Land, Sea, and Air.
776
+
JNE1T4oBgHgl3EQfGAOI/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
JtE1T4oBgHgl3EQfGQNW/content/2301.02911v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:676ea7c1d58107b9a2a9311456760b40a2f225b4d7afe56f06f7ce38ddfc9f96
+size 4918836
JtE1T4oBgHgl3EQfGQNW/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b38b236c2a19700d7a94a7e25f0fbe28d04e7e20e7f3013f30d9654ec6cbb02f
+size 2621485
K9FRT4oBgHgl3EQf1Th-/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:207457e451cac8ea7f4857eddff6b68e8bb52b64192b9f4afdefe5583e1038f9
+size 273430
KtFRT4oBgHgl3EQf1Dhl/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8de351a8f9e517413135191ffd059d92ad4a875b965ad88ad1c4aa751ef872b7
+size 5373997
LdE0T4oBgHgl3EQfSgCx/content/2301.02224v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6fbfb5bc727213a2c5d1eea829a72f6771b35275376effd430cbc0b8527029f
+size 1695680
LdE0T4oBgHgl3EQfSgCx/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46a61882910945a4b979474e95697a4c1e332f38a91aee88b95134d8f0f36077
+size 3276845
NdAyT4oBgHgl3EQfUPel/content/tmp_files/2301.00122v1.pdf.txt ADDED
@@ -0,0 +1,873 @@
RESEARCH ARTICLE
European Journal of Information Technologies and Computer Science
www.ej-compute.org
DOI: http://dx.doi.org/10.24018/ejcompute.YEAR.VOL.ISSUE.ID
Vol X | Issue Y | Month Year
ISSN: 2736-5492

Hair and Scalp Disease Detection using Machine Learning and Image Processing

Mrinmoy Roy, Anica Tasnim Protity

Mrinmoy Roy: Department of Computer Science, Northern Illinois University, USA (e-mail: mrinmoy.cs10@gmail.com)
Anica Tasnim Protity: Department of Biological Sciences, Northern Illinois University, USA (e-mail: protity.microbiology@gmail.com)
* Corresponding Author

ABSTRACT

Almost 80 million Americans suffer from hair loss due to aging, stress, medication, or genetic makeup. Hair and scalp-related diseases often go unnoticed in the beginning. Sometimes, a patient cannot differentiate between hair loss and regular hair fall. Diagnosing hair-related diseases is time-consuming, as it requires professional dermatologists to perform visual and medical tests. Because of that, the overall diagnosis gets delayed, which worsens the severity of the illness. Due to their image-processing ability, neural network-based applications are used in various sectors, especially healthcare and health informatics, to predict deadly diseases like cancers and tumors. These applications assist clinicians and patients and provide an initial insight into early-stage symptoms. In this study, we used a deep learning approach that successfully predicts three main types of hair loss and scalp-related diseases: alopecia, psoriasis, and folliculitis. However, the limited studies in this area, the unavailability of a proper dataset, and the degree of variety among the images scattered over the internet made the task challenging. 150 images were obtained from various sources and then preprocessed by denoising, image equalization, enhancement, and data balancing, thereby minimizing the error rate. After feeding the processed data into the 2D convolutional neural network (CNN) model, we obtained an overall training accuracy of 96.2%, with a validation accuracy of 91.1%. The precision and recall scores of alopecia, psoriasis, and folliculitis are 0.895, 0.846, and 1.0, respectively. We also created a dataset of the scalp images for future prospective researchers.

Keywords: Deep Learning, Health Informatics, Machine Learning, Scalp/Hair Diseases.

I. INTRODUCTION
Hair, made of keratin protein, pertains to beauty and masculinity. Approximately 5 million hair follicles are present throughout our body [1]. Scalp hair maintains body temperature and protects the brain from external heat. A typical hair growth cycle runs for 2-7 years, according to Patel et al. [2] and Wolff, Fischer, and Blume-Peytavi [3]. A healthy human has 100,000 hairs on the scalp, and losing 50-100 hairs per day is considered normal. Hair loss is not a present-day issue: hair-loss treatments appear in ancient Ayurveda scriptures from 6,000 years ago [2]. However, hair and scalp-related issues are gaining more recognition nowadays than in earlier years due to factors such as environmental pollution, hormonal imbalance, autoimmune disease, gut microbiota alteration, elevated physical and mental stress levels in the human lifestyle, seasonal change, unhealthy diet, micronutrient deficiency, genetic predisposition, and side effects of drugs [2], [3]. According to Peyravian et al., 80 million Americans have hair loss-related issues to some extent [4]. Although most hair loss diseases are localized, some can spread to other locations. Some diseases require prescribed drugs and hair transplantation. Some are caused by bacterial or fungal infections and require antibiotic treatment. Often, there are genetic and sexual predispositions in hair-scalp diseases.

Alopecia, folliculitis, and psoriasis are some common causes of hair loss. There is a difference between regular hair fall and alopecia; the latter develops coin-sized bald patches all over the scalp area. Alopecia, or patchy hair loss, can be of different types. Androgenetic alopecia, or male-pattern baldness (MPB), is the most common form of alopecia, in which the hairline starts to recede following a pattern where the frontal and temple areas are most affected. 70% of men and 40% of women get this type of hair loss and thinning issue [3]. According to Liu et al., MPB is an X-linked polygenic disease, and males are more genetically prone to develop baldness at a mature age [5]. Topical minoxidil solution thickens the hair by 50% [3]. On the other hand, alopecia areata (AA) is an autoimmune disease affecting individuals irrespective of age and sex. Primarily affecting the scalp area, AA can also spread to the beard, eyelashes, and eyebrows. In this case, the body's immune cells cannot recognize hair follicles as 'self.' Instead, they consider these follicles 'foreign,' which ultimately causes the hair follicles to be
targeted and destroyed by the immune cells. It is an example of a hereditary disease. The study from Benigno et al. reported that, in the US alone, 700,000 individuals suffer from AA [6]. This disease, if diagnosed early, might resolve spontaneously. In severe cases, topical corticosteroid or immune therapy is used [3].

Sometimes, the hair follicles might get inflamed because of bacterial accumulation. This follicle inflammation is called folliculitis decalvans. The bacterium Staphylococcus aureus damages the follicle and prevents hair growth. Staphylococcus aureus uses hair tufts to enter underneath the follicle, causing chronic inflammation, redness, swelling, scarring, itching, and hair loss. Antibiotic treatment, combined with surgical removal of hair tufts and corticosteroids for reducing inflammation, is the prescribed treatment for folliculitis decalvans [3]. Psoriasis is another common scalp skin disease. According to [7], 54% of 5,600 psoriasis patients had scalp psoriasis. Severe psoriasis may cause significant itching, scaling, and redness on the scalp. Topical shampoo and corticosteroids are the treatment options given by Chan et al. [8].

Some scalp infections may be treatable if diagnosed early. Some, but not all, diseases may resolve on their own. Only an expert physician can detect the illness by visual observation. In some cases, early disease detection helps dermatologists initiate the treatment. An early scalp inspection includes a dermatoscopic examination of the scalp for inflammation, itching, localized lesions, dandruff, follicular flakes, and louse eggs (nits), and a scalp biopsy. Besides visual observation, the patient can undergo blood and hormone tests to detect the exact disease. Unfortunately, most hair and scalp diseases are diagnosed at advanced stages, which complicates the treatment options. All these factors lengthen the diagnosis and treatment process. Therefore, researchers are putting more effort into developing different mechanisms for the early detection of hair and scalp diseases.

In the 21st century, with all the advancements in computational technology, the extensive application of machine learning has made our daily lives simple, comfortable, and secure. The increasing popularity of machine learning and its ability to extract patterns from data are directing researchers to incorporate machine learning algorithms into health informatics. Especially during the Covid-19 pandemic era, applications such as restraining people from Covid-19 spread [9], SARS-CoV-2 screening and treatment [10], and lock-down control in the case of high-dimensional input [11] came into play, which made machine learning and healthcare systems inseparable. Overall, adapting, integrating, and developing deep learning-based applications on patients' information, medical reports, and audio-video feedback makes the diagnosis process faster. Nowadays, patients can get at least an initial idea of disease detection by themselves using easily accessible smart devices. All these applications clear their confusion and help them make health-related decisions independently.

The high computational capability of neural networks is, therefore, a breakthrough for healthcare and medical diagnostic organizations. Convolutional neural networks (CNN) have brought revolutionary success in detecting deadly diseases. To date, neural networks are assisting healthcare professionals in the early detection of different types of tumors and cancers, such as skin cancer (melanoma) [12], stomach cancer (adenocarcinoma) [13], and brain tumors (glioblastoma) [14]. Neural networks are applicable in detecting life-threatening dengue fever [15] and Covid-19 [16] as well. In one study, a CNN was used to extract complex temporal dynamic features from heart rate variability (HRV) signals, developing an algorithm that facilitated the early detection of diabetes [17]. Using the image-processing ability of neural networks, we can extract features from hair, skin, and scalp images to classify and categorize numerous hair and scalp-related diseases. In this work, given the importance of early-stage hair disease detection, we applied convolutional neural networks to three types of hair diseases and developed a model to detect them successfully.
II. CHALLENGES AND CONTRIBUTIONS

A classic application of computer vision is detecting disease using digital images. Researchers can exploit a pool of digital images obtained from one or more datasets, preprocess the images, feed them into a neural network, and develop a model to detect the disease. Unfortunately, minimal research has been performed on machine-learning approaches to scalp disease detection. There are several unique challenges behind this. First and foremost, hair diseases are not localized and can spread to different regions of the scalp, beard, eyebrows, eyelashes, and pubic area. Second, every image needs a different type of preprocessing before being fed to a neural network; different scalp skin tones, hair colors, and hair types around the detection zones make the imaging process more complicated. Third, no proper dataset for scalp diseases is available on the internet, and images taken from the internet differ in size and resolution. Moreover, one must be conscious of minimizing and correcting errors in disease detection; otherwise, high false-positive and false-negative rates result in misdiagnosis of the disease and worsening hair loss.

To overcome these challenges, we developed a model that can successfully classify alopecia, folliculitis, and psoriasis with minimal false-positive and false-negative rates. Although it is challenging to collect images of these diseases from the internet, and the images vary in color, shape, and resolution, we applied various preprocessing steps, such as denoising, resizing, and enhancement, and created a dataset that might help further scalp disease research.
III. RELATED WORKS

Disease detection using machine learning approaches is gaining popularity in health informatics. Many skin and scalp-related diseases can be detected using images of infected regions within a few seconds. In one study by Choudhary et al. [18], a framework was developed to differentiate alopecia areata from healthy hair. They obtained 200 healthy hair images from the Figaro1k dataset and 68 alopecia areata hair images from DermNet. After a series of enhancement and segmentation steps, three key features were
extracted from the images: texture, shape, and color. The researchers divided the dataset into a 70%-30% train-test split and applied a support vector machine (SVM) and k-nearest neighbors (KNN) for the classification task. Overall, they achieved 91.4% and 88.9% accuracy using SVM and KNN, respectively, with a 10-fold cross-validation approach. However, using other machine learning algorithms might have increased the accuracy rate, which should have been discussed. Besides, the application of histogram equalization (HE) for image enhancement complicated the process of getting accurate texture features from distorted images, as HE itself adds noise to the output image, distorting the signals. Moreover, this study only shed light on alopecia areata, ignoring the inter-class differences between other similar diseases, which increased the likelihood of inaccurately predicting other diseases as alopecia areata, thereby making the framework less reliable.

Another study [19] proposed a model for early alopecia detection. They used 100 samples for this research, with 80% as training data and the other 20% as testing data. They looked for four attributes: length of the hair, nail brittleness, amount of damage to the hair, and hair follicle. A two-layer feed-forward network with a back-propagation technique was used for detection. The proposed model, consisting of 4 input neurons, 10 hidden neurons, and a linear output neuron, achieved 91% training accuracy with 86.7% validation accuracy. It showed the best performance at epoch 4 with a 0.059687 gradient. However, the study has some pitfalls, too, as the authors did not mention their data source or differentiate the data classes with their respective sample sizes. Also, no image preprocessing was performed on the collected images. Although there is a possibility of overfitting without a proper data balancing technique, the report did not discuss the data balancing between the two classes. Furthermore, they did not calculate the model's false-positive and false-negative rates, which is crucial for a model developed for the healthcare system.

Related work [20] was performed on skin disease detection, where machine learning was used to analyze digital images of the affected skin area to identify eczema, melanoma, and psoriasis. Their dataset consists of 80 images from different websites specific to skin diseases. By using a convolutional neural network for feature extraction and applying a multiclass SVM to those features, they achieved 100% accuracy in disease classification. However, they did not explore other essential model performance metrics or overfitting issues. In another skin disease detection article [21], the authors proposed a scheme to classify skin lesions into five categories: healthy, acne, eczema, benign, and malignant melanoma, using a pre-trained CNN model, AlexNet, for feature extraction and an error-correcting output codes support vector machine for classification. The dataset consists of 9,144 images from different sources, and they achieved 84.21% accuracy using a 10-fold cross-validation technique.

Overall, we observed very few works on hair diseases. The recent related works each lack at least one of the following: discussion of false-positive and false-negative rates, consideration of inter-class differences, model reliability, and the overfitting problem. In this work, we have attempted to fill these gaps by leveraging a convolutional neural network on hair disease images while maintaining high accuracy with good precision and recall scores.
IV. DATA DESCRIPTION & DEVICE

A. Data Collection

The most challenging part of using visual images for disease prediction and classification is data collection. Often, only a few appropriate images can be found for a specific illness. Moreover, the pictures are scattered across the internet. In this study, the authors extracted the images from different websites, such as DermQuest, DermNet, MedicineNet, and DermNetNZ, and from various medical professionals.

TABLE I: IMAGES PER DISEASE
Disease        Quantity
Alopecia       65
Psoriasis      45
Folliculitis   40

Fig. 1. Image subset of each disease category (panels: alopecia, psoriasis, folliculitis).

The image quantity differs for each category. We found more alopecia-related images than for the other diseases because alopecia is more frequent and severe among the human population. The number of samples for each type of disease is listed in Table I. Randomly selected images from each category are shown in Fig. 1. Our dataset is made publicly available on GitHub [22].

B. Device

The research was conducted on a Dell Latitude 5520 laptop with an 11th-generation Intel Core i5 (8 MB cache, 4 cores, 8 threads, up to 4.40 GHz Turbo) running Windows 10 Pro. The device has 16 GB (1 x 16 GB) DDR4 3200 MHz random access memory (RAM) and a 256 GB M.2 PCIe NVMe Class 35 SSD. For the classification of images, we utilized the integrated Intel Iris Xe graphics with Thunderbolt for the i5-1145G7 vPro processor. For the data
394
+ European Journal of Information Technologies and Computer Science
395
+ www.ej-compute.org
396
+
397
+
398
+
399
+
400
+ DOI: http://dx.doi.org/10.24018/ejcompute.YEAR.VOL.ISSUE.ID
401
+ Vol X | Issue Y | Month Year
402
+ 4
403
+
404
collection, we used an iPhone 13 Pro Max with a hexa-core CPU (2x3.23 GHz Avalanche + 4x1.82 GHz Blizzard) and an Apple GPU (5-core graphics). The mobile device has 128 GB storage, 6 GB RAM, and a 12 MP triple main camera for image collection.
V. PROPOSED MODEL

In this section, we introduce the system workflow of our model and explain the function of each module in detail. As shown in Fig. 2, the captured image is first sent to the preprocessing steps, which are divided into three parts: image equalization, image enhancement, and data balancing.

Fig. 2. System workflow of the hair disease detection model.

Among these three, the first two parts mainly increase image quality, and the last part improves model versatility. After the preprocessing steps, the image is passed to the neural network model for the classification task. We used a convolutional neural network that classifies an image into three classes: alopecia, folliculitis, and psoriasis.
A. Denoising

Fig. 3. Left: original image; right: non-local means denoised image.

Noise is the degradation of image signals caused by external sources [23]. It introduces random variations of brightness or color information into the captured images. Most of the time, images on the internet have some noise associated with them. As we collected most of the data samples from different dermatology websites, the noise in our dataset is not homogeneously distributed, which made it more complex. Therefore, we applied additional filters to denoise the collected images. We started with the Gaussian filter for a better image classification process. However, after using the Gaussian filter, the images became completely blurred, which led to the loss of important information and damage to the edges. We then applied the median filter with kernel_size = 3, which worked better than the Gaussian filter. Though we achieved better accuracy using the bilateral filter, we got the best results with the non-local means filter with patch_size = 3 and patch_distance = 5. The non-local means filter preserved the edges and reduced the noise better than the other filters for our application, as shown in Fig. 3.
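As a rough illustration of this step, a minimal sketch using scikit-image's non-local means filter with the stated patch_size and patch_distance (the input file name and the smoothing weight h are illustrative assumptions, not values from the paper):

    # Sketch: non-local means denoising with patch_size=3, patch_distance=5.
    import numpy as np
    from skimage import io, img_as_float
    from skimage.restoration import denoise_nl_means, estimate_sigma

    image = img_as_float(io.imread("scalp_sample.jpg"))   # hypothetical input file
    sigma = float(np.mean(estimate_sigma(image, channel_axis=-1)))  # rough noise level
    denoised = denoise_nl_means(image, patch_size=3, patch_distance=5,
                                h=0.8 * sigma, sigma=sigma,
                                fast_mode=True, channel_axis=-1)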
B. Image Equalization

Often the captured image does not reflect the natural view and needs contrast enhancement to reach a realistic level [24]. In particular, images with high color depth, and images after denoising, need normalization for a more realistic view [25]. First, we applied histogram equalization (HE). However, HE increases the contrast of the background when used on images with low color depth, and information is lost because the histogram is not confined to a local region. To overcome this problem, we applied CLAHE (Contrast Limited Adaptive Histogram Equalization), dividing an image into equal-sized non-overlapping regions and computing a histogram for each region. After clipping each histogram, we redistributed the clipped values over the histogram equalization, which gives us control over the over-amplification of contrast and generates the resultant image shown in Fig. 4.

Fig. 4. Image equalization using CLAHE.
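A minimal sketch of this equalization step with OpenCV, applying CLAHE to the lightness channel (the clip limit and the 8 x 8 tile grid are illustrative assumptions; the paper does not report its CLAHE parameters):

    # Sketch: CLAHE on the L channel of the LAB color space.
    import cv2

    bgr = cv2.imread("scalp_sample_denoised.jpg")   # hypothetical input file
    lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    l, a, b = cv2.split(lab)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))  # non-overlapping tiles
    equalized = cv2.cvtColor(cv2.merge((clahe.apply(l), a, b)), cv2.COLOR_LAB2BGR)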
C. Data Balancing

The overall performance of a machine learning model depends on a balanced dataset because, without it, minority-class detection becomes difficult. Balancing a dataset reduces the risk of skewing toward the majority. A model trained on imbalanced data might achieve high accuracy, but its results are biased toward the majority class. As alopecia is a common disease, we have more alopecia images than of the other diseases, which creates an imbalanced dataset for our model. To balance the dataset, we used data augmentation techniques (re-scaling, random rotation, cropping, and vertical and horizontal flipping) and oversampled the infrequent classes, as in the sketch below.
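This sketch assumes TensorFlow/Keras and an image-folder layout; the parameter values and the directory name are illustrative, and zoom stands in for random cropping:

    # Sketch: augmentation used to oversample the infrequent classes.
    from tensorflow.keras.preprocessing.image import ImageDataGenerator

    augmenter = ImageDataGenerator(
        rescale=1.0 / 255,     # re-scaling
        rotation_range=30,     # random rotation
        zoom_range=0.2,        # stands in for random cropping
        horizontal_flip=True,
        vertical_flip=True,
    )
    batches = augmenter.flow_from_directory("dataset/train", target_size=(256, 256),
                                            batch_size=16, class_mode="categorical")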
D. Neural Network Model

The neural network is the most widely applied model for visual data analysis. A neural network needs limited human assistance and
can identify complex non-linear relationships between input and output. From global- or local-scale modeling [26] to diagnosis by medical image classification, neural networks are used extensively. Moreover, facial recognition, image labeling, accurate video subtitles, call-center assistance, and automated virtual agents all use neural networks. Three types of neural network are commonly distinguished: artificial neural networks (ANN), convolutional neural networks (CNN), and recurrent neural networks (RNN). Each neural network has mainly three components: an input layer, a processing layer, and an output layer.

Fig. 5. Neural network model.

In this study, a CNN is utilized for classification because it takes an image's raw pixel data, trains a model, and extracts the features automatically for better detection. We used AutoKeras to find the best model for this problem. After trying 25 different combinations, we selected 3 hidden layers with 1 input and 1 output layer as our final model, shown in Fig. 5. For training, we used batch_size = 16 with 50 epochs. The preprocessed data is divided into a 70-30 train-test split for training and validation. Our model consists of 256 inputs, a 3 x 3 square kernel, 3 output units, and a softmax output. We used ReLU as our activation function to prevent the exponential growth of the required computation and to explore the non-linear relationship between input and output variables. After each convolutional layer, the input goes through a pooling layer with a 2 x 2 kernel to reduce the dimensions of the feature maps. The pooling layer summarizes the features present in a region and helps prevent overfitting by downsampling. We also used a dropout layer after each pooling layer to prevent the neurons in a layer from synchronously optimizing their weights and converging to the same goal. Our model's dropout rate is 0.3, meaning 30% of the neurons in the layer are randomly dropped in each epoch.

All the resultant 2-D arrays from the pooled feature maps pass through the flatten layer and are converted to a single-dimensional continuous linear vector in the transition toward the fully connected layers, as in Fig. 5. In the fully connected layers, every output pixel from the convolutional layers is connected to the 3 output classes. Though dense layers are computationally expensive, we used 2 dense layers for our classification task. Finally, we used a softmax activation function to transform the 3 units of the fully connected layer into a probability distribution, represented by a vector of 3 elements, with the highest-probability element selected as the final class. We leveraged the Adam optimizer to reduce the overall loss by adapting the weights and learning rates. We used Adam because it can handle sparse gradients on noisy problems and combines the best properties of the AdaGrad and RMSProp algorithms. A minimal sketch of a model of this shape is given below.
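The sketch assumes TensorFlow/Keras; the 256 x 256 x 3 input shape and the filter counts per convolutional layer are assumptions, since the paper does not state them:

    # Sketch: three Conv-Pool-Dropout blocks, flatten, two dense layers, softmax.
    from tensorflow.keras import layers, models

    model = models.Sequential([
        layers.Input(shape=(256, 256, 3)),
        layers.Conv2D(32, (3, 3), activation="relu"),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.3),
        layers.Conv2D(64, (3, 3), activation="relu"),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.3),
        layers.Conv2D(128, (3, 3), activation="relu"),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(0.3),
        layers.Flatten(),
        layers.Dense(128, activation="relu"),
        layers.Dense(3, activation="softmax"),
    ])
    model.compile(optimizer="adam", loss="categorical_crossentropy",
                  metrics=["accuracy"])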
VI. RESULTS

We trained our CNN model using the optimal hyperparameters selected from the grid search; they are listed in Table II. We divided the dataset into a 70%-30% train-test split, in which 105 randomly selected images are used for training and the remaining 45 images for testing. After applying the preprocessing steps, we used the training set to train the CNN model and evaluated the model on the test set.

TABLE II: HYPERPARAMETERS OF CNN MODEL
Hyperparameter        Value
Batch Size            16
Epochs                50
Kernel Size           3 x 3
Optimizer             Adam
Dropout Rate          0.3
Pooling Size          2 x 2
Activation Function   ReLU

Fig. 6. Training and validation loss for the CNN.
646
+ Our system achieved 96.2% training accuracy and
647
+ 91.1% validation accuracy on the unseen data. Validation and
648
+ training losses for every epoch are shown in Fig 6. The
649
+ training losses decreased from 1.1685 to 0.1017, and the
650
+ validation losses decrease from 1.1260 to 0.3438 while going
651
+ from epoch 1 to epoch 50. At the same time, Training
652
+ accuracy and validation accuracy increased to 96.2% and
653
+ 91.1%, respectively, from epoch 1 to epoch 50, shown in Fig.
654
+ 7.
655
+
656
+
657
+ Fig. 8. Confusion Matrix of Our Model.
658
+
659
+
660
+ Fig. 9. Fractional Incorrect Prediction of Our Model.
661
+
662
+ The confusion matrix in Fig. 8 shows the correct and
663
+ wrong classification for each category with inter-class
664
+ classification. Among 45 test images, alopecia (label 0) has
665
+ 19 images, psoriasis (label 1) has 13 images, and folliculitis
666
+ (label 2) has 13 images. A total of 17 alopecia images were
667
+ classified as alopecia and the other 2 were incorrectly
668
+ classified as psoriasis. Again, 11 psoriasis images are
669
+ classified as psoriasis, but 2 psoriasis images were incorrectly
670
+ classified as alopecia. All 13 folliculitis images are classified
671
+ correctly. The fractional incorrect prediction for each class is
672
+ shown in Fig 9. Our model achieved the precision and recall
673
+ score of 0.895 for the alopecia disease class, 0.846 for the
674
+ psoriasis disease class, and 1.0 for the folliculitis disease
675
+ class. As the precision and recall scores are same in each
676
+ class, F1 scores are also similar to their respective precision
677
+ and recall values.
678
+
679
+ VII. CONCLUSION
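These per-class scores can be derived from the predictions with scikit-learn; this sketch assumes x_test and y_test hold the 45 preprocessed test images and their integer labels:

    # Sketch: confusion matrix and per-class precision/recall of the trained model.
    from sklearn.metrics import classification_report, confusion_matrix

    y_pred = model.predict(x_test).argmax(axis=1)
    print(confusion_matrix(y_test, y_pred))
    print(classification_report(y_test, y_pred,
                                target_names=["alopecia", "psoriasis", "folliculitis"]))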
680
+ Although early-stage detection of hair and scalp-related
681
+ diseases is the key to the treatment process, hair loss and scalp
682
+ diseases can often go undetected due to a lack of awareness
683
+ and a lengthy diagnosis test. An AI-based application might
684
+ pave the way to facilitate early disease detection. In this
685
+ study, we developed a machine learning model to accurately
686
+ predict three hair and scalp-related diseases: alopecia,
687
+ folliculitis, and psoriasis by feeding 150 preprocessed image
688
+ data into a 2-D convolutional neural network model. After
689
+ using 70% of the data to train the model, we analyzed
690
+ remaining 30% of images for testing our model. After
691
+ subsequent training, the model gave an overall 96.2% training
692
+ accuracy on the training data and 91.1% validation accuracy
693
+ for the test data, with a high precision and recall scores for
694
+ each disease type. We have also provided our dataset with this
695
+ study. Our proposed system would assist dermatologists and
696
+ patients with a better understanding of disease classification
697
+ and initiating early treatment options for the three most
698
+ frequently occurred hair and scalp diseases.
699
+
700
+ FUNDING
701
+ No funding was used to write this research paper.
702
+
703
+ CONFLICT OF INTEREST
704
+ The authors declare that they do not have any conflicts of
705
+ interest.
706
+
707
+ REFERENCES
[1] Cotsarelis G. Gene expression profiling gets to the root of human hair follicle stem cells. J Clin Invest. 2006;116(1):19-22. doi: 10.1172/JCI27490.
[2] Patel S, Sharma V, Chauhan NS, Thakur M, Dixit VK. Hair growth: Focus on herbal therapeutic agent. Curr Drug Discov Technol. 2015;12(1):21-42. doi: 10.2174/1570163812666150610115055.
[3] Wolff H, Fischer TW, Blume-Peytavi U. The diagnosis and treatment of hair and scalp diseases. Dtsch Arztebl Int. 2016. doi: 10.3238/arztebl.2016.0377.
[4] Peyravian N, Deo S, Daunert S, Jimenez JJ. The inflammatory aspect of male and female pattern hair loss. J Inflamm Res. 2020;13:879-81. doi: 10.2147/JIR.S275785.
721
+ Trainingand validationaccuracy
722
+ 1.0
723
+ 0.8
724
+ Accuracy
725
+ 0.6
726
+ 0.4
727
+ Trainingacc
728
+ Validationacc
729
+ 0
730
+ 10
731
+ 20
732
+ 30
733
+ 40
734
+ 50
735
+ Epochs16
736
+ 17
737
+ 2
738
+ 0
739
+ -
740
+ 14
741
+ 12
742
+ -
743
+ 10
744
+ 1
745
+ 2
746
+ 11
747
+ 0
748
+ 8
749
+ 6
750
+ -4
751
+ 2
752
+ 13
753
+ -2
754
+ 0
755
+ 0
756
+ 1
757
+ 2incorrect predictions
758
+ 0.15
759
+ 0.10
760
+ Fraction ofi
761
+ 0.05
762
+ 0.00
763
+ 0
764
+ 1
765
+ 2
766
+ TrueLabel
767
+ RESEARCH ARTICLE
768
+ European Journal of Information Technologies and Computer Science
769
+ www.ej-compute.org
770
+
771
+
772
+
773
+
774
+ DOI: http://dx.doi.org/10.24018/ejcompute.YEAR.VOL.ISSUE.ID
775
+ Vol X | Issue Y | Month Year
776
+ 7
777
+
[5] Liu F, Hamer MA, Heilmann S, Herold C, Moebus S, Hofman A, et al. Prediction of male-pattern baldness from genotypes. Eur J Hum Genet. 2016;24(6):895-902. doi: 10.1038/ejhg.2015.220.
[6] Benigno M, Anastassopoulos KP, Mostaghimi A, Udall M, Daniel SR, Cappelleri JC, et al. A large cross-sectional survey study of the prevalence of alopecia areata in the United States. Clin Cosmet Investig Dermatol. 2020;13:259-66. doi: 10.2147/ccid.s245649.
[7] Farber EM, Nall L. The natural history of psoriasis in 5,600 patients. Dermatology. 1974;148(1):1-18. doi: 10.1159/000251595.
[8] Chan CS, Van Voorhees AS, Lebwohl MG, Korman NJ, Young M, Bebo BF Jr, et al. Treatment of severe scalp psoriasis: The Medical Board of the National Psoriasis Foundation. J Am Acad Dermatol. 2009;60(6):962-71. doi: 10.1016/j.jaad.2008.11.890.
[9] Roy M, Seethi VDR, Bharti P. CovidAlert - A wristwatch-based system to alert users from face touching. In: Lecture Notes of the Institute for Computer Sciences, Social Informatics and Telecommunications Engineering. Cham: Springer International Publishing; 2022. p. 489-504.
[10] Lalmuanawma S, Hussain J, Chhakchhuak L. Applications of machine learning and artificial intelligence for Covid-19 (SARS-CoV-2) pandemic: A review. Chaos Solitons Fractals. 2020;139(110059):110059. doi: 10.1016/j.chaos.2020.110059.
[11] Pramanik P. On lock-down control of a pandemic model. arXiv [mathOC]. [Preprint] 2022 [cited 2022 Oct 29]. Available from: http://arxiv.org/abs/2206.04248.
[12] Kumar M, Alshehri M, AlGhamdi R, Sharma P, Deep V. A DE-ANN inspired skin cancer detection approach using fuzzy C-means clustering. Mob Netw Appl. 2020;25(4):1319-29. doi: 10.1007/s11036-020-01550-2.
[13] Aytaç Korkmaz S, Binol H. Classification of molecular structure images by using ANN, RF, LBP, HOG, and size reduction methods for early stomach cancer detection. J Mol Struct. 2018;1156:255-63. doi: 10.1016/j.molstruc.2017.11.093.
[14] Virupakshappa, Amarapur B. An automated approach for brain tumor identification using ANN classifier. In: 2017 International Conference on Current Trends in Computer, Electrical, Electronics and Communication (CTCEEC). IEEE; 2017. p. 1011-6.
[15] Balasaravanan K, Prakash M. Detection of dengue disease using artificial neural network based classification technique. Int J Eng Technol. 2017;7(1.3):13. doi: 10.14419/ijet.v7i1.3.8978.
[16] Aslan N, Ozmen Koca G, Kobat MA, Dogan S. Multi-classification deep CNN model for diagnosing COVID-19 using iterative neighborhood component analysis and iterative ReliefF feature selection techniques with X-ray images. Chemometr Intell Lab Syst. 2022;224(104539):104539. doi: 10.1016/j.chemolab.2022.104539.
[17] G. S, R. V, K.p. S. Diabetes detection using deep learning algorithms. ICT Express. 2018;4(4):243-6. doi: 10.1016/j.icte.2018.10.005.
[18] Shakeel CS, Khan SJ, Chaudhry B, Aijaz SF, Hassan U. Classification framework for healthy hairs and alopecia areata: A machine learning (ML) approach. Comput Math Methods Med. 2021;2021:1102083. doi: 10.1155/2021/1102083.
[19] Kapoor I, Mishra A. Automated classification method for early diagnosis of alopecia using machine learning. Procedia Comput Sci. 2018;132:437-43. doi: 10.1016/j.procs.2018.05.157.
[20] ALKolifi ALEnezi NS. A method of skin disease detection using image processing and machine learning. Procedia Comput Sci. 2019;163:85-92. doi: 10.1016/j.procs.2019.12.090.
[21] Hameed N, Shabut AM, Hossain MA. Multi-class skin diseases classification using deep convolutional neural network and support vector machine. In: 2018 12th International Conference on Software, Knowledge, Information Management & Applications (SKIMA). IEEE; 2018.
[22] Mrinmoy-Roy. Scalp-Hair-Diseases-Detection [Internet]. 2022 [cited 2022 Oct 30]. Available from: https://github.com/Mrinmoy-Roy/Scalp-Hair-Diseases-Detection.git.
[23] Swain A. Noise in digital image processing [Internet]. Image Vision. 2018 [cited 2022 Oct 29]. Available from: https://medium.com/image-vision/noise-in-digital-image-processing-55357c9fab71.
[24] Sameer. Image equalization (contrast enhancing) in python - Analytics Vidhya - Medium [Internet]. Analytics Vidhya. 2020 [cited 2022 Oct 29]. Available from: https://medium.com/analytics-vidhya/image-equalization-contrast-enhancing-in-python-82600d3b371c.
[25] Hiroyasu T, Hayashinuma K, Ichikawa H, Yagi N. Preprocessing with image denoising and histogram equalization for endoscopy image analysis using texture analysis. Annu Int Conf IEEE Eng Med Biol Soc. 2015;2015:789-92. doi: 10.1109/EMBC.2015.7318480.
[26] Tazmul Islam M, Meng Q. An exploratory study of Sentinel-1 SAR for rapid urban flood mapping on Google Earth Engine. Int J Appl Earth Obs Geoinf. 2022;113(103002):103002. doi: 10.1016/j.jag.2022.103002.
NdAyT4oBgHgl3EQfUPel/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
NtAzT4oBgHgl3EQfzP6J/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:23c69b5142d9a29b40c8949e543a35dbebd8531096c58ce51eb5e8db94284dd0
+size 324644
NtE1T4oBgHgl3EQfZwQu/content/2301.03153v1.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7c18c295f16837ab9ba91b1ab50ea8d3c256ae4d5dee9e139469b43a9efeb3d
+size 4380585
NtE1T4oBgHgl3EQfZwQu/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff136d1cf7e712cfff1c3295ce6e8065e9d7fc7297c558d84aed82d0936d3567
+size 4063277
O9AyT4oBgHgl3EQf7Prc/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be9615cc6c86c2bd29b51bd998fec8e985416cfc59328c608c2ee4ac02e8ed64
+size 4390957
ONFAT4oBgHgl3EQfyh6B/content/tmp_files/2301.08693v1.pdf.txt ADDED
@@ -0,0 +1,675 @@
arXiv:2301.08693v1 [math.NA] 20 Jan 2023

SELF-SUPERVISED LEARNING FOR A NONLINEAR INVERSE PROBLEM WITH FORWARD OPERATOR INVOLVING AN UNKNOWN FUNCTION ARISING IN PHOTOACOUSTIC TOMOGRAPHY

Gyeongha Hwang (1), Gihyeon Jeon (2,*), Sunghwan Moon (3)
(1) Department of Mathematics, Yeungnam University, Gyeongsan 38541, Republic of Korea
(2) School of Mathematics, Kyungpook National University, Daegu 41566, Republic of Korea
(3) Department of Mathematics, Kyungpook National University, Daegu 41566, Republic of Korea
* Corresponding author. E-mail: rydbr6709@knu.ac.kr

ABSTRACT. In this article, we are concerned with a nonlinear inverse problem whose forward operator involves an unknown function. The problem arises in diverse applications and is challenging because the presence of the unknown function makes it ill-posed. Additionally, the nonlinear nature of the problem makes traditional methods difficult to apply, and previous studies have therefore addressed simplified versions of the problem, either by linearizing it or by assuming knowledge of the unknown function. Here, we propose a self-supervised learning method to directly tackle a nonlinear inverse problem involving an unknown function. In particular, we focus on an inverse problem derived from photoacoustic tomography (PAT), a hybrid medical imaging modality with high resolution and contrast. PAT can be modelled by the wave equation: the measured data is the solution of the equation restricted to the surface, and the initial pressure of the equation contains the biological information on the object of interest. The speed of the sound wave in the equation is unknown. Our goal is to determine the initial pressure and the sound speed simultaneously. Under the simple assumption that the sound speed is a function of the initial pressure, the problem becomes a nonlinear inverse problem involving an unknown function. The experimental results demonstrate that the proposed algorithm performs successfully.
1 Introduction

An inverse problem is to find the causal factor from observed data. It has applications in many fields, such as optics, radar, acoustics, communication theory, signal processing, medical imaging, computer vision, geophysics, oceanography, and astronomy, because it tells us about what we cannot directly observe. The forward operator, the inverse of the inverse problem, can be modelled as a (non)linear system and often involves an unknown function. Due to the nature of the inverse problem, it is usually very hard to know the causal factor. For example, in medical imaging the causal factor is a section of the human body, and in seismology we never know the structure of the earth's interior.

In this article, we are concerned with a nonlinear inverse problem whose forward operator involves an unknown function. Our goal is to find the unknown function and the inverse operator simultaneously from the measurements. The problem is generally ill-posed because of the unknown function. Additionally, the nonlinearity of the problem makes conventional methods difficult to use. To handle the problem, one may simplify it linearly or assume knowledge of the unknown function. Here we propose a self-supervised framework to directly tackle a nonlinear inverse problem involving an unknown function. In particular, we address an inverse problem derived from photoacoustic tomography (PAT). Although our framework is proposed to solve the problem arising in PAT, it is generic and can be extended to handle other nonlinear inverse problems involving an unknown function.

The rest of this section is devoted to an introduction to PAT. In section 2, we formulate an inverse problem arising in PAT, which is nonlinear and also involves an unknown function. The structure and learning method of the proposed framework are described in section 3. The numerical simulation results in section 4 demonstrate that the proposed algorithm performs successfully.
1.1 Photoacoustic Tomography

PAT is a hybrid medical imaging modality that combines the high contrast of optical imaging with the high spatial resolution of ultrasound imaging [1, 2, 3]. The physical basis of PAT is the photoacoustic effect, discovered by Bell in 1881 [4]. In PAT, when a target object under non-destructive testing absorbs a non-ionizing laser pulse, it thermally expands and emits acoustic waves. The emitted ultrasound contains biological information on the target object and is measured by detectors placed around it. The internal image of the target object is reconstructed from this measured data. The advantage of PAT is that it is economical and less harmful because of its use of non-ionizing radiation [5].
The propagation of the emitted ultrasound p(x, t) can be described by the wave equation

    ∂²_t p(x, t) = c(x)² ∆_x p(x, t)   on R² × [0, ∞)    (1)

with the initial conditions

    p(x, 0) = f(x),   ∂_t p(x, 0) = 0   on R².    (2)
Here c is the speed of the waves and f is the initial pressure, which contains biological information such as the location of cancer cells in a physically small tissue. It is a natural assumption that f has compact support in a bounded domain Ω and that the detectors are located on the boundary ∂Ω of the domain. Regarding the measurement procedure, a point-shaped detector measures the average pressure on ∂Ω where the detectors are located, and this average pressure is the value of the pressure wave p(x, t). Therefore, one of the mathematical problems in PAT is reconstructing f from the measured data p|∂Ω×[0,∞), which amounts to obtaining an internal image of the target object.

It is well known that, given the initial pressure f and the speed c, the solution p is determined uniquely. Let us define the wave forward operator W as

    W : (f, c) ↦ p|∂Ω×[0,∞),   i.e.,   W(f, c) = p|∂Ω×[0,∞).

The problem of reconstructing f from W(f, c) has been studied when the speed c is constant [6, 7]. Oksanen, Stefanov and Uhlmann studied the explicit reconstruction when the sound speed
is known [8, 9]. If c depends on the space variable x, the problem becomes much more difficult. A few researchers have studied the problem with a given variable sound speed [10, 11, 12, 13]. Liu and Uhlmann figured out sufficient conditions for recovering f and c [14].

Recently, the application of deep learning in medical imaging, including PAT, has been investigated extensively. Roles of deep learning in tomography include forward and inverse operator approximation, image reconstruction from sparse data, and artifact/noise removal from reconstructed images [15, 16, 17, 18, 19, 20, 21]. There are also studies on limited-view data (see [22, 23]). H. Shan et al. proposed an iterative optimization algorithm which reconstructs f and c simultaneously via supervised learning [24]. However, most works deal with linear inverse problems or with inverse problems that do not involve an unknown function [25].

Many studies on PAT with deep learning are based on supervised learning. Supervised learning exploits a collection of paired data of the boundary data and the initial pressure. In practical applications, it is difficult to obtain the initial pressure, because the initial pressure represents the internal human body. Therefore, it is necessary to study a learning method exploiting the boundary data only. One such method is self-supervised learning, which exploits supervised signals generated from the input data by leveraging its structure [26, 27].
2 Problem formulation

In this section, we formulate the problem precisely. For this, we make several assumptions. First, we assume f has compact support, since a target object is finite. Secondly, c is assumed to be a function of f, namely c(x)² = Γ(f(x)) for some function Γ : [0, 1] → [0, ∞), because the wave speed c depends on the medium. Lastly, we assume that Γ(0) and Γ(1) are known, namely Γ(0) = c0 and Γ(1) = c1. The last assumption is reasonable because Γ(0) and Γ(1) represent the wave speed in the air and at the highest thermal expansion coefficient, respectively. Then equation (1) is rewritten as

    ∂²_t p(x, t) = Γ(f(x)) ∆_x p(x, t)   on R² × [0, ∞).    (3)

Let us define WΓ by WΓ(f) = p|∂Ω×[0,∞), where p is the solution of (3) with initial conditions (2). Then the inverse problem can be formulated as determining the unknown Γ and f from a given WΓ(f). However, this problem is ill-posed: for any Γ′ satisfying

    Γ = Γ′ on Im(f)   and   Γ ≠ Γ′ on Dom(Γ) \ Im(f),

we have WΓ(f) = WΓ′(f). Hence Γ cannot be uniquely determined from WΓ(f). There is also the possibility that there exist Γ1, Γ2, f1 and f2 such that Γ1 ≠ Γ2, f1 ≠ f2 and WΓ1(f1) = WΓ2(f2). Instead, we consider the following inverse problem:

Problem 1. Let the collection of boundary data BΓ := {WΓ(f) | Γ : [0, 1] → [0, ∞), Γ(0) = c0, Γ(1) = c1 and f ∈ L²(R²) has compact support} be given.

1. Determine the unknown Γ from BΓ.
2. For all WΓ(f) ∈ BΓ, determine f.
Then the uniqueness statements for Problem 1 are

Hypothesis 1. If Γ1 ≠ Γ2, then BΓ1 ≠ BΓ2.

and

Hypothesis 2. For fixed Γ, if f1 ≠ f2, then WΓ(f1) ≠ WΓ(f2).

In this article, we aim to solve Problem 1 under Hypotheses 1 and 2. The problem is difficult to solve because

1. (3) involves the unknown Γ, and
2. (3) is not linear.

We are going to solve Problem 1 by exploiting a deep neural network (DNN). Since a DNN can only handle finite data, we address the following inverse problem.

Problem 2. Given {WΓ(fi) | Γ : [0, 1] → [0, ∞), Γ(0) = c0, Γ(1) = c1 and fi ∈ L²(R²) has compact support, i = 1, · · · , N}, determine Γ and {fi | i = 1, · · · , N}.
3 Network Design

Figure 1: The network architecture (input WΓ(f), reconstruction R(WΓ(f)), mapping M(R(WΓ(f))), re-simulated output W(R(WΓ(f))), and an MSE-type loss between input and output).

We propose a self-supervised learning method for the problem formulated in section 2. Our goal is to simultaneously reconstruct {fi | i = 1, · · · , N} and Γ from the given collection {WΓ(fi) | i = 1, · · · , N}. The proposed framework is depicted in Figure 1. It consists of three components:

1. the reconstruction network R,
2. the mapping network M, and
3. the wave forward operator W.
The reconstruction network R learns to reconstruct the initial data from the measured data. The mapping network M approximates the function Γ : [0, 1] → [0, ∞) satisfying c(x)² = Γ(f(x)). The forward operator W maps the initial data and the wave speed to the measured data; here we adopt the k-space method. If every component in the framework functions properly, then the output should be the same as the input. Thus we define the loss function as the relative difference between the input and the output:

    L = (1/N) Σ_{i=1}^{N} ∥WΓ(fi) − W_M(R(WΓ(fi)))∥² / ∥WΓ(fi)∥²,

where W_M denotes the forward operator (3) with Γ replaced by the learned M.
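A minimal sketch of this loss in PyTorch, assuming a differentiable k-space solver forward_W (a placeholder name) that maps an initial pressure and a squared sound speed to boundary data:

    # Sketch: self-supervised loss, relative L2 mismatch on the boundary data.
    import torch

    def self_supervised_loss(g, R, M, forward_W):
        # g: measured boundary data, shape (batch, detectors, time)
        f_hat = R(g)                       # reconstructed initial pressure
        c2_hat = M(f_hat)                  # estimated squared speed Gamma(f)
        g_hat = forward_W(f_hat, c2_hat)   # re-simulated boundary data
        num = torch.sum((g - g_hat) ** 2, dim=(1, 2))
        den = torch.sum(g ** 2, dim=(1, 2))
        return torch.mean(num / den)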
Remark. Our method estimates Γ and the inverse operator WΓ⁻¹. The estimated inverse operator can be used for fast inference of the initial pressure from the boundary measurement.

Remark. The proposed framework is generic and can be extended to handle a nonlinear inverse problem involving an unknown function.

Now, the detailed structures of each component in the framework are described below.
3.1 Reconstruction network R

The reconstruction network R reconstructs f from the input data WΓ(f). Indeed, it approximates the inverse map $W_\Gamma^{-1} : W_\Gamma(f) \mapsto f$. If the speed Γ of the wave is constant, it is well known that the inverse map of (3) is linear [7, 28, 29]. Inspired by this fact, we propose the reconstruction network as a perturbation of a linear map:

$$R := T_1 + U \circ T_2, \qquad (4)$$

where $T_1, T_2 : \mathbb{R}^{m \times m} \to \mathbb{R}^{m \times m}$ are linear and $U : \mathbb{R}^{m \times m} \to \mathbb{R}^{m \times m}$ is the U-net described in Figure 2. U-net is a type of convolutional neural network (CNN) introduced in [30] and widely used in medical imaging. It consists of a contracting path and an expansive path. The contracting path has a typical CNN structure, in which the input is compressed into a feature map of small spatial size and many channels. In the expansive path, the size of the feature map increases again and the number of channels decreases. At the end of R, since the range of f is [0, 1], we apply the clamp function, which raises values smaller than the minimum to the minimum and lowers values larger than the maximum to the maximum.
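As an illustration, a minimal PyTorch sketch of (4) could look as follows; the U-net body is left abstract, and the class and argument names are assumptions rather than the authors' implementation.

```python
import torch
import torch.nn as nn

class Reconstruction(nn.Module):
    """Sketch of R = T1 + U ∘ T2 with a final clamp onto [0, 1]."""

    def __init__(self, m, unet):
        super().__init__()
        # T1 and T2 act on vectorized m x m inputs (purely linear, no bias)
        self.T1 = nn.Linear(m * m, m * m, bias=False)
        self.T2 = nn.Linear(m * m, m * m, bias=False)
        self.unet = unet  # any image-to-image network, e.g. the U-net of Figure 2

    def forward(self, g):                     # g: (batch, 1, m, m) measured data
        b, _, m, _ = g.shape
        v = g.flatten(1)                      # vectorize the input
        t1 = self.T1(v).view(b, 1, m, m)      # linear part
        t2 = self.T2(v).view(b, 1, m, m)
        out = t1 + self.unet(t2)              # perturbation of the linear map
        return torch.clamp(out, 0.0, 1.0)     # enforce f in [0, 1]
```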
Figure 2: U-net architecture for 64 × 64 size
The proposed reconstruction network shows high performance for low-resolution data such as 64 × 64 images (see section 4.3.1). For high-resolution data, however, the linear operators T1 and T2 in the reconstruction network R become problematic because they contain too many parameters. This creates many critical points that impede convergence to the global minimum, and it also causes hardware issues. Thus, for high-resolution data we employ Pixel Shuffle and Pixel Unshuffle, which reduce the number of parameters contained in the linear operators [31]. Pixel Unshuffle splits one image into several images and Pixel Shuffle merges several images into one image, as illustrated in Figure 3. Instead of applying the linear operators T1 and T2 directly to the high-resolution data, we process the data as follows (see Figure 4 and the sketch after this list):

1. Split the high-resolution data (m × m) into four low-resolution images (m/2 × m/2) by applying Pixel Unshuffle.

2. Apply four different linear operators, one to each low-resolution image.

3. Merge the outputs of the linear operators by applying Pixel Shuffle.
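A minimal sketch of this parameter-reduced substitute for a full linear map, written with torch's built-in PixelUnshuffle/PixelShuffle modules (the class name and sizes are illustrative):

```python
import torch
import torch.nn as nn

class BlockLinear(nn.Module):
    """PixelUnshuffle -> four independent linear maps -> PixelShuffle,
    replacing one full (m^2 x m^2) linear operator (cf. Figure 4)."""

    def __init__(self, m):
        super().__init__()
        n = (m // 2) ** 2
        self.unshuffle = nn.PixelUnshuffle(2)  # (b, 1, m, m) -> (b, 4, m/2, m/2)
        self.maps = nn.ModuleList([nn.Linear(n, n, bias=False) for _ in range(4)])
        self.shuffle = nn.PixelShuffle(2)      # inverse rearrangement

    def forward(self, x):                      # x: (b, 1, m, m)
        z = self.unshuffle(x)                  # four low-resolution images
        outs = [T(z[:, i].flatten(1)).view_as(z[:, i])
                for i, T in enumerate(self.maps)]
        return self.shuffle(torch.stack(outs, dim=1))
```

Each of the four maps has (m²/4)² weights, so the total parameter count is m⁴/4, a factor of four fewer than a single m² × m² linear map.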
Figure 3: Pixel Shuffle and Pixel Unshuffle

Figure 4: Architecture of the alternative to the linear map for high-resolution data
3.2 Mapping network M

We use a multilayer perceptron (MLP) to approximate the unknown Γ, because an MLP can approximate any continuous function (the universal approximation theorem; see [32, 33]). The proposed network has a simple structure containing only three hidden layers of 10 nodes each. To satisfy the assumption that Γ(0) = c0 and Γ(1) = c1, the output of the MLP is slightly manipulated as

$$M(f) = \mathrm{MLP}(f) - \mathrm{MLP}(0)\,(1 - f) - \mathrm{MLP}(1)\,f + \big((c_1 - c_0)f + c_0\big),$$

so that

$$M(0) = c_0 \quad \text{and} \quad M(1) = c_1. \qquad (5)$$
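A minimal sketch of M under these choices is given below (three hidden layers of width 10; the ReLU activation is an assumption, since the activation is not specified above).

```python
import torch
import torch.nn as nn

class Mapping(nn.Module):
    """MLP with the boundary correction of Section 3.2, so that
    M(0) = c0 and M(1) = c1 hold exactly by construction."""

    def __init__(self, c0, c1, width=10):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(1, width), nn.ReLU(),
            nn.Linear(width, width), nn.ReLU(),
            nn.Linear(width, width), nn.ReLU(),
            nn.Linear(width, 1),
        )
        self.c0, self.c1 = c0, c1

    def forward(self, f):                     # f: (..., 1), values in [0, 1]
        m0 = self.mlp(torch.zeros_like(f))    # MLP(0)
        m1 = self.mlp(torch.ones_like(f))     # MLP(1)
        return (self.mlp(f) - m0 * (1 - f) - m1 * f
                + (self.c1 - self.c0) * f + self.c0)
```

Substituting f = 0 or f = 1 cancels the MLP terms and leaves c0 or c1, which is exactly constraint (5).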
3.3 Forward problem

A solution of the initial value problem (3) can be computed by the k-space method [34, 35], a numerical scheme for acoustic wave propagation that uses information in frequency space to advance the solution to the next time step. To compute the propagation of p(x, t), let w(x, t) = p(x, t)/Γ(f(x)) be an auxiliary field. Then we have

$$\partial_t^2 w(x, t) = \Delta_x\big[\Gamma(f(x))\,w(x, t)\big].$$

Taking the Fourier transform $\mathcal{F}_x$ of w with respect to x yields

$$\partial_t^2 \mathcal{F}_x w(k, t) = -|k|^2\,\mathcal{F}_x\big[\Gamma(f(\cdot))\,w(\cdot, t)\big](k). \qquad (6)$$

Meanwhile, the numerical approximation of the second time derivative of $\mathcal{F}_x w$ is

$$\partial_t^2 \mathcal{F}_x w(k, t) \approx \frac{\mathcal{F}_x w(k, t + \Delta t) - 2\,\mathcal{F}_x w(k, t) + \mathcal{F}_x w(k, t - \Delta t)}{(\Delta t)^2}, \qquad (7)$$

where Δt is the time step. Combining (6) and (7), we have

$$\mathcal{F}_x w(k, t + \Delta t) = 2\,\mathcal{F}_x w(k, t) - \mathcal{F}_x w(k, t - \Delta t) - (\Delta t)^2 |k|^2\,\mathcal{F}_x\big[\Gamma(f(\cdot))\,w(\cdot, t)\big](k).$$

Taking the inverse Fourier transform $\mathcal{F}_k^{-1}$, we obtain

$$w(x, t + \Delta t) = 2w(x, t) - w(x, t - \Delta t) - \mathcal{F}_k^{-1}\Big[(\Delta t)^2 |\cdot|^2\,\mathcal{F}_x\big[\Gamma(f)w\big](\cdot, t)\Big](x).$$

Here, replacing $(\Delta t)^2|k|^2$ in the third term with $4\sin^2\!\big(\tfrac{\Delta t\,|k|}{2}\big)$ provides a more accurate discretization (see [34, 35]). Finally, we have the wave propagation formula

$$w(x, t + \Delta t) = 2w(x, t) - w(x, t - \Delta t) - \mathcal{F}_k^{-1}\Big[4\sin^2\!\Big(\frac{\Delta t\,|\cdot|}{2}\Big)\,\mathcal{F}_x\big[\Gamma(f)w\big](\cdot, t)\Big](x),$$

or equivalently,

$$p(x, t + \Delta t) = 2p(x, t) - p(x, t - \Delta t) - \Gamma(f)\,\mathcal{F}_k^{-1}\Big[4\sin^2\!\Big(\frac{\Delta t\,|\cdot|}{2}\Big)\,\mathcal{F}_x[p](\cdot, t)\Big](x).$$
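A minimal numerical sketch of one such time step is given below; the grid construction and function names are assumptions, and practical k-space simulators add details (e.g. absorbing boundary layers) omitted here.

```python
import torch

def make_k2_factor(m, dx, dt):
    """Precompute 4*sin^2(dt*|k|/2) on the m x m FFT frequency grid."""
    k1 = 2.0 * torch.pi * torch.fft.fftfreq(m, d=dx)   # angular wavenumbers
    k = torch.sqrt(k1[:, None] ** 2 + k1[None, :] ** 2)
    return 4.0 * torch.sin(0.5 * dt * k) ** 2

def kspace_step(w_prev, w_curr, gamma_f, k2_factor):
    """One update of the auxiliary field w following the formula above:
    w(t+dt) = 2 w(t) - w(t-dt) - IFFT[4 sin^2(dt|k|/2) FFT[Gamma(f) w]]."""
    v = torch.fft.fft2(gamma_f * w_curr)
    corr = torch.fft.ifft2(k2_factor * v).real
    return 2.0 * w_curr - w_prev - corr
```

The boundary data WΓ(f) is then obtained by sampling p = Γ(f)·w on ∂Ω at each time step.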
4 Numerical Simulations

In this section, we present the implementation details and experimental results when Ω is the unit ball.
4.1 Datasets

The Shepp-Logan phantom, an artificial image describing a cross section of the brain that is commonly used in tomography simulations, contains 10 ellipses [36]. Each ellipse is created from 6 parameters: major axis, minor axis, x- and y-coordinates of the center, rotation angle, and intensity value. The data set of initial conditions f defined on [−1.0, 1.0]² ⊂ R² is generated by slightly varying these 6 parameters, with

$$\mathrm{supp}(f) \subset \Big\{(x, y) \in \mathbb{R}^2 : \frac{x^2}{0.69^2} + \frac{y^2}{0.92^2} \le 1\Big\}.$$

We create a set of 2,688 phantoms P = {fi}, i = 1, · · · , 2688. For Γ, we consider four cases: linear, square root, square, and constant:

1. Γ1(f) = 0.3f + 0.7

2. Γ2(f) = 0.3√f + 0.7

3. Γ3(f) = 0.3f² + 0.7

4. Γ4(f) = 0.7.

For 1 ≤ j ≤ 4, we build the data collection {WΓj(fi)}, i = 1, · · · , 2688, by applying the forward operator to P and Γj. Of the 2,688 data, we use 2,048 for training, 128 for validation, and 512 for testing.
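For reference, the four ground-truth speed laws and the data split can be written down directly; this is a sketch, and the dictionary keys and tensor conventions are assumptions.

```python
import torch

# Ground-truth squared-speed laws Gamma(f) used in the four experiments
GAMMAS = {
    "linear":   lambda f: 0.3 * f + 0.7,
    "sqrt":     lambda f: 0.3 * torch.sqrt(f) + 0.7,
    "square":   lambda f: 0.3 * f ** 2 + 0.7,
    "constant": lambda f: torch.full_like(f, 0.7),
}

# 2,688 phantoms split as stated above
N_TRAIN, N_VAL, N_TEST = 2048, 128, 512
```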
Figure 5: Examples of phantoms
4.2 Training

We use the Adam optimizer, which is based on stochastic gradient descent with adaptive moment estimation, to train the networks [37]. There are two neural networks in the proposed framework: the reconstruction network R and the mapping network M. The learning rates for the linear part of R, the perturbation part of R, and M are chosen to be 10⁻⁴, 10⁻³, and 10⁻³, respectively. The momentum parameters of the Adam optimizer are set to β1 = 0.9 and β2 = 0.999.

We deliberately set the batch size to 2. For typical tasks, a moderately large batch size reduces training time. In this problem, however, a small batch size is advantageous because our model must reconstruct an exact image for each datum rather than an average result.
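In PyTorch, this learning-rate assignment can be expressed with parameter groups. The modules below are illustrative placeholders standing in for the actual parts of R and M, not the authors' code.

```python
import torch
import torch.nn as nn

# Placeholders for the linear part of R, the U-net perturbation of R, and M
R_linear = nn.Linear(64 * 64, 64 * 64, bias=False)
R_unet = nn.Conv2d(1, 1, kernel_size=3, padding=1)
mapping_net = nn.Sequential(nn.Linear(1, 10), nn.ReLU(), nn.Linear(10, 1))

optimizer = torch.optim.Adam(
    [
        {"params": R_linear.parameters(), "lr": 1e-4},    # linear part of R
        {"params": R_unet.parameters(), "lr": 1e-3},      # perturbation part of R
        {"params": mapping_net.parameters(), "lr": 1e-3}, # mapping network M
    ],
    betas=(0.9, 0.999),
)
```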
4.3 Results

In this section, we present the experimental results. The overall results are displayed in Figure 6, Table 1, Figure 7, Figure 8, Table 2, and Figure 9. Here the losses for f and WΓ(f) are defined respectively by

$$\text{loss for } f = \frac{1}{N}\sum_{i=1}^{N} \frac{\lVert f_i - R(W_\Gamma(f_i))\rVert_2}{\lVert f_i\rVert_2}$$

and

$$\text{loss for } W_\Gamma f = \frac{1}{N}\sum_{i=1}^{N} \frac{\lVert W_\Gamma(f_i) - W_M(R(W_\Gamma(f_i)))\rVert_2}{\lVert W_\Gamma(f_i)\rVert_2}.$$
4.3.1 Low resolution data

We conduct the simulation on a dataset of images of size 64 × 64. The results for the mapping networks are shown in Figure 6; the mapping networks approximate Γ accurately. When Γ3 = 0.3f² + 0.7, there is a discrepancy between the plot of the mapping network M and the plot of Γ. This is because the values of f ∈ P almost all belong to [0, 0.3] ∪ {1}, so the behavior of Γ in between has little effect on WΓ(f). In all cases, training the mapping networks requires approximately 10³ iterations. The results of the reconstruction networks are shown in Table 1 and Figure 7. Table 1 reports the test errors, from which we conclude that the reconstruction networks accurately approximate the inverse maps in each case. Training the reconstruction networks requires approximately 10⁵ iterations.

Remark. The assumption (5) on Γ is crucial. If constraint (5) is not imposed on M, it may take a long time to approximate Γ, or the network may fail to find Γ altogether. Under the constraint, M quickly determines Γ, and this early determination of Γ aids the learning of the reconstruction networks.
Figure 6: Comparison of the mapping network M and the ground truth Γ for 64 × 64 data

Assumption           loss for f   loss for WΓ(f)
Γ1 = 0.3f + 0.7      0.00504      0.00702
Γ2 = 0.3√f + 0.7     0.00537      0.00947
Γ3 = 0.3f² + 0.7     0.00557      0.00634
Γ4 = 0.7             0.01373      0.00456

Table 1: Test errors for f and WΓ(f) according to Γ after 102,400 iterations for 64 × 64 data

Figure 7: Reconstruction results according to Γ for 64 × 64 data
4.3.2 High resolution data

In the simulation for high-resolution data, the two linear operators T1 and T2 in the reconstruction network (4) are replaced by the alternative map described in Figure 4. The dataset is prepared with images of size 96 × 96. As in the low-resolution case, the mapping network M approximates Γ accurately within 10³ iterations (Figure 8). On the other hand, the reconstruction networks for each Γ exhibit a slight but still acceptable decrease in performance (Table 2 and Figure 9). We surmise that this slight decrease results from the reduction in parameters brought about by the Pixel Unshuffle and Pixel Shuffle operations.
Figure 8: Comparison of the mapping network M and the ground truth Γ for 96 × 96 data

Assumption           loss for f   loss for WΓ(f)
Γ1 = 0.3f + 0.7      0.00860      0.01293
Γ2 = 0.3√f + 0.7     0.01023      0.01679
Γ3 = 0.3f² + 0.7     0.00710      0.01132
Γ4 = 0.7             0.00689      0.69511

Table 2: Test errors for f and WΓ(f) according to Γ after 102,400 iterations for 96 × 96 data

Figure 9: Reconstruction results according to Γ for 96 × 96 data
5 Conclusions

We have proposed a self-supervised learning framework for a nonlinear inverse problem whose forward operator involves an unknown function. In medical imaging such as PAT, the initial pressure underlying the measured data is mostly unobtainable, and the wave speed is difficult to know, so the initial pressure f and the wave speed must be reconstructed simultaneously. Under a simple assumption, this becomes a nonlinear inverse problem involving an unknown function. The experimental results demonstrate the high performance of the proposed algorithm. Our framework can be extended to nonlinear inverse problems involving an unknown function formulated in more complicated settings, which is an interesting line of future research.

References
[1] Huabei Jiang. Photoacoustic tomography. CRC Press, 2018.

[2] Jun Xia, Junjie Yao, and Lihong V Wang. Photoacoustic tomography: principles and advances. Electromagnetic Waves (Cambridge, Mass.), 147:1, 2014.

[3] Peter Kuchment. The Radon transform and medical imaging. SIAM, 2013.

[4] Alexander Graham Bell. On the production and reproduction of sound by light. In Proc. Am. Assoc. Adv. Sci., volume 29, pages 115–136, 1881.

[5] Idan Steinberg, David M Huland, Ophir Vermesh, Hadas E Frostig, Willemieke S Tummers, and Sanjiv S Gambhir. Photoacoustic clinical imaging. Photoacoustics, 14:77–98, 2019.

[6] Gerhard Zangerl, Sunghwan Moon, and Markus Haltmeier. Photoacoustic tomography with direction dependent data: An exact series reconstruction approach. Inverse Problems, 35(11):114005, 2019.

[7] Minghua Xu and Lihong V Wang. Universal back-projection algorithm for photoacoustic computed tomography. Physical Review E, 71(1):016706, 2005.

[8] Lauri Oksanen and Gunther Uhlmann. Photoacoustic and thermoacoustic tomography with an uncertain wave speed. Mathematical Research Letters, 2014.

[9] Plamen Stefanov and Gunther Uhlmann. Thermoacoustic tomography with variable sound speed. Inverse Problems, 25(7):075011, 2009.

[10] Jianliang Qian, Plamen Stefanov, Gunther Uhlmann, and Hongkai Zhao. An efficient Neumann series–based algorithm for thermoacoustic and photoacoustic tomography with variable sound speed. SIAM Journal on Imaging Sciences, 4(3):850–883, 2011.

[11] Zakaria Belhachmi, Thomas Glatz, and Otmar Scherzer. A direct method for photoacoustic tomography with inhomogeneous sound speed. Inverse Problems, 32(4):045005, 2016.

[12] Yulia Hristova, Peter Kuchment, and Linh Nguyen. Reconstruction and time reversal in thermoacoustic tomography in acoustically homogeneous and inhomogeneous media. Inverse Problems, 24(5):055006, 2008.

[13] Minam Moon, Injo Hur, and Sunghwan Moon. Singular value decomposition of the wave forward operator with radial variable coefficients. arXiv preprint arXiv:2208.10793, 2022.

[14] Hongyu Liu and Gunther Uhlmann. Determining both sound speed and internal source in thermo- and photo-acoustic tomography. Inverse Problems, 31(10):105005, 2015.

[15] Stephan Antholzer, Markus Haltmeier, Robert Nuster, and Johannes Schwab. Photoacoustic image reconstruction via deep learning. In Photons Plus Ultrasound: Imaging and Sensing 2018, volume 10494, pages 433–442. SPIE, 2018.

[16] Gregory Ongie, Ajil Jalal, Christopher A Metzler, Richard G Baraniuk, Alexandros G Dimakis, and Rebecca Willett. Deep learning techniques for inverse problems in imaging. IEEE Journal on Selected Areas in Information Theory, 1(1):39–56, 2020.

[17] Ge Wang, Jong Chul Ye, and Bruno De Man. Deep learning for tomographic image reconstruction. Nature Machine Intelligence, 2(12):737–748, 2020.

[18] Janek Gröhl, Melanie Schellenberg, Kris Dreher, and Lena Maier-Hein. Deep learning for biomedical photoacoustic imaging: a review. Photoacoustics, 22:100241, 2021.

[19] Changchun Yang, Hengrong Lan, Feng Gao, and Fei Gao. Review of deep learning for photoacoustic imaging. Photoacoustics, 21:100215, 2021.

[20] Stephan Antholzer, Markus Haltmeier, and Johannes Schwab. Deep learning for photoacoustic tomography from sparse data. Inverse Problems in Science and Engineering, 27(7):987–1005, 2019.

[21] Jiasheng Zhou, Da He, Xiaoyu Shang, Zhendong Guo, Sung-Liang Chen, and Jiajia Luo. Photoacoustic microscopy with sparse data by convolutional neural networks. Photoacoustics, 22:100242, 2021.

[22] Steven Guan, Amir A Khan, Siddhartha Sikdar, and Parag V Chitnis. Limited-view and sparse photoacoustic tomography for neuroimaging with deep learning. Scientific Reports, 10(1):1–12, 2020.

[23] Huijuan Zhang, Hongyu Li, Nikhila Nyayapathi, Depeng Wang, Alisa Le, Leslie Ying, and Jun Xia. A new deep learning network for mitigating limited-view and under-sampling artifacts in ring-shaped photoacoustic tomography. Computerized Medical Imaging and Graphics, 84:101720, 2020.

[24] Hongming Shan, Christopher Wiedeman, Ge Wang, and Yang Yang. Simultaneous reconstruction of the initial pressure and sound speed in photoacoustic tomography using a deep-learning approach. In Novel Optical Systems, Methods, and Applications XXII, volume 11105, page 1110504. International Society for Optics and Photonics, 2019.

[25] Maarten V. de Hoop, Matti Lassas, and Christopher A. Wong. Deep learning architectures for nonlinear operator functions and nonlinear inverse problems. Mathematical Statistics and Learning, 4(1/2):1–86, 2021.

[26] Saeed Shurrab and Rehab Duwairi. Self-supervised learning methods and applications in medical imaging analysis: A survey. PeerJ Computer Science, 8:e1045, 2022.

[27] Longlong Jing and Yingli Tian. Self-supervised visual feature learning with deep neural networks: A survey. IEEE Transactions on Pattern Analysis and Machine Intelligence, 43(11):4037–4058, 2020.

[28] Sunghwan Moon. Inversion formula for a Radon-type transform arising in photoacoustic tomography with circular integrating detectors. Advances in Mathematical Physics, 2018, 2018.

[29] Rim Gouia-Zarrad, Souvik Roy, and Sunghwan Moon. Numerical inversion and uniqueness of a spherical Radon transform restricted with a fixed angular span. Applied Mathematics and Computation, 408:126338, 2021.

[30] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 234–241. Springer, 2015.

[31] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1874–1883, 2016.

[32] George Cybenko. Approximation by superpositions of a sigmoidal function. Mathematics of Control, Signals and Systems, 2(4):303–314, 1989.

[33] Jae-Mo Kang and Sunghwan Moon. Error bounds for ReLU networks with depth and width parameters. Japan Journal of Industrial and Applied Mathematics, to appear.

[34] T Douglas Mast, Laurent P Souriau, D-LD Liu, Makoto Tabei, Adrian I Nachman, and Robert C Waag. A k-space method for large-scale models of wave propagation in tissue. IEEE Transactions on Ultrasonics, Ferroelectrics, and Frequency Control, 48(2):341–354, 2001.

[35] Benjamin T Cox, S Kara, Simon R Arridge, and Paul C Beard. k-space propagation models for acoustically heterogeneous media: Application to biomedical photoacoustics. The Journal of the Acoustical Society of America, 121(6):3453–3464, 2007.

[36] Lawrence A Shepp and Benjamin F Logan. The Fourier reconstruction of a head section. IEEE Transactions on Nuclear Science, 21(3):21–43, 1974.

[37] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
ONFAT4oBgHgl3EQfyh6B/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,470 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf,len=469
2
+ page_content='SELF-SUPERVISED LEARNING FOR A NONLINEAR INVERSE PROBLEM WITH FORWARD OPERATOR INVOLVING AN UNKNOWN FUNCTION ARISING IN PHOTOACOUSTIC TOMOGRAPHY Gyeongha Hwang1,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
3
+ page_content=' Gihyeon Jeon2∗,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
4
+ page_content=' Sunghwan Moon3 1 Department of Mathematics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
5
+ page_content=' Yeungnam University,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
6
+ page_content=' Gyeongsan 38541,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
7
+ page_content=' Republic of Korea 2 School of Mathematics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
8
+ page_content=' Kyungpook National University,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
9
+ page_content=' Daegu 41566,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
10
+ page_content=' Republic of Korea 3 Department of Mathematics,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
11
+ page_content=' Kyungpook National University,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
12
+ page_content=' Daegu 41566,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
13
+ page_content=' Republic of Korea Corresponding author E-mails: rydbr6709@knu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
14
+ page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
15
+ page_content='kr ABSTRACT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
16
+ page_content=' In this article, we concern with a nonlinear inverse problem with forward opera- tor involving an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
17
+ page_content=' The problem arises in diverse applications and is challenging by the presence of the unknown function, which makes it ill-posed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
18
+ page_content=' Additionally, the nonlinear nature of the problem makes it difficult to use traditional methods and thus the study has addressed a simplified version of the problem by either linearizing it or assuming knowledge of the unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
19
+ page_content=' Here, we propose a self-supervised learning to directly tackle a nonlinear inverse problem involving an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
20
+ page_content=' In particular, we focus on an inverse problem derived in Photoacoustic Tomograpy (PAT) which is a hybrid medical imaging with high res- olution and contrast.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
21
+ page_content=' PAT can be modelled based on the wave equation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
22
+ page_content=' The measured data is the solution of the equation restricted to the surface and the initial pressure of the equation contains the biological information on the object of interest.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
23
+ page_content=' The speed of sound wave in the equation is unknown.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
24
+ page_content=' Our goal is to determine the initial pressure and the speed of sound wave simultaneously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
25
+ page_content=' Under a simple assumption that the sound speed is a function of the initial pressure, the problem becomes a nonlinear inverse problem involving an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
26
+ page_content=' The experimental results demonstrate that the proposed algorithm performs successfully.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
27
+ page_content=' 1 Introduction Inverse problem is to find the cause factor from the observed data, which has applications in many fields such as optics, radar, acoustics, communication theory, signal processing, medical imaging, computer vision, geophysics, oceanography, and astronomy because it tells us about what we cannot directly observe.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
28
+ page_content=' The forward operator, the inverse of the inverse problem, can be modelled as an (non)-linear system and often involves an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
29
+ page_content=' Due to the nature of the inverse problem, it is usually very hard to know the cause factor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
30
+ page_content=' For example, in medical imaging the cause factor is the human body section and in seismology, we never know the structure of the earth’s interior.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
31
+ page_content=' In this article, we concern with a nonlinear inverse problem with forward operator involv- ing an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
32
+ page_content=' Our goal is to find the unknown function and the inverse opera- tor simultaneously from the measurements.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
33
+ page_content=' The problem is generally ill-posed because of 1 arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
34
+ page_content='08693v1 [math.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
35
+ page_content='NA] 20 Jan 2023 the unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
36
+ page_content=' Additionally, the nonlinearity in the problem makes conventional methods difficult to use.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
37
+ page_content=' To handle the problem, one may simplify the problem linearly or assume knowledge of the unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
38
+ page_content=' Here we propose a self-supervised frame- work to directly tackle a nonlinear inverse problem involving an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
39
+ page_content=' In particular, we address an inverse problem derived in Photoacoustic Tomography (PAT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
40
+ page_content=' Although our proposed framework has been proposed to solve the problem arising in PAT, it is generic and can be extended to handle a nonlinear inverse problem involving an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
41
+ page_content=' The rest of the section is devoted to an introduction of PAT.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
42
+ page_content=' In section 2, we formulate an inverse problem arising in PAT, which is nonlinear and also involves an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
43
+ page_content=' The structure and learning method of the proposed framework for the problem are described in section 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
44
+ page_content=' The numerical simulation results in section 4 demonstrate that the proposed algorithm performs successfully.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
45
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
46
+ page_content='1 Photoacoustic Tomography PAT is a hybrid medical imaging that combines the high contrast of optical imaging with the high spatial resolution of ultrasound images [1, 2, 3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
47
+ page_content=' The physical basis of PAT is the photoacoustic effect discovered by Bell in 1881 [4].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
48
+ page_content=' In PAT, when an non-destructive testing target object absorbs a non-ionizing laser pulse, it thermally expands and emits acoustic waves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
49
+ page_content=' The emitted ultrasound contains biological information on the target object and is measured by an detector placed around it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
50
+ page_content=' The internal image of the target object is reconstructed from this measured data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
51
+ page_content=' The advantage of PAT is that it is economical and less harmful because of non-ionizing radiation use [5].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
52
+ page_content=' The propagation of the emitted ultrasound p(x, t) can be described by the wave equation ∂2 t p(x, t) = c(x)2∆xp(x, t) on R2 × [0, ∞) (1) with the initial conditions p(x, 0) = f(x) ∂tp(x, 0) = 0 on R2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
53
+ page_content=' (2) Here c is the speed of waves and f is the initial pressure which contains biological in- formation such as the location of a cancer cells in a physically small tissue.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
54
+ page_content=' It is natural assumption that f has compact support in the bounded domain Ω and the detectors are located on the boundary ∂Ω of the domain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
55
+ page_content=' Regarding the measurement procedure, the point-shaped detector measures the average pressure above ∂Ω where the detectors are located and this average pressure is the value of a pressure wave p(x, t).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
56
+ page_content=' Therefore, one of mathematical problems in PAT is reconstructing f from the measured data p|∂Ω×[0,∞), which implies obtaining an internal image of the target object.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
57
+ page_content=' It is well-known that given the initial pressure f and the speed c, the solution p is determined uniquely.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
58
+ page_content=' Let us define the wave forward operator W as W : (f, c) �→ p|∂Ω×[0,∞), i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
59
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
60
+ page_content=', W(f, c) = p|∂Ω×[0,∞).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
61
+ page_content=' Reconstructing problem for f from W(f, c) is studied when speed c is constant [6, 7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
62
+ page_content=' Okanen, Stefanov and Uhlmann study the explicit reconstruction when the sound speed 2 is known [8, 9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
63
+ page_content=' If c depends on space variable x, the problem become much more difficult.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
64
+ page_content=' A few of researchers have studied the problem with a given variable sound speed [10, 11, 12, 13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
65
+ page_content=' Liu and Uhlmann figure out the sufficient conditions for recovering f and c [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
66
+ page_content=' Recently, the application of deep learning in an medical imaging including PAT has been investigated extensively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
67
+ page_content=' Roles of deep learning in tomography include forward and in- verse operator approximation, image reconstruction from sparse data, and artifact/noise removal from reconstructed images [15, 16, 17, 18, 19, 20, 21].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
68
+ page_content=' There are also studies on limited-view data (see [22, 23]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
69
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
70
+ page_content=' Shan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
71
+ page_content=' propose an iterative optimization algo- rithm which reconstructs f and c simultaneously via a supervised learning [24].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
72
+ page_content=' However, most works deal with linear inverse problems or inverse problems without involving an unknown function [25].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
73
+ page_content=' Many studies on PAT with deep learning are based on a supervised learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
74
+ page_content=' A supervised learning exploits a collection of paired data of the boundary data and the initial pres- sure.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
75
+ page_content=' In practical applications, it is difficult to obtain the initial pressure, because initial pressure represents internal human body.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
76
+ page_content=' Therefore, it is necessary to study a learning method exploiting the boundary data only.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
77
+ page_content=' One such method is a self-supervised learning which exploits supervised signals that are generated from the input data by leveraging its structure [26, 27].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
78
+ page_content=' 2 Problem formulation In this section, we formulate the problem precisely.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
79
+ page_content=' For this, we will make several as- sumptions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
80
+ page_content=' First we assume f has compact support, since a target object is finite.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
81
+ page_content=' Sec- ondly, c is assumed to be a function of f, namely c(x)2 = Γ(f(x)) for some function Γ : [0, 1] → [0, ∞), because the wave speed c depends on the medium.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
82
+ page_content=' Lastly, we assume that Γ(0) and Γ(1) are known, namely Γ(0) = c0 and Γ(1) = c1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
83
+ page_content=' The last assumption is reasonable because Γ(0) and Γ(1) represent the wave speeds in the air and the highest thermal expansion coefficient respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
84
+ page_content=' Then the equation (1) is rewritten as: ∂2 t p(x, t) = Γ(f(x))∆xp(x, t) on R2 × [0, ∞).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
85
+ page_content=' (3) Let us define WΓ by WΓ(f) = p|∂Ω×[0,∞) where p is the solution of (3) with initial conditions (2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
86
+ page_content=' Then the inverse problem can be formulated as determining unknown Γ and f from a given WΓ(f).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
87
+ page_content=' However, this problem is ill-posed: for any Γ′ satisfying � Γ = Γ′ on Im(f) Γ ̸= Γ′ on Dom(Γ) \\ Im(f), we have WΓ(f) = WΓ′(f).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
88
+ page_content=' Hence Γ can not be uniquely determined from WΓ(f).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
89
+ page_content=' There is also a possibility that there exist Γ1, Γ2, f1 and f2 such that Γ1 ̸= Γ2, f1 ̸= f2 and WΓ1(f1) = WΓ2(f2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
90
+ page_content=' Instead, we consider the following inverse problem: Problem 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
91
+ page_content=' Let the collection of boundary data BΓ := {WΓ(f) | Γ : [0, 1] → [0, ∞), Γ(0) = c0, Γ(1) = c1 and f ∈ L2(R2) has compact support} be given.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
92
+ page_content=' 3 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
93
+ page_content=' Determine unknown Γ from BΓ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
94
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
95
+ page_content=' For all WΓ(f) ∈ BΓ, determine f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
96
+ page_content=' Then the uniqueness statements for Problem 1 are Hypothesis 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
97
+ page_content=' If Γ1 ̸= Γ2, then BΓ1 ̸= BΓ2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
98
+ page_content=' and Hypothesis 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
99
+ page_content=' For fixed Γ, if f1 ̸= f2, then WΓ(f1) ̸= WΓ(f2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
100
+ page_content=' In this article, we aim to solve Problem 1 under Hypothesis 1 and 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
101
+ page_content=' The problem is difficult to solve because of 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
102
+ page_content=' (3) involves unknown Γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
103
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
104
+ page_content=' (3) is not linear.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
105
+ page_content=' We are going to solve Problem 1 by exploiting a deep neural network (DNN).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
106
+ page_content=' Since DNN can only handle with finite data, we address the following inverse problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
107
+ page_content=' Problem 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
108
+ page_content=' For given {WΓ(fi) | Γ : [0, 1] → [0, ∞), Γ(0) = c0, Γ(1) = c1 and fi ∈ L2(R2) has compact support, i = 1, · · · , N}, determine Γ and {fi|i = 1, · · · , N}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
109
+ page_content=' 3 Network Design Figure 1: The network architecture We propose a self-supervised learning for the problem formulated in section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
110
+ page_content=' Our goal is simultaneously reconstructing {fi}N i=1 and Γ from given collection {WΓ(fi)}N i=1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
111
+ page_content=' The proposed framework is depicted in Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
112
+ page_content=' It consists of three components: 4 input output Wr(f) W (R(Wr(f))) R(Wr(f)) M(R(Wr(f))) LOSS = MSEWr(f),W(RWr(f)))1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
113
+ page_content=' Reconstruction network R 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
114
+ page_content=' Mapping network M 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
115
+ page_content=' Wave forward operator W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
116
+ page_content=' The reconstruction network R learns to reconstruct the initial data from the measured data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
117
+ page_content=' The mapping network M approximates the function Γ : [0, 1] → [0, ∞) satisfying c(x)2 = Γ(f(x)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
118
+ page_content=' The forward operator W assigns to the initial data and the wave speed the measured data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
119
+ page_content=' Here we adopt the k-space method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
120
+ page_content=' If every component in the framework functions properly, then output should be same to the input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
121
+ page_content=' Thus we define the loss function as the difference between the input and the output: L = 1 N N � i=1 ∥WΓ(fi) − WM(R(WΓ(fi))∥2 ∥WΓ(fi)∥2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
122
+ page_content=' Remark.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
123
+ page_content=' Our method estimates Γ and the inverse operator W−1 Γ .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
124
+ page_content=' The estimated inverse operator can be used for the fast inference of the initial pressure from the boundary measurement.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
125
+ page_content=' Remark.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
126
+ page_content=' The proposed framework is generic and can be extended to handle a nonlinear inverse problem involving an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
127
+ page_content=' Now, the detailed structures of each component in the framework are described below.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
128
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
129
+ page_content='1 Reconstruction network R The reconstruction network R is a network that reconstruct f from input data WΓ(f).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
130
+ page_content=' Indeed, it approximates the inverse map W−1 Γ : WΓ(f) �→ f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
131
+ page_content=' If the speed Γ of the wave is constant, it is well-known that the inverse map of (3) is linear [7, 28, 29].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
132
+ page_content=' Inspired by this fact, we propose the reconstruction network as a perturbation of a linear map: R := T1 + U ◦ T2, (4) where T1, T2 : Rm×m → Rm×m are linear and U : Rm×m → Rm×m is the U-net described in Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
133
+ page_content=' U-net is a type of convolutional neural network (CNN) introduced in [30] and is used widely in medical imaging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
134
+ page_content=' U-net consists of a contracting path and an expansive path.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
135
+ page_content=' The contacting path has a typical CNN structure, where the input data is extracted into feature map with small size and large channel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
136
+ page_content=' In the expansive path, the size of the feature map increases again, and the number of channels decreases.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
137
+ page_content=' In the end of R, since the range of f is [0, 1], we used the clamp function which rounds up values smaller than the minimum and round down values larger than the maximum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
138
+ page_content=' 5 Figure 2: U-net architecture for 64 × 64 size The proposed reconstruction network show a high performance for the low resolution data like 64 × 64 (see section 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
139
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
140
+ page_content='1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
141
+ page_content=' In case of high resolution data, however the linear operators T1 and T2 in the reconstruction network R make some problems, because they contains too many parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
142
+ page_content=' It causes lots of critical points which impede the convergence to the global minimum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
143
+ page_content=' It also makes a hardware issue and thus for the high resolution data, we employ Pixel Shuffle and Pixel Unshuffle which reduce the number of parameters contained in linear operators [31].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
144
+ page_content=' The Pixel Unshuffle splits one image into several images and the Pixel Shuffle merges several images into one image, as illustrated in Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
145
+ page_content=' Instead of applying the linear operators (T1 and T2) directly to the high resolution data, we process the data as follows (see Figure 4) : 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
146
+ page_content=' Split the high resolution data (m × m) into four low resolution data ( m 2 × m 2 ) by exploiting the Pixel Unshuffle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
147
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
148
+ page_content=' Apply four different linear operators to each low resolution data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
149
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
150
+ page_content=' Merge the outputs of the linear operators by using the Pixel Shuffle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
151
+ page_content=' 6 Figure 3: Pixel Shuffle and Pixel Unshuffle Figure 4: Architecture of alternative map to linear for high resolution data 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
152
+ page_content='2 Mapping network M We use multilayer perceptron (MLP) to approximate unknown Γ, because MLP can approximate any continuous function (a universal approximation theorem, see [32, 33]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
153
+ page_content=' The proposed network is a simple structure containing only three hidden layers of 10 nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
154
+ page_content=' To satisfy the assumption that Γ(0) = c0 and Γ(1) = c1, the output of MLP is slightly manipulated as M(f) = MLP(f) − MLP(0) ∗ (1 − f) − MLP(1) ∗ f + ((c1 − c0)f + c0), 7 Linear map Linearmap2 Pixel Pixel Unshuffle Shuffle Linear map 3 Linear map 4so that M(0) = c0 and M(1) = c1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
155
+ page_content=' (5) 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
3.3 Forward problem

A solution of the initial value problem (3) can be computed by the k-space method [34, 35]. The k-space method is a numerical scheme for acoustic wave propagation that uses information in the frequency space to advance the solution to the next time step. To calculate the propagation of p(x, t), let w(x, t) = p(x, t)/Γ(f(x)) be an auxiliary field. Then we have

∂_t² w(x, t) = Δ_x [Γ(f(x)) w(x, t)].

Taking the Fourier transform F_x of w with respect to x yields

∂_t² F_x w(k, t) = −|k|² F_x [Γ(f(·)) w(·, t)](k).    (6)

Meanwhile, the numerical approximation of the second time derivative of F_x w is

∂_t² F_x w(k, t) ≈ (F_x w(k, t + Δt) − 2 F_x w(k, t) + F_x w(k, t − Δt)) / (Δt)²,    (7)

where Δt is the time step. Then, by combining (6) and (7), we have

F_x w(k, t + Δt) = 2 F_x w(k, t) − F_x w(k, t − Δt) − (Δt)² |k|² F_x [Γ(f(·)) w(·, t)](k).

Taking the inverse Fourier transform F_k^{-1}, we obtain

w(x, t + Δt) = 2 w(x, t) − w(x, t − Δt) − F_k^{-1} [(Δt)² |·|² F_x [Γ(f) w](·, t)](x).

Here, replacing (Δt)² |k|² in the third term with 4 sin²(Δt |k| / 2) provides a more accurate discretization (see [34, 35]). Finally, we have the wave propagation formula

w(x, t + Δt) = 2 w(x, t) − w(x, t − Δt) − F_k^{-1} [4 sin²(Δt |·| / 2) F_x [Γ(f) w](·, t)](x),

or equivalently,

p(x, t + Δt) = 2 p(x, t) − p(x, t − Δt) − Γ(f) F_k^{-1} [4 sin²(Δt |·| / 2) F_x [p](·, t)](x).
4 Numerical Simulations

In this section, we present the details of the implementation and the experimental results when Ω is the unit ball.

4.1 Datasets

The Shepp-Logan phantom, an artificial image describing a cross section of the brain that is commonly used for simulation in tomography, contains 10 ellipses [36]. Each ellipse is created with 6 parameters: major axis, minor axis, the x- and y-coordinates of the center, rotation angle, and intensity value. The data set of initial conditions f defined on [−1.0, 1.0]² ⊂ R² is generated by slightly changing these 6 parameters, with

supp(f) ⊂ {(x, y) ∈ R² : x²/0.69² + y²/0.92² ≤ 1}.

We create a set of 2,688 phantoms P = {f_i}_{i=1}^{2688}. For Γ, we consider four cases: linear, square root, square, and constant:

1. Γ1(f) = 0.3f + 0.7
2. Γ2(f) = 0.3√f + 0.7
3. Γ3(f) = 0.3f² + 0.7
4. Γ4(f) = 0.7

For 1 ≤ j ≤ 4, we make the collection of data {W_{Γj} f_i}_{i=1}^{2688} by applying the forward operator to P with Γj. Of the 2,688 data, we use 2,048 for training, 128 for validation, and 512 for testing.
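A sketch of this phantom generation is given below. The rasterization details and the jitter magnitude (scale) are our guesses, and base_params stands for the 10 standard Shepp-Logan ellipse parameter tuples, which are not reproduced here.

import numpy as np

def ellipse_image(params, size=64):
    # Rasterize ellipses given as (major, minor, x0, y0, angle, intensity)
    # on [-1, 1]^2; intensities of overlapping ellipses are summed.
    xs = np.linspace(-1.0, 1.0, size)
    x, y = np.meshgrid(xs, xs, indexing="xy")
    img = np.zeros((size, size))
    for a, b, x0, y0, theta, val in params:
        xr = (x - x0) * np.cos(theta) + (y - y0) * np.sin(theta)
        yr = -(x - x0) * np.sin(theta) + (y - y0) * np.cos(theta)
        img[(xr / a) ** 2 + (yr / b) ** 2 <= 1.0] += val
    return img

def perturbed_phantom(base_params, rng, scale=0.02, size=64):
    # Slightly jitter all six parameters of each ellipse (scale is a guess).
    jittered = [tuple(p + rng.uniform(-scale, scale) for p in e)
                for e in base_params]
    return ellipse_image(jittered, size)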
190
+ page_content=' Figure 5: Examples of phantoms 9 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
191
+ page_content='2 Training We use the Adam optimizer based on stochastic gradient descent and adaptive moment estimation to train the network [37].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
192
+ page_content=' There are two neural networks in the proposed frameworks: the reconstruction network R and the mapping network M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
193
+ page_content=' The learning rates for linear term of R, perturbation term of R, and M are chosen to be 10−4, 10−3, and 10−3, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
194
+ page_content=' Momentum parameters of the Adam optimizer were set at β1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
195
+ page_content='9 and β2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
196
+ page_content='999, respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
197
+ page_content=' We specifically put the batch size to 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
198
+ page_content=' For general tasks, a moderately large batch size reduces training time.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
199
+ page_content=' However, in this problem, a small batch size is advantageous because our model must be able to reconstruct an exact image for each data rather than an average result.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
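The optimizer configuration above maps directly onto Adam parameter groups. Here is a sketch; the stand-in modules are ours and only illustrate the wiring, not the actual architectures of R and M.

import torch
import torch.nn as nn

# Stand-ins: in the real framework, R has a linear term and a perturbation
# term, and M is the mapping network of Section 3.2.
linear_part = nn.Linear(64 * 64, 64 * 64, bias=False)
perturbation_part = nn.Sequential(nn.Linear(64 * 64, 64 * 64), nn.ReLU())
mapping_net = nn.Sequential(nn.Linear(1, 10), nn.ReLU(), nn.Linear(10, 1))

optimizer = torch.optim.Adam(
    [
        {"params": linear_part.parameters(), "lr": 1e-4},        # linear term of R
        {"params": perturbation_part.parameters(), "lr": 1e-3},  # perturbation term of R
        {"params": mapping_net.parameters(), "lr": 1e-3},        # mapping network M
    ],
    betas=(0.9, 0.999),  # momentum parameters beta1 and beta2 as stated above
)
# The batch size of 2 would then be set on the data loader, e.g.:
# loader = torch.utils.data.DataLoader(train_set, batch_size=2, shuffle=True)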
200
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
201
+ page_content='3 Results In this section, we illustrate experimental results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
202
+ page_content=' The overall results are displayed in Figure 6, Table 1, Figure 7, Figure 8, Table 2 and Figure 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
203
+ page_content=' Here the losses for f and WΓ(f) are respectively defined by loss for f = 1 N N � i=1 ∥fi − R(WΓ(fi))∥2 ∥fi∥2 , and loss for WΓf = 1 N N � i=1 ∥WΓ(fi) − WM(R(WΓ(fi))∥2 ∥WΓ(fi)∥2 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
204
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
205
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
206
+ page_content='1 Low resolution data We conduct the simulation utilizing a dataset of images with a size of 64×64.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
207
+ page_content=' The results for the mapping networks are shown in Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
208
+ page_content=' We see that the mapping networks accurately approximates Γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
209
+ page_content=' When Γ3 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
210
+ page_content='3f 2 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
211
+ page_content='7, there is a difference between the plot of the mapping network M and the plot of Γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
212
+ page_content=' This is because the values of f ∈ P almost belong to [0, 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
213
+ page_content='3]∪1 and so it has little effect on WΓ(f).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
214
+ page_content=' In all cases, the process of training the mapping networks requires approximately 103 iterations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
215
+ page_content=' The results of the reconstruction networks are illustrated in Table 1 and Figure 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
216
+ page_content=' Table 1 shows the test errors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
217
+ page_content=' So it can be concluded that the reconstruction networks accurately approximate the inverse maps in each case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
218
+ page_content=' The training of the reconstruction networks necessitates approximately 105 iterations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
219
+ page_content=' Remark.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
220
+ page_content=' The assumption on Γ, (5) is crucial.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
221
+ page_content=' If constraint (5) is not given in M, it may take a long time to approximate Γ, or it may fail to find Γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
222
+ page_content=' Under the constraint, M can quickly determine Γ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
223
+ page_content=' Early determination of Γ helps the learning of reconstruction networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
224
+ page_content=' 10 Figure 6: Comparison of the mapping network M and ground truth Γ for 64 × 64 data Assumption loss for f loss for WΓ(f) Γ1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
225
+ page_content='3f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
226
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
227
+ page_content='00504 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
228
+ page_content='00702 Γ2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
229
+ page_content='3√f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
230
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
231
+ page_content='00537 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
232
+ page_content='00947 Γ3 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
233
+ page_content='3f 2 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
234
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
235
+ page_content='00557 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
236
+ page_content='00634 Γ4 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
237
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
238
+ page_content='01373 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
239
+ page_content='00456 Table 1: Test errors for f and WΓ(f) according to Γ after 102,400 iterations for 64 × 64 data 11 [2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
240
+ page_content='3Vf + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
241
+ page_content='7 [1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
242
+ page_content='3f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
243
+ page_content='7 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
244
+ page_content='0 ground truth 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
245
+ page_content='0 mapping network 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
246
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
247
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
248
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
249
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
250
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
251
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
252
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
253
+ page_content='0 [3 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
254
+ page_content='3f2 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
255
+ page_content='7 [4 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
256
+ page_content='7 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
257
+ page_content='0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
258
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
259
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
260
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
261
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
262
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
263
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
264
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
265
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
266
+ page_content='0Figure 7: Reconstruction results according to Γ for 64 × 64 data 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
267
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
268
+ page_content='2 High resolution data In the simulation for high resolution data, two linear operators T1 and T2 in the recon- struction network (4) are replaced by alternative map described in Figure 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
269
+ page_content=' The dataset is prepared with images of a size of 96 × 96.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
270
+ page_content=' Similarly to the low resolution case, the mapping network M approximates Γ accurately within 103 iterations (Figure 8).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
271
+ page_content=' On the other hand, the reconstruction networks for each Γ exhibit a slight decrease in perfor- mance but still acceptable (Table 2 and Figure 9).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
272
+ page_content=' We surmise that the slight decrease in performance is a result of the reduction in parameters brought about by the Pixel Unshuffle and Pixel Shuffle operations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
273
+ page_content=' 12 [1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
274
+ page_content='3f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
275
+ page_content='7 [2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
276
+ page_content='3V f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
277
+ page_content='7 [3 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
278
+ page_content='3f2 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
279
+ page_content='7 4 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
280
+ page_content='7 ground truth 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
281
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
282
+ page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
283
+ page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
284
+ page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
285
+ page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
286
+ page_content='0Figure 8: Comparison of the mapping network M and ground truth Γ for 96 × 96 data Assumption loss for f loss for WΓ(f) Γ1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
287
+ page_content='3f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
288
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
289
+ page_content='00860 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
290
+ page_content='01293 Γ2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
291
+ page_content='3√f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
292
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
293
+ page_content='01023 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
294
+ page_content='01679 Γ3 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
295
+ page_content='3f 2 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
296
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
297
+ page_content='00710 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
298
+ page_content='01132 Γ4 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
299
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
300
+ page_content='00689 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
301
+ page_content='69511 Table 2: Test errors for f and WΓ(f) according to Γ after 102,400 iterations for 96 × 96 data 13 [2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
302
+ page_content='3Vf + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
303
+ page_content='7 [1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
304
+ page_content='3f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
305
+ page_content='7 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
306
+ page_content='0 ground truth 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
307
+ page_content='0 mapping network 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
308
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
309
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
310
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
311
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
312
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
313
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
314
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
315
+ page_content='0 [3 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
316
+ page_content='3f2 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
317
+ page_content='7 [4 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
318
+ page_content='7 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
319
+ page_content='0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
320
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
321
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
322
+ page_content='7 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
323
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
324
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
325
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
326
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
327
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
328
+ page_content='0Figure 9: Reconstruction results according to Γ for 96 × 96 data 5 Conclusions Here, we propose a self-supervised learning for a nonlinear inverse problem with forward operator involving an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
329
+ page_content=' In medical imaging such as PAT, the initial pressure is mostly untrackable for the measured data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
330
+ page_content=' Moreover it is difficult to know the wave speed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
331
+ page_content=' So it is necessary to reconstruct the initial pressure f and the wave speed simultaneously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
332
+ page_content=' Under the simple assumption, the problem becomes a nonlinear inverse problem involving an unknown function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
333
+ page_content=' The experimental results demonstrate the high performance of the proposed algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
334
+ page_content=' Our framework can be extended to a nonlinear inverse problem involving an unknown function, formulated under more complicated situations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
335
+ page_content=' This can be an interesting line of future research.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
336
+ page_content=' References [1] Huabei Jiang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
337
+ page_content=' Photoacoustic tomography.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
338
+ page_content=' CRC Press, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
339
+ page_content=' [2] Jun Xia, Junjie Yao, and Lihong V Wang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
340
+ page_content=' Photoacoustic tomography: principles and advances.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
341
+ page_content=' Electromagnetic waves (Cambridge, Mass.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
342
+ page_content=' ), 147:1, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
343
+ page_content=' [3] Peter Kuchment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
344
+ page_content=' The Radon transform and medical imaging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
345
+ page_content=' SIAM, 2013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
346
+ page_content=' 14 [1 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
347
+ page_content='3f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
348
+ page_content='7 [2 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
349
+ page_content='3V f + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
350
+ page_content='7 [3 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
351
+ page_content='3f2 + 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
352
+ page_content='7 「4 = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
353
+ page_content='7 ground truth 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
354
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
355
+ page_content='8 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
356
+ page_content='6 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
357
+ page_content='4 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
358
+ page_content='2 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
359
+ page_content='0[4] Alexander Graham Bell.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
360
+ page_content=' On the production and reproduction of sound by light.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
361
+ page_content=' In Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
362
+ page_content=' Am.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
363
+ page_content=' Assoc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
364
+ page_content=' Adv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
365
+ page_content=' Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
366
+ page_content=', volume 29, pages 115–136, 1881.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
367
+ page_content=' [5] Idan Steinberg, David M Huland, Ophir Vermesh, Hadas E Frostig, Willemieke S Tummers, and Sanjiv S Gambhir.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
368
+ page_content=' Photoacoustic clinical imaging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
369
+ page_content=' Photoacoustics, 14:77–98, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
370
+ page_content=' [6] Gerhard Zangerl, Sunghwan Moon, and Markus Haltmeier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
371
+ page_content=' Photoacoustic tomogra- phy with direction dependent data: An exact series reconstruction approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
372
+ page_content=' Inverse Problems, 35(11):114005, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
373
+ page_content=' [7] Minghua Xu and Lihong V Wang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
374
+ page_content=' Universal back-projection algorithm for photoa- coustic computed tomography.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
375
+ page_content=' Physical Review E, 71(1):016706, 2005.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
376
+ page_content=' [8] Lauri Oksanen and Gunther Uhlmann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
377
+ page_content=' Photoacoustic and thermoacoustic tomog- raphy with an uncertain wave speed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
378
+ page_content=' Mathematical Research Letters, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
379
+ page_content=' [9] Plamen Stefanov and Gunther Uhlmann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
380
+ page_content=' Thermoacoustic tomography with variable sound speed Inverse Problems, 25(7):075011, 16, 2009.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
381
+ page_content=' [10] Jianliang Qian, Plamen Stefanov, Gunther Uhlmann, and Hongkai Zhao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
382
+ page_content=' An efficient neumann series–based algorithm for thermoacoustic and photoacoustic tomography with variable sound speed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
383
+ page_content=' SIAM Journal on Imaging Sciences, 4(3):850–883, 2011.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
384
+ page_content=' [11] Zakaria Belhachmi, Thomas Glatz, and Otmar Scherzer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
385
+ page_content=' A direct method for photoacoustic tomography with inhomogeneous sound speed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
386
+ page_content=' Inverse Problems, 32(4):045005, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
387
+ page_content=' [12] Yulia Hristova, Peter Kuchment, and Linh Nguyen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
388
+ page_content=' Reconstruction and time rever- sal in thermoacoustic tomography in acoustically homogeneous and inhomogeneous media.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
389
+ page_content=' Inverse problems, 24(5):055006, 2008.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
390
+ page_content=' [13] Minam Moon, Injo Hur, and Sunghwan Moon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
391
+ page_content=' Singular value decomposition of the wave forward operator with radial variable coefficients.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
392
+ page_content=' arXiv preprint arXiv:2208.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
393
+ page_content='10793, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
394
+ page_content=' [14] Hongyu Liu and Gunther Uhlmann.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
395
+ page_content=' Determining both sound speed and internal source in thermo-and photo-acoustic tomography.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
396
+ page_content=' Inverse Problems, 31(10):105005, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
397
+ page_content=' [15] Stephan Antholzer, Markus Haltmeier, Robert Nuster, and Johannes Schwab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
398
+ page_content=' Pho- toacoustic image reconstruction via deep learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
399
+ page_content=' In Photons Plus Ultrasound: Imaging and Sensing 2018, volume 10494, pages 433–442.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
400
+ page_content=' SPIE, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
401
+ page_content=' [16] Gregory Ongie, Ajil Jalal, Christopher A Metzler, Richard G Baraniuk, Alexan- dros G Dimakis, and Rebecca Willett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
402
+ page_content=' Deep learning techniques for inverse prob- lems in imaging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
403
+ page_content=' IEEE Journal on Selected Areas in Information Theory, 1(1):39–56, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
404
+ page_content=' [17] Ge Wang, Jong Chul Ye, and Bruno De Man.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
405
+ page_content=' Deep learning for tomographic image reconstruction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
406
+ page_content=' Nature Machine Intelligence, 2(12):737–748, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
407
+ page_content=' 15 [18] Janek Gröhl, Melanie Schellenberg, Kris Dreher, and Lena Maier-Hein.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
408
+ page_content=' Deep learn- ing for biomedical photoacoustic imaging: a review.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
409
+ page_content=' Photoacoustics, 22:100241, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
410
+ page_content=' [19] Changchun Yang, Hengrong Lan, Feng Gao, and Fei Gao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
411
+ page_content=' Review of deep learning for photoacoustic imaging.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
412
+ page_content=' Photoacoustics, 21:100215, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
413
+ page_content=' [20] Stephan Antholzer, Markus Haltmeier, and Johannes Schwab.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
414
+ page_content=' Deep learning for photoacoustic tomography from sparse data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
415
+ page_content=' Inverse problems in science and engineering, 27(7):987–1005, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
416
+ page_content=' [21] Jiasheng Zhou, Da He, Xiaoyu Shang, Zhendong Guo, Sung-Liang Chen, and Jiajia Luo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
417
+ page_content=' Photoacoustic microscopy with sparse data by convolutional neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
418
+ page_content=' Photoacoustics, 22:100242, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
419
+ page_content=' [22] Steven Guan, Amir A Khan, Siddhartha Sikdar, and Parag V Chitnis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
420
+ page_content=' Limited- view and sparse photoacoustic tomography for neuroimaging with deep learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
421
+ page_content=' Scientific reports, 10(1):1–12, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
422
+ page_content=' [23] Huijuan Zhang, LI Hongyu, Nikhila Nyayapathi, Depeng Wang, Alisa Le, Leslie Ying, and Jun Xia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
423
+ page_content=' A new deep learning network for mitigating limited-view and under-sampling artifacts in ring-shaped photoacoustic tomography.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
424
+ page_content=' Computerized Medical Imaging and Graphics, 84:101720, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
425
+ page_content=' [24] Hongming Shan, Christopher Wiedeman, Ge Wang, and Yang Yang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
426
+ page_content=' Simultane- ous reconstruction of the initial pressure and sound speed in photoacoustic tomog- raphy using a deep-learning approach.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
427
+ page_content=' In Novel Optical Systems, Methods, and Applications XXII, volume 11105, page 1110504.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
428
+ page_content=' International Society for Optics and Photonics, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
429
+ page_content=' [25] Maarten V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
430
+ page_content=' de Hoop, Matti Lassas, and Christopher A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
431
+ page_content=' Wong, Deep learn- ing architectures for nonlinear operator functions and nonlinear inverse problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
432
+ page_content=' Mathematical Statistics and Learning, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
433
+ page_content=' 1/2(4):1–86, 2021 [26] Saeed Shurrab and Rehab Duwairi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
434
+ page_content=' Self-supervised learning methods and appli- cations in medical imaging analysis: A survey.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
435
+ page_content=' PeerJ Computer Science, 8:e1045, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
436
+ page_content=' [27] Longlong Jing and Yingli Tian.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
437
+ page_content=' Self-supervised visual feature learning with deep neural networks: A survey.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
438
+ page_content=' IEEE transactions on pattern analysis and machine intelligence, 43(11):4037–4058, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
439
+ page_content=' [28] Sunghwan Moon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
440
+ page_content=' Inversion formula for a radon-type transform arising in photoa- coustic tomography with circular integrating detectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
441
+ page_content=' Advances in Mathematical Physics, 2018, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
442
+ page_content=' [29] Rim Gouia-Zarrad, Souvik Roy, and Sunghwan Moon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
443
+ page_content=' Numerical inversion and uniqueness of a spherical radon transform restricted with a fixed angular span.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
444
+ page_content=' Applied Mathematics and Computation, 408:126338, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
445
+ page_content=' 16 [30] Olaf Ronneberger, Philipp Fischer, and Thomas Brox.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
446
+ page_content=' U-net: Convolutional net- works for biomedical image segmentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
447
+ page_content=' In International Conference on Medical image computing and computer-assisted intervention, pages 234–241.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
448
+ page_content=' Springer, 2015.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
449
+ page_content=' [31] Wenzhe Shi, Jose Caballero, Ferenc Huszár, Johannes Totz, Andrew P Aitken, Rob Bishop, Daniel Rueckert, and Zehan Wang, Real-time single image and video super- resolution using an efficient sub-pixel convolutional neural network Proceedings of the IEEE conference on computer vision and pattern recognition,1874–1883, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
450
+ page_content=' [32] George Cybenko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
451
+ page_content=' Approximation by superpositions of a sigmoidal function.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
452
+ page_content=' Mathematics of control, signals and systems, 2(4):303–314, 1989.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
453
+ page_content=' [33] Jae-Mo Kang and Sunghwan Moon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
454
+ page_content=' Error bounds for ReLU networks with depth and width parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
455
+ page_content=' Japan Journal of Industrial and Applied Mathematics, To appear.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
456
+ page_content=' [34] T Douglas Mast, Laurent P Souriau, D-LD Liu, Makoto Tabei, Adrian I Nachman, and Robert C Waag.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
457
+ page_content=' A k-space method for large-scale models of wave propagation in tissue.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
458
+ page_content=' IEEE transactions on ultrasonics, ferroelectrics, and frequency control, 48(2):341–354, 2001.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
459
+ page_content=' [35] Benjamin T Cox, S Kara, Simon R Arridge, and Paul C Beard.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
460
+ page_content=' k-space propagation models for acoustically heterogeneous media: Application to biomedical photoacous- tics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
461
+ page_content=' The Journal of the Acoustical Society of America, 121(6):3453–3464, 2007.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
462
+ page_content=' [36] Lawrence A Shepp and Benjamin F Logan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
463
+ page_content=' The Fourier reconstruction of a head section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
464
+ page_content=' IEEE Transactions on nuclear science, 21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
465
+ page_content='3:21–43, 1974.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
466
+ page_content=' [37] Diederik P Kingma and Jimmy Ba.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
467
+ page_content=' Adam: A method for stochastic optimization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
468
+ page_content=' arXiv preprint arXiv:1412.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
469
+ page_content='6980, 2014.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
470
+ page_content=' 17' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/ONFAT4oBgHgl3EQfyh6B/content/2301.08693v1.pdf'}
OtAzT4oBgHgl3EQflP1e/content/2301.01544v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abd493acf2dfeb9650fb941c2d7854ce27be46790966a188764b7a79e71edbfb
3
+ size 608960