jackkuo committed on
Commit a55d3c4 · verified · 1 Parent(s): fa1a0c0

Add files using upload-large-folder tool

Files changed (50)
  1. .gitattributes +6 -0
  2. 29E3T4oBgHgl3EQfoAqh/content/tmp_files/2301.04630v1.pdf.txt +1590 -0
  3. 29E3T4oBgHgl3EQfoAqh/content/tmp_files/load_file.txt +0 -0
  4. 4NE0T4oBgHgl3EQfvQFL/content/tmp_files/2301.02615v1.pdf.txt +996 -0
  5. 4NE0T4oBgHgl3EQfvQFL/content/tmp_files/load_file.txt +0 -0
  6. 4NFQT4oBgHgl3EQfHjWk/content/tmp_files/2301.13249v1.pdf.txt +2064 -0
  7. 4NFQT4oBgHgl3EQfHjWk/content/tmp_files/load_file.txt +0 -0
  8. 59E2T4oBgHgl3EQf7Ahe/content/tmp_files/2301.04205v1.pdf.txt +1883 -0
  9. 59E2T4oBgHgl3EQf7Ahe/content/tmp_files/load_file.txt +0 -0
  10. 89AyT4oBgHgl3EQfdPcr/content/tmp_files/2301.00297v1.pdf.txt +475 -0
  11. 89AyT4oBgHgl3EQfdPcr/content/tmp_files/load_file.txt +489 -0
  12. A9FKT4oBgHgl3EQfWS5f/content/tmp_files/2301.11791v1.pdf.txt +814 -0
  13. A9FKT4oBgHgl3EQfWS5f/content/tmp_files/load_file.txt +0 -0
  14. C9E0T4oBgHgl3EQfQQB9/content/tmp_files/2301.02190v1.pdf.txt +1371 -0
  15. C9E0T4oBgHgl3EQfQQB9/content/tmp_files/load_file.txt +0 -0
  16. DtE2T4oBgHgl3EQfSQej/content/tmp_files/2301.03791v1.pdf.txt +573 -0
  17. DtE2T4oBgHgl3EQfSQej/content/tmp_files/load_file.txt +276 -0
  18. GdE1T4oBgHgl3EQfFAO9/content/tmp_files/2301.02898v1.pdf.txt +696 -0
  19. GdE1T4oBgHgl3EQfFAO9/content/tmp_files/load_file.txt +350 -0
  20. GtA0T4oBgHgl3EQfBv8y/content/tmp_files/2301.01979v1.pdf.txt +0 -0
  21. GtA0T4oBgHgl3EQfBv8y/content/tmp_files/load_file.txt +0 -0
  22. IdE4T4oBgHgl3EQfhA2z/content/tmp_files/2301.05122v1.pdf.txt +554 -0
  23. IdE4T4oBgHgl3EQfhA2z/content/tmp_files/load_file.txt +375 -0
  24. KtFJT4oBgHgl3EQfxS3S/content/tmp_files/2301.11634v1.pdf.txt +0 -0
  25. KtFJT4oBgHgl3EQfxS3S/content/tmp_files/load_file.txt +0 -0
  26. MtAyT4oBgHgl3EQfs_ll/content/tmp_files/2301.00586v1.pdf.txt +1576 -0
  27. MtAyT4oBgHgl3EQfs_ll/content/tmp_files/load_file.txt +0 -0
  28. N9FIT4oBgHgl3EQfdisk/content/tmp_files/2301.11270v1.pdf.txt +0 -0
  29. N9FIT4oBgHgl3EQfdisk/content/tmp_files/load_file.txt +0 -0
  30. OdE3T4oBgHgl3EQfxQsz/content/tmp_files/2301.04709v1.pdf.txt +0 -0
  31. OdE3T4oBgHgl3EQfxQsz/content/tmp_files/load_file.txt +0 -0
  32. OdFKT4oBgHgl3EQffi6d/content/tmp_files/2301.11830v1.pdf.txt +855 -0
  33. OdFKT4oBgHgl3EQffi6d/content/tmp_files/load_file.txt +0 -0
  34. PNA0T4oBgHgl3EQfC__J/content/tmp_files/2301.01998v1.pdf.txt +1198 -0
  35. PNA0T4oBgHgl3EQfC__J/content/tmp_files/load_file.txt +0 -0
  36. RdE2T4oBgHgl3EQfWQda/content/tmp_files/2301.03831v1.pdf.txt +1310 -0
  37. RdE2T4oBgHgl3EQfWQda/content/tmp_files/load_file.txt +0 -0
  38. SNE4T4oBgHgl3EQfLAy5/content/tmp_files/2301.04935v1.pdf.txt +2879 -0
  39. SNE4T4oBgHgl3EQfLAy5/content/tmp_files/load_file.txt +0 -0
  40. T9E2T4oBgHgl3EQftQh4/content/tmp_files/2301.04068v1.pdf.txt +5168 -0
  41. T9E2T4oBgHgl3EQftQh4/content/tmp_files/load_file.txt +0 -0
  42. TdA0T4oBgHgl3EQfEP9z/content/tmp_files/2301.02015v1.pdf.txt +1981 -0
  43. TdA0T4oBgHgl3EQfEP9z/content/tmp_files/load_file.txt +0 -0
  44. VtFIT4oBgHgl3EQfhCtr/content/tmp_files/2301.11286v1.pdf.txt +0 -0
  45. VtFIT4oBgHgl3EQfhCtr/content/tmp_files/load_file.txt +0 -0
  46. XdFLT4oBgHgl3EQfUC-H/content/tmp_files/2301.12047v1.pdf.txt +1631 -0
  47. XdFLT4oBgHgl3EQfUC-H/content/tmp_files/load_file.txt +0 -0
  48. _NAzT4oBgHgl3EQfFvpH/content/tmp_files/2301.01015v1.pdf.txt +1328 -0
  49. _NAzT4oBgHgl3EQfFvpH/content/tmp_files/load_file.txt +0 -0
  50. _NAzT4oBgHgl3EQfvf2q/content/tmp_files/2301.01708v1.pdf.txt +2069 -0
.gitattributes CHANGED
@@ -201,3 +201,9 @@ ptFPT4oBgHgl3EQf7zXe/content/2301.13206v1.pdf filter=lfs diff=lfs merge=lfs -text
201   ytE1T4oBgHgl3EQfQwPe/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
202   WNAyT4oBgHgl3EQfu_m7/content/2301.00624v1.pdf filter=lfs diff=lfs merge=lfs -text
203   I9FAT4oBgHgl3EQfux7Z/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
204 + w9FJT4oBgHgl3EQfgiwX/content/2301.11561v1.pdf filter=lfs diff=lfs merge=lfs -text
205 + zNE4T4oBgHgl3EQfAQs4/content/2301.04841v1.pdf filter=lfs diff=lfs merge=lfs -text
206 + fNE_T4oBgHgl3EQf2BxV/content/2301.08338v1.pdf filter=lfs diff=lfs merge=lfs -text
207 + x9E3T4oBgHgl3EQflwp-/content/2301.04610v1.pdf filter=lfs diff=lfs merge=lfs -text
208 + h9E0T4oBgHgl3EQfpgF0/content/2301.02540v1.pdf filter=lfs diff=lfs merge=lfs -text
209 + c9E3T4oBgHgl3EQfeQoX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
29E3T4oBgHgl3EQfoAqh/content/tmp_files/2301.04630v1.pdf.txt ADDED
@@ -0,0 +1,1590 @@
ShadowNav: Crater-Based Localization for Nighttime and Permanently Shadowed Region Lunar Navigation

Abhishek Cauligi* (abhishek.s.cauligi@jpl.nasa.gov), R. Michael Swan* (robert.m.swan@jpl.nasa.gov), Hiro Ono (masahiro.ono@jpl.nasa.gov), Shreyansh Daftry (shreyansh.daftry@jpl.nasa.gov), John Elliott (john.o.elliott@jpl.nasa.gov), Larry Matthies (lhm@jpl.nasa.gov), Deegan Atha (deegan.j.atha@jpl.nasa.gov)

Jet Propulsion Laboratory, California Institute of Technology, Pasadena, CA 91109, USA

arXiv:2301.04630v1 [cs.RO] 11 Jan 2023
Abstract—There has been an increase in interest in missions that drive significantly longer distances per day than what has currently been performed. For example, Endurance-A proposes driving several kilometers a day in order to reach its target traverse of 2000 km in 4 years. Additionally, some of these proposed missions, including Endurance-A and rovers for Permanently Shadowed Regions (PSRs) of the moon, require autonomous driving and absolute localization in darkness. Endurance-A proposes to drive 1200 km of its total traverse at night. The lack of natural light available during these missions limits what can be used as visual landmarks and the range at which landmarks can be observed. In order for planetary rovers to traverse long ranges, onboard absolute localization is critical to the rover's ability to maintain its planned trajectory and avoid known hazardous regions. Currently, the localization performed onboard rovers is relative to the rover's frame of reference and is performed through the integration of wheel and visual odometry and inertial measurements. To accomplish absolute localization, a "ground-in-the-loop" (GITL) operation is performed wherein a human operator matches local maps or images from onboard with orbital images and maps. This GITL operation limits the distance that can be driven in a day to a few hundred meters, which is the distance over which the rover can maintain acceptable localization error via relative methods. Previous work has shown that using craters as landmarks is a promising approach for performing absolute localization on the moon during the day. In this work we present a method of absolute localization that utilizes craters as landmarks and matches detected crater edges on the surface with known craters in orbital maps. We focus on a localization method based on a perception system which has an external illuminator and a stereo camera. While other methods based on lidar exist, lidar is not currently planned for deployment on the currently proposed nighttime and PSR missions. In this paper, we evaluate (1) both monocular and stereo based surface crater edge detection techniques, (2) methods of scoring the crater edge matches for optimal localization, and (3) localization performance on simulated Lunar surface imagery at night. We demonstrate that this technique shows promise for maintaining the absolute localization error of less than 10 m required for most planetary rover missions.
TABLE OF CONTENTS

1. INTRODUCTION
2. RELATED WORKS
3. APPROACH
4. DATASETS OVERVIEW
5. CRATER DETECTION PERFORMANCE
6. LOCALIZATION PERFORMANCE
7. CONCLUSIONS
ACKNOWLEDGMENTS
REFERENCES
BIOGRAPHY

*Abhishek Cauligi and R. Michael Swan contributed equally to this work.
978-1-6654-9032-0/23/$31.00 ©2023. California Institute of Technology. Government sponsorship acknowledged.
The research was carried out at the Jet Propulsion Laboratory, California Institute of Technology, under a contract with the National Aeronautics and Space Administration (80NM0018D0004).
Figure 1: The ShadowNav localization algorithm performs absolute localization for a Lunar rover mission located at the red position in the left image by matching known craters from (left) an orbital map against (right) detected craters from the rover stereo cameras.
1. INTRODUCTION

Long-range Lunar navigation, and specifically navigating within darkness, has gained a significant amount of traction recently. For example, missions to Permanently Shadowed Regions (PSRs) of the moon have been proposed, such as the VIPER mission [1], [2] and the Lunar Polar Volatiles Explorer mission concepts. Furthermore, there are missions that have proposed driving during the Lunar night in order to traverse longer distances. For example, the new Decadal Survey [3] recommends that the Endurance-A Lunar rover mission be implemented as a strategic medium-class mission and as the highest priority of the Lunar Discovery and Exploration Program. The Endurance-A rover proposal plans to drive 2000 km in the South Pole-Aitken (SPA) Basin to collect approximately 100 kg of samples, which would be delivered to Artemis astronauts. This mission concept study [4] identified several key capabilities required to complete this mission: (1) Endurance will need to drive 70% of its total distance during the night to enable daytime hours dedicated to science and sampling. (2) The mission will require onboard autonomy for the majority of its operations, while the ground only handles contingencies. (3) Global localization is necessary to maintain an error of <10 m relative to orbital maps.

Figure 2: Schematic of the ShadowNav algorithm proposed to perform absolute localization on the Moon. A particle filter is used to match craters detected by the rover stereo cameras with known craters from an orbital map. (Pipeline stages: stereo image generation, disparity generation, disparity hole filling, crater edge detection, local-to-global transform, Q-Score computation, and the particle filter step, with orbital image generation supplying the map.)

At present, existing rovers perform onboard localization relative to their own reference frame. This is accomplished by using wheel and visual odometry and inertial measurements. Absolute localization is performed periodically with a "ground-in-the-loop" (GITL) operation. This is acceptable for current driving distances, which are a few hundred meters a day. Existing relative localization has around 2% drift, so a rover can drive at most 500 m before the error grows larger than 10 m. In order to traverse longer distances, on the order of the several kilometers a day proposed by missions such as Endurance-A, autonomous absolute localization becomes critical. At present the Lunar surface does not have continuous communication with Earth; having to perform several GITL operations for absolute localization in a single day would therefore significantly reduce the distance that can be driven. The lack of frequent absolute localization would lead to errors greater than the maximum 10 m localization error, which would present significant risks to the mission through deviations from the desired trajectory and exposure to unidentified obstacles.
Craters as landmarks have been shown to be promising for absolute localization on the Moon [5], [6]. However, the lack of natural light available while driving within a PSR or during the Lunar night limits what can be used as a landmark and the range at which landmarks can be observed. Using craters is still promising, as the average distance between craters of ≥10 m diameter is about 100 m on terrain with relatively fresh craters and about 10 m on terrain with old craters [7]. Additionally, the Lunar Reconnaissance Orbiter Camera (LROC) provides digital elevation models (DEMs) with a resolution between 0.5 m and 5 m per pixel [8], and some DEMs exist within PSRs [9].
In this work, we propose using a stereo camera with an illuminator positioned below the stereo camera in order to detect crater rims in darkness. The use of such an illuminator is motivated by the Endurance-A mission concept study [4], which proposes a stereo camera with an illumination source as the perception system for a rover operating in darkness. Global localization is then accomplished by matching the detected crater rims against known craters from an orbital image, as shown in Figure 2. To handle the uncertainty and nonlinearity of the crater rim detection model, we utilize a particle filter with a novel Q-Score metric for ranking potential crater matches in order to estimate the absolute position of the rover within an orbital map. This paper presents the initial results of both crater detection in darkness and absolute localization in simulation, which are the results of the first two years of a planned three-year effort to validate this approach. Work is ongoing to collect data and validate this approach at a real-world Lunar analogue test location.
Statement of Contributions: This paper presents an approach to absolute localization on the Moon that can be performed while a rover is in darkness, such as within a PSR or during the Lunar night. The main contributions of the work are summarized below:

1. We developed a simulator based on Blender [10] which renders simulated stereo imagery of the Lunar surface in darkness at a known orbital position. The rendering process utilizes the Hapke lighting model for more accurate surface reflectance as well as DEMs captured by LROC for realistic crater distributions.
2. We evaluated different crater-edge detection techniques and demonstrate a method which captures 80% of the leading crater arc at 10 m and can detect crater arcs out to 20 m.
3. We present a method to localize a rover within an orbital map using surface crater-edge detections and known orbital craters, based on a particle filter and a metric we call the Q-Score, which is detailed in Section 3.
4. We demonstrate that our absolute localization technique can achieve less than 2 m absolute error with an assumed odometry drift of 2% and an initial 3-sigma uncertainty of 3 m.
2. RELATED WORKS

Absolute localization on planetary surfaces is critical for expanding the range rovers can travel in a day and over the course of a mission, and many previous works have investigated this problem. Techniques have been proposed for the Martian surface: works such as [11], [12] consider far-range and horizon features, which lie at ranges beyond what is expected to be visible in the dark, and [13] proposes a technique for absolute localization on Mars that uses rocks and DEM surface features.

Figure 3: The impact of the placement of the light source on crater rim shadows. Left: Sample render of a crater with the light source even with the camera. Right: Sample render of a crater with the light source below the camera.

In our work, we focus on the problem of global localization in darkness, which is relevant for permanently shadowed regions of the moon, for which there has been a surge of interest in conducting scientific measurements and activities [14]. Our solution approach is inspired by a host of recent works that seek to leverage orbital maps for global rover localization in these shadowed regions. In [13], the authors propose a localization procedure that matches an observed rover image with an orbital map, but this approach neglects the rover motion model and yields a deterministic estimate of the robot belief. A purely data-driven approach is presented in [15], wherein a convolutional neural network is trained on synthetic data to match the rover observations with orbital imagery. Closest to our approach, [16] presents a particle filtering technique to compare rover monocular camera imagery with orbital imagery and uses a Siamese neural network to assign each particle a likelihood weight. The authors in [6] propose a similar approach for Lunar absolute localization known as LunarNav. However, LunarNav focuses on the daytime localization problem and therefore considers different methods of crater matching that rely on greater knowledge of the surface geometry than is available in the nighttime case.
3. APPROACH

In this work, we propose an absolute localization approach which utilizes a crater's leading edge as the landmark for localization. The end result of this approach is an estimated position and uncertainty within the orbital frame. At present, this approach only considers position localization; rover orientation is assumed to be given by a star tracker, which can compute orientation in three dimensions from celestial measurements. Our approach consists of two primary components:

1. A leading-edge crater detection methodology for use with a Lunar rover equipped with a stereo camera system and illumination source.
2. A particle filter for computing a position belief based on the robot motion model and a score computed from the association of detected crater edges with known orbital ground truth craters, which we call the Q-Score.
A. Surface Crater Detection

In order to identify craters on the surface, the system was designed to be used in conjunction with a perception system that contains a stereo camera and an illumination source, with the illumination source mounted beneath the stereo camera. Examples of simulated images with the light at the same height as the cameras and with the light positioned beneath the cameras are shown in Figure 3. It was observed that placing the illumination source below the camera results in a shadow at the leading edge of a negative obstacle. Furthermore, offsetting the light from the cameras reduced the impact of the Hapke model washing out some of the surface texture. Further details on the Hapke model and its impact on surface terrain are provided in Section 4.

Here, we first review the three different techniques studied in this work for detecting a crater's leading edge: (1) a method that detects jumps within stereo disparities, (2) a Canny edge detector used to find the shadow on the leading edge, and (3) a convolutional neural network (CNN)-based edge detector that uses both the monocular and disparity images as input.

1. Stereo Disparity Discontinuity Method—The first approach for leading edge crater detection relies on detecting discontinuities within the stereo disparity image. To accomplish this, the stereo disparity image must first be generated using methods such as the JPLV algorithm [17] or the Semi-Global Block Matching (SGBM) approach [18], among others. To account for the low contrast that may be present in the Lunar rover case, Contrast Limited Adaptive Histogram Equalization (CLAHE) is first run on the input images prior to running stereo. CLAHE is an adaptive histogram equalization that operates on sub-regions of an image, which allows more consistent equalization across different lighting conditions within an image; this is useful here because there is a light-to-dark gradient from near to far within the images. The resulting disparity image is then scanned column by column and, when the difference between two neighboring disparities is greater than a pre-defined threshold, the pixel at the larger index is marked as a crater edge. Any numerical issues stemming from stereo holes are handled by omitting pixels with spurious values during the comparison.
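The following sketch illustrates this pipeline using OpenCV's CLAHE and SGBM implementations; the clip limit, tile size, disparity count, and jump threshold are illustrative placeholders rather than values taken from the paper.

```python
import cv2
import numpy as np

def detect_disparity_discontinuities(left_gray, right_gray, jump_thresh=4.0):
    """Sketch of the stereo disparity discontinuity method."""
    # CLAHE equalization to handle the near-to-far illumination falloff.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    left_eq, right_eq = clahe.apply(left_gray), clahe.apply(right_gray)

    # Semi-Global Block Matching; OpenCV returns fixed-point disparities (x16).
    sgbm = cv2.StereoSGBM_create(minDisparity=0, numDisparities=64, blockSize=7)
    disp = sgbm.compute(left_eq, right_eq).astype(np.float32) / 16.0
    disp[disp <= 0.0] = np.nan  # treat stereo holes as missing values

    edges = np.zeros(disp.shape, dtype=bool)
    for col in range(disp.shape[1]):
        column = disp[:, col]
        valid = np.flatnonzero(~np.isnan(column))  # skip spurious/hole pixels
        for a, b in zip(valid[:-1], valid[1:]):
            # A large jump between neighboring valid disparities marks an edge.
            if abs(column[b] - column[a]) > jump_thresh:
                edges[b, col] = True
    return edges
```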
2. Canny Edge Detector Method—For sensor configurations that place the illuminator beneath the stereo cameras, shadows appear on the leading edge of negative obstacles. In such cases, a Canny edge detector can be used to pick out the stark, dark contrasting line along the rim. In this work, the Canny edge detector from OpenCV [19] is used to find these shadows.
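As a minimal illustration, the shadow band can be extracted with OpenCV's Canny detector on a CLAHE-equalized image; the file name and thresholds below are placeholders, not values from the paper.

```python
import cv2

left_gray = cv2.imread("left.png", cv2.IMREAD_GRAYSCALE)  # hypothetical input
left_eq = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8)).apply(left_gray)
# Strong gradients along the dark shadow band mark the crater's leading edge.
shadow_edges = cv2.Canny(left_eq, threshold1=50, threshold2=150)
```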
3. CNN-Based Edge Detector Method—The Holistically-Nested Edge Detection (HED) approach provides a CNN-based deep learning method for leading edge crater detection [20], and can be run directly using the publicly released neural network weights. HED is capable of performing both monocular and stereo depth based edge detection. For HED to perform edge detection within a depth image, it generates a three-channel image that contains the horizontal disparity, the height above ground, and the angle of the local surface normal with the inferred direction of gravity. The RGB and depth predictions of the CNN are then merged to generate the desired output.
Positive Obstacle False Positive Rejection—One shortcoming of the aforementioned leading edge crater detection approaches is their susceptibility to false positives in the presence of positive obstacles. To account for this, the detected edge points are passed through a filter that removes points whose far side has a detected negative or flat slope. Detected edge points are kept only if, within the region directly beyond the detected edge, there exists a positive slope, or if there is not enough stereo data to accurately compute the slope. The case of a detected positive slope is assumed to correspond to the rising far wall of the crater, consistent with the detected edge being the leading edge of a negative obstacle. Alternatively, a detected edge is also retained if the far edge is not captured due to low light conditions, as this is assumed to indicate the presence of a large crater.

Algorithm 1: Q-Score Computation
Require: belief b_i^t, set of crater observations {z_{0,rover}^t, ..., z_{m,rover}^t}, set of ground truth craters {c_{0,world}^t, ..., c_{ℓ,world}^t}, positive value ε
1: Q_inc ← ε
2: for i = 1, ..., m do
3:   z_{i,world}^t ← rover_to_world(z_{i,rover}^t)
4:   d_cr ← min_j ||c_{j,world} − z_{i,world}^t||
5:   Q_inc ← Q_inc + d_cr
6: end for
7: Q_score ← min(1, ((1/m) Q_inc)^(−1))
8: return Q_score

Algorithm 2: ShadowNav Particle Filtering Algorithm
Require: initial belief distribution (μ_0, Σ_0), number of particles N_s, effective particle threshold N_eff,thresh
1: {b_1^0, ..., b_{N_s}^0} ← sample_beliefs(μ_0, Σ_0)
2: {w_1^0, ..., w_{N_s}^0} ← {1, ..., 1}
3: t ← 1
4: while particle filter running do
5:   {z_0^t, ..., z_m^t} ← get_observations()
6:   {q_1^t, ..., q_{N_s}^t} ← {0, ..., 0}
7:   for i = 1, ..., N_s do
8:     b_i^t ← propagate_sample(b_i^{t−1})
9:     q_i^t ← log Q_score(b_i^t, {z_0^t, ..., z_m^t})
10:  end for
11:  q_min^t ← min(q_1^t, ..., q_{N_s}^t)
12:  for i = 1, ..., N_s do
13:    w_i^t ← w_i^{t−1} + q_i^t − q_min^t
14:  end for
15:  N_eff ← compute_Neff(w_1^t, ..., w_{N_s}^t)
16:  if N_eff ≤ N_eff,thresh then
17:    {b_1^t, ..., b_{N_s}^t} ← resample_beliefs({b_i^t}, {w_i^t})
18:    {w_1^t, ..., w_{N_s}^t} ← {1, ..., 1}
19:  end if
20:  t ← t + 1
21: end while
B. Particle Filter

Here, we provide an overview of the proposed ShadowNav particle filtering approach, beginning with further details on the Q-Score metric that is used in the belief update step.

Q-Score—The Q-Score provides the measurement probability of a position belief based on rover-frame observations and an orbital map. The procedure for computing the Q-Score is given in Algorithm 1. The algorithm takes as input a given belief b_i^t, a set of m observed edges in the rover frame, and a set of ℓ ground truth craters to associate these measurements with. A value Q_inc is initialized to a negligibly small positive value ε to avoid later divide-by-zero issues (Line 1). Next, each measurement z_i^t in the rover frame is converted to the world frame (Line 3) and the minimum distance to an edge from the ground truth map is computed (Line 4). Q_inc is incremented by the distance between the observed edge and its associated ground truth edge (Line 5). The Q-Score is computed as the reciprocal of the mean distance (1/m)Q_inc, with a min operation applied to ensure that the score lies between 0 and 1 (Line 7). This implies that observation and belief pairs which are less than 1 m away from ground truth receive the same score as those exactly 1 m away, which is seen as acceptable given the orbital DEM resolution and the mission concept localization requirements.

Algorithm 3: Systematic Resampling
Require: particles {b_1^t, ..., b_{N_s}^t} and associated log-weights {w_1^t, ..., w_{N_s}^t}
1: n^t ← log(Σ_{i=1}^{N_s} exp(w_i^t))
2: {w̃_1^t, ..., w̃_{N_s}^t} ← {0, ..., 0}
3: for i = 1, ..., N_s do
4:   w̃_i^t ← exp(w_i^t − n^t)
5: end for
6: {q_1, ..., q_{N_s}} ← cum_sum({w̃_1^t, ..., w̃_{N_s}^t})
7: n ← 0; m ← 0
8: u_0 ∼ U(0, 1/N_s)
9: while n < N_s do
10:  u ← u_0 + n/N_s
11:  while q_m ≤ u do
12:    m ← m + 1
13:  end while
14:  n ← n + 1
15:  b_n^t ← b_m^t
16: end while
17: return {b_1^t, ..., b_{N_s}^t}
In addition to the shortest distance formulation of Line 4, additional approaches were explored for determining the Q-Score. One alternative fit a Gaussian distribution to the orbital map crater edges, with the Q-Score then computed based on the intensity (i.e., distance to the computed mean) of the point hit by each observation, or 0 when no point was hit. In practice, it was determined that the shortest distance formulation provided the most robust results for use with the particle filter and also did not require additional projection calculations to project each belief from the orbital frame to the rover frame.
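A minimal Python sketch of the Q-Score is given below, under simplifying assumptions not spelled out in the paper: 2-D positions, a known rover heading (so rover-to-world reduces to a translation by the particle's belief), and orbital craters given as an array of edge points.

```python
import numpy as np

def q_score(belief_xy, edges_rover, crater_edges_world, eps=1e-6):
    """Sketch of Alg. 1: score one particle against the orbital map."""
    q_inc = eps  # small positive init to avoid divide-by-zero
    edges_world = belief_xy + edges_rover  # rover-to-world as a translation
    for z in edges_world:
        # distance from the observed edge point to the nearest orbital edge
        q_inc += np.min(np.linalg.norm(crater_edges_world - z, axis=1))
    mean_dist = q_inc / len(edges_rover)
    # reciprocal of the mean distance, clipped so the score lies in (0, 1]
    return min(1.0, 1.0 / mean_dist)
```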
Figure 4: The opposition effect simulated during the day and its effect at night with an external illuminator. (a) Sample of terrain with the light 90° from the camera. (b) Sample opposition effect during the day. (c) Sample effect of surface reflectance at night with an illuminator.

Overview—A description of the ShadowNav particle filtering algorithm is given in Alg. 2. The algorithm takes as input a Gaussian belief distribution (μ_0, Σ_0) assumed for the initial robot position, the number of particles N_s to use in the particle filter, and a threshold N_eff,thresh on the effective number of particles used to trigger resampling. The filter is initialized by sampling N_s particles from the initial belief distribution and assigning each particle an equal importance weight (Lines 1-2). As is common in particle filtering implementations [21], we use the log of the weights for improved numerical stability of the weight update step [22]. Given a new set of crater observations (Line 5), a set of Q-Score values is initialized, one per particle (Line 6). After applying the motion model update to each particle (Line 8), the Q-Score of each updated particle is computed using the procedure from Alg. 1 by comparing against the current measurements (Line 9). The particle weights are then updated in the log domain (Line 13), with a normalization step to ensure non-negative weights (Line 11). Next, the number of effective samples N_eff at the current iteration is calculated (Line 15). A common pitfall of particle filters is "degeneracy", wherein the weights {w_i^t} collapse around a handful of particles and computational resources are wasted on propagating low-likelihood particles [21]. If N_eff falls below the threshold N_eff,thresh, the filter is degenerating and a resampling operation is triggered (Line 17).
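The log-domain weight update (Lines 11-13) and the effective sample size check (Line 15) can be sketched as follows; the N_eff expression shown is the standard 1/Σw² estimate from [21], as the paper does not state the exact formula used.

```python
import numpy as np

def update_log_weights(log_w_prev, q_scores):
    """Alg. 2, Lines 11-13: shift log Q-Scores by their minimum so the
    additive weight update stays non-negative."""
    log_q = np.log(np.asarray(q_scores))
    return log_w_prev + log_q - log_q.min()

def effective_sample_size(log_w):
    """Line 15: standard Neff = 1 / sum(w_i^2) on normalized weights
    (an assumption; the paper does not give the formula explicitly)."""
    w = np.exp(log_w - np.logaddexp.reduce(log_w))
    return 1.0 / np.sum(w ** 2)
```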
Further details on the systematic resampling approach used in this work are provided in Algorithm 3. Given a set of particles and their associated weights, the weights are first normalized from the log domain to (0, 1] (Lines 1-4) and the cumulative sum of these normalized weights w̃_i^t is computed (Line 6). The key step in systematic resampling is to sample a single random offset u_0 from a uniform distribution over [0, 1/N_s) (Line 8) and then incrementally draw one new particle from each successive "bin" of width 1/N_s. This ensures that, after resampling, at least one particle is retained from each 1/N_s interval of the previous belief distribution.
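A vectorized sketch of Alg. 3 in NumPy is shown below; it assumes log-weights as input, matching the log-domain bookkeeping of Alg. 2.

```python
import numpy as np

def systematic_resample(particles, log_weights, rng=None):
    """Sketch of Alg. 3 for an (Ns, d) particle array."""
    if rng is None:
        rng = np.random.default_rng()
    n_s = len(particles)
    # normalize log-weights via log-sum-exp, then map back to probabilities
    w = np.exp(log_weights - np.logaddexp.reduce(log_weights))
    cum = np.cumsum(w)
    # one random offset, then one evenly spaced draw per 1/Ns bin
    u = rng.uniform(0.0, 1.0 / n_s) + np.arange(n_s) / n_s
    idx = np.minimum(np.searchsorted(cum, u, side="right"), n_s - 1)
    return particles[idx]
```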
C. Surface to Orbital Crater Transformation

At every observation step, rover-frame crater edges are detected with a stereo camera pair that provides depth, and thus a relative position for each crater edge is saved. This relative crater position is added to each particle's belief position to form an estimate of the observed crater position in the world frame for that particle. The orbital map is projected into the world frame, and the shortest-distance metric of the Q-Score algorithm is then used to determine which particle belief positions are most likely, and thus which known orbital crater each observed crater most likely matches.
Stereo hole filling—As some crater edge detections do not rely on depth information, not all pixels in the stereo depth or disparity image will have a valid depth value and, in such cases, no relative position would be available for matching rover observations to the orbital map. For such observations, a simple plane fit can be carried out to fill in the depth information. A future area of investigation is an improved stereo hole filling approach, in particular one that uses existing knowledge of what the regional terrain looks like.
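As an illustration, a least-squares plane z = ax + by + c can be fit to the valid disparities and evaluated at the holes; this is a minimal sketch of the simple plane-fit idea, not the authors' implementation.

```python
import numpy as np

def fill_disparity_holes_plane(disp):
    """Fit a plane to valid disparities and fill NaN holes from it."""
    h, w = disp.shape
    ys, xs = np.mgrid[0:h, 0:w]
    valid = ~np.isnan(disp)
    # least-squares fit of z = a*x + b*y + c over the valid pixels
    A = np.column_stack([xs[valid], ys[valid], np.ones(valid.sum())])
    (a, b, c), *_ = np.linalg.lstsq(A, disp[valid], rcond=None)
    filled = disp.copy()
    holes = ~valid
    filled[holes] = a * xs[holes] + b * ys[holes] + c
    return filled
```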
Figure 5: The trajectories used for the numerical experiments, overlaid on the orbital map with the crater numbers in black. A red square marks the start and a green circle marks the end of each trajectory. Trajectory 1 is in blue, trajectory 2 is in orange, and trajectory 3 is in purple.
4. DATASETS OVERVIEW

A. Simulated Lunar Environment

At the time of writing, no Lunar dataset existed with images captured in the dark with an illuminator. Therefore, to evaluate the approach, a simulation environment was developed using the Blender software [10]. In order to simulate images as realistically as possible, the Hapke lighting model [23], [24], [25] was implemented. This model approximates the Lunar surface reflectance and simulates the "opposition effect", which produces a focused point of extreme saturation at the location in an image where the camera ray and light source are at zero phase angle. The Hapke lighting model was implemented using the "old highland" parameters of the moon provided in [26], as these most closely match the poles of the moon where PSRs are found. The coherent backscattering opposition effect (CBOE) was left out of our implementation and only the shadow hiding opposition effect (SHOE) was implemented, as SHOE dominates most or all lighting calculations in our use case while CBOE has a negligible or very small effect. An initial implementation used the Open Shading Language (OSL); however, not all rays are available for calculation due to optimizations made in OSL, so workarounds were needed to implement the Hapke lighting model in Blender using OSL. While this was partially successful, it was not very robust and we encountered numerous issues. Instead, we opted to modify the Blender source to add the Hapke bidirectional reflectance distribution function (BRDF) directly into the Blender Cycles renderer code, which also reduced the render time by more than a factor of 2 through the use of Nvidia CUDA.
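For intuition, the SHOE term of the Hapke model is commonly written as B(g) = B_0 / (1 + tan(g/2)/h), where g is the phase angle between the camera ray and the light source; the sketch below uses placeholder values for B_0 and h rather than the "old highland" parameters from [26].

```python
import numpy as np

def shoe_opposition_term(phase_angle_rad, b0=1.0, h=0.05):
    """Shadow-hiding opposition effect: peaks sharply as the phase angle
    (camera-to-light separation) approaches zero."""
    return b0 / (1.0 + np.tan(phase_angle_rad / 2.0) / h)
```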
In order to represent a realistic 3D model of the surface geometry, DEMs produced from LROC were utilized. While LROC has enough resolution to resolve craters of around 10 m, its resolution is not quite good enough for generating smooth surface imagery. In order to have smooth surface image renders, the DEMs from LROC were scaled down to 0.25 m resolution; crater measurements in the discussions that follow are based on this scaled resolution. This scaled DEM was imported into Blender and a surface texture was added. The surface texture comprised two scales of fractal Brownian motion, a natural noise added to the DEM in order to simulate Lunar surface texture for stereo to utilize. Figure 4 shows three sample renders from our simulation, two in daylight and one at night with an illumination source. It demonstrates what the surface looks like in daytime conditions, the effect of the Hapke model during the day with the sun behind the camera, and the effect of the illumination source. From this it was observed that the full amount of daytime texture is not visible at night with an illumination source.

Table 1: Crater sizes in the crater detection dataset.

Crater  Diameter (m)  Depth (m)
1       9.2           1.0
2       9.1           0.75
3       11.3          0.84
4       4.4           0.55
5       3.7           0.40
6       8.3           0.27
7       11.9          0.44
8       3.9           0.48
9       4.1           0.49
10      2.3           0.25
B. Simulated Craters for Detection Analysis

In order to evaluate the performance of different crater detection techniques, a dataset with different sized craters was built. This dataset was generated with the Blender simulation process and comprises stereo pair renders captured between 5 m and 20 m from the front crater rim in increments of 0.1 m. It contains 10 different craters of varying sizes and depths. The sizes of the craters are given in Table 1, and their locations, keyed by crater ID, are marked in our simulated environment in Figure 5.
C. Simulated Trajectories for Localization Analysis

In order to evaluate localization performance, several trajectories were run in the simulated environment. These trajectories generated an image every 1 m and were designed to approach craters in different ways that might challenge our filtering approach. The 1 m observation spacing was used to reduce the render time of the dataset, as rendering every 0.1 m did not significantly change localization performance. An overview of the trajectories within the orbital environment is displayed in Figure 5.
D. Real Data of Negative Obstacles at Night

In addition to the simulated data, a dataset was collected in the Arroyo, a dry river bed near the NASA Jet Propulsion Laboratory. This dataset contains a few different negative obstacles imaged at 5, 10, and 15 m from the leading edge. It was used to validate that the stereo and crater edge detection algorithms work on real data collected at night with an external illuminator.
Figure 6: Plots of different metrics evaluating crater detection performance. Left: image-based crater edge detection score versus range for all craters evaluated. Right: percent of the crater front arc detected for all craters evaluated.

Figure 7: Sample stereo results using JPLV stereo on a sample negative obstacle.
5. CRATER DETECTION PERFORMANCE

A. Metrics

In order to evaluate the performance of surface crater detection, the dataset described in Section 4 was utilized. Five different combinations of algorithms were evaluated: disparity discontinuity detection with SGBM stereo, disparity discontinuity detection with JPLV stereo, HED using SGBM stereo, HED using JPLV stereo, and a hybrid of JPLV disparity discontinuity detection and Canny edge detection. The hybrid approach was implemented so that Canny only ran on the portion of the image that was 10 m away or further, since it was observed that the discontinuity detection worked well at near range but stereo began to degrade beyond 10 m.

These algorithms were evaluated with two different metrics. The first was an image-based edge scoring method which captures the average Gaussian probability that a detected edge lies on a ground truth crater edge. It utilizes a distance error computed in image space as given in Equation 1, where Error_dist_px is the pixel error from ground truth to detection, range_gt is the known ground truth range, f_l is the focal length of the camera, s_s is the sensor size of the camera, and Error_dist is the detection error in meters:

Error_dist = (Error_dist_px × range_gt) / (f_l × s_s)    (1)
[Figure 6 plot data: image-based crater edge score versus GT range (m), and percent of crater front arc detected (%) versus range (m), each plotted for Disparity JPLV, Disparity SGBM, HED JPLV, HED SGBM, and Disparity + Canny JPLV. Figure 7 panels: 5 m and 10 m.]
Figure 8: The efficacy of the JPLV HED approach over JPLV Disparity + Canny, demonstrated with simulated crater rim detection overlays for crater 1. Panels: (a)-(c) ground truth at 7, 12, and 17 m; (d)-(f) JPLV Disparity + Canny at 7, 12, and 17 m; (g)-(i) JPLV HED at 7, 12, and 17 m.
The distance error was then passed into a Gaussian. The Gaussian probabilities for all of the detected pixels were summed and normalized by the number of detected points to obtain a score. This scoring method used ground truth range values to remove the impact of stereo holes and stereo range uncertainty on the projection, in order to better isolate the performance of the crater detection algorithms themselves. The sigma value used for the Gaussian in these experiments was 0.25 m, chosen because the resolution of the DEM utilized was 0.25 m; highly accurate detections should therefore fall within this boundary.
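A sketch of this metric is shown below, combining Equation 1 with the Gaussian scoring; the function signature is illustrative and assumes per-pixel errors are given as an array.

```python
import numpy as np

def image_edge_score(pixel_errors, range_gt, focal_len, sensor_size, sigma=0.25):
    """Average Gaussian probability that detected edge pixels lie on the
    ground truth crater edge (sigma = 0.25 m, the DEM resolution)."""
    err_m = np.asarray(pixel_errors) * range_gt / (focal_len * sensor_size)  # Eq. (1)
    probs = np.exp(-0.5 * (err_m / sigma) ** 2)
    return probs.mean() if probs.size else 0.0
```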
The second metric used was "percent of front arc detected". In this metric, there is a ground truth circle for each orbital crater. Depending on the pose of the simulated cameras, the half arc of the ground truth circle nearest the camera was projected into image space. The crater detection was then matched to this half arc, and the percentage of the half arc successfully identified was determined. This metric removes the Gaussian component of the first metric; however, it does not capture false positives like the first metric does.

Figure 9: Qualitative edge detections using the JPLV disparity discontinuity detection and Canny hybrid on negative obstacles in a real dataset collected in a dry river bed at night. These results demonstrate the transferability of the crater detection algorithms from simulated data to a real environment. Panels (a)-(f) show negative obstacles at 5, 10, 10, 5, 15, and 10 m away.
B. Detection Results on Simulated Data

The results of running the different algorithms on the simulated dataset are shown in Figure 6. There were several notable observations. First, the algorithms tended to perform best around 10 m and did not improve as craters came closer. This is believed to be because, as the camera gets closer to the crater, more of the crater becomes visible and the discontinuities become smaller; beyond 10 m, however, the stereo began to degrade. Additionally, for the hybrid stereo and Canny technique, the Canny detection started at 10 m and led to a significant jump in performance. In terms of algorithm comparison, JPLV disparity discontinuity performed better than SGBM disparity discontinuity, likely because JPLV produces more holes than SGBM, and these holes at the boundary helped the disparity discontinuity detector find a better edge. HED, however, performed well with either stereo technique, likely due to its representation of depth containing height values. HED was used with its out-of-the-box weights from its authors and could likely be improved by fine-tuning on a Lunar dataset.

In addition to the quantitative results, samples of crater rim detection overlays are shown in Figure 8. These results are for crater 1, which is nearly 10 m in diameter. Both methods were able to detect the crater well, but JPLV HED had more falloff at 17 m than the Canny detector. However, the Canny edge detector was optimized for this environment, whereas HED is a generalized detector. Overall, the generalization of HED is extremely promising for crater rim detection.
C. Detection Results on Real Data

As described previously, data was collected at night from a location with negative obstacles. This dataset was used to validate the performance of the stereo and crater detection algorithms. Figure 7 presents samples of negative obstacles at 5 m and 10 m and the corresponding stereo results from JPLV. From this figure it was observed that stereo is dense up to the leading edge of the negative obstacle. Additionally, at 5 m, the far edge of the negative obstacle was captured in the disparity values; at 10 m, the far edge did contain some disparity values, but they were sparse. While not fully representative of the Lunar surface, this demonstrated that current stereo techniques are capable of working in low light conditions at the necessary ranges. The data was also used to evaluate the edge detection techniques. The JPLV disparity discontinuity and Canny edge detection hybrid was found to be the best on simulation data and was therefore used on the real data. Figure 9 shows sample detections at different ranges. These detections did contain false positives on some of the vegetation, as the false positive rejection was not run. Vegetation is not present on the moon; however, objects such as rocks could present similar issues. Overall, the negative obstacle edge detection qualitatively performs well.
6. LOCALIZATION PERFORMANCE

In this section, we provide Monte Carlo results on the performance of the proposed ShadowNav filtering algorithm. For each simulation, we analyzed the performance of the ShadowNav filter on the basis of the following metrics:

Ground truth error: We computed the weighted average mean µ_t = Σ_{i=1}^{N_s} w_i^t b_i^t at time t using the particle weights and beliefs, and computed the ℓ2-distance to the ground truth gt_t, i.e., ∥µ_t − gt_t∥_2.

Particle filter uncertainty: To capture the uncertainty associated with the current belief, we additionally computed the weighted covariance matrix Σ_t = Σ_{i=1}^{N_s} w̃_i^t (b_i^t − µ_t)(b_i^t − µ_t)^T, where the w̃_i^t are the normalized weights detailed in Alg. 3. The metric we report at each time step is the square root of the largest eigenvalue, √(λ_max(Σ_t)), which corresponds to the worst-case variance of the estimation error [27], [28].

Mahalanobis distance: The final metric we computed was the Mahalanobis distance, which measures the distance between the particle filter distribution and the ground truth position. We approximately computed this by fitting a Gaussian distribution N(µ_t, Σ_t) to the particle filter distribution, for which the Mahalanobis distance is simply the weighted ℓ2-norm √((µ_t − gt_t)^T (Σ_t)^{−1} (µ_t − gt_t)).

Figure 10: A comparison of the four resampling schemes demonstrated that systematic resampling empirically outperforms the other schemes, with relatively lower ground truth error and reduced uncertainty in the filter. (a) Ground truth error for traj. 1. (b) Filter covariance for traj. 1. (c) Ground truth error for traj. 2. (d) Filter covariance for traj. 2.

Figure 11: Monte Carlo simulations for trajectories 1-3 demonstrated the efficacy of the Q-Score based particle filtering approach at accomplishing global rover localization. (a) Ground truth error. (b) Filter covariance.

Figure 12: Two Monte Carlo trials for trajectory 1, with the ground truth in red and the weighted average belief µ_t in blue. The comparatively better performance of the filter in case A (left) is due to false positive crater rim measurements in case B (right) that led to worse localization. (a) Traj. 1 traverse, case A. (b) Traj. 1 traverse, case B.
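The three metrics above can be computed directly from the particle set, as in the following sketch (assuming an (N_s, d) particle array and already-normalized weights):

```python
import numpy as np

def filter_metrics(particles, weights, gt):
    """Ground truth error, filter uncertainty, and Mahalanobis distance."""
    mu = weights @ particles                        # weighted average mean
    diff = particles - mu
    cov = (weights[:, None] * diff).T @ diff        # weighted covariance
    err = np.linalg.norm(mu - gt)                   # l2 ground truth error
    unc = np.sqrt(np.max(np.linalg.eigvalsh(cov)))  # sqrt of largest eigenvalue
    maha = np.sqrt((mu - gt) @ np.linalg.inv(cov) @ (mu - gt))
    return err, unc, maha
```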
A. Resampling Scheme Comparison

In this section, we compare the baseline systematic resampling approach detailed in Alg. 3 against three other resampling methods: multinomial, residual, and stratified (we refer the reader to [21], [29], [30] for a thorough review of these approaches). Figure 10 presents the ground truth error and filter uncertainty for the four resampling approaches. For the two trajectories compared in Figure 10, systematic resampling led to ground truth error comparable to the other resampling approaches, but outperformed them in terms of the overall uncertainty of the filter. Indeed, we note that multinomial resampling, the most commonly employed resampling technique, fared quite poorly in terms of the variance of the filter uncertainty (Fig. 10b and 10d).
B. Baseline Performance Evaluation

Finally, we evaluated the performance of the proposed ShadowNav particle filter approach on three test trajectories. Our analysis consisted of Monte Carlo simulations with 25 seeds, 2% odometry noise, and an initial belief distribution with σ_0 = 3 m. Each simulation was run with N_s = 100 particles and systematic resampling with N_eff,thresh = 50 as the resampling threshold.

Figure 11 shows Monte Carlo simulation results for the three test trajectories. The initial uncertainty in the filter began at approximately 3 m, as expected when sampling from a distribution with σ_0 = 3 m. Thereafter, the filter was able to improve the rover position estimate, leading to an absolute error reduction of 4 m. Further, the metrics computed at the final time step, shown in Table 2, indicate convergence of the filter, with an average final error of ≤4 m.

Table 2: The metrics computed at the end of a long-range lunar traverse indicate convergence of the particle filter on trajectories 2 and 3, but spurious measurements from unlabeled craters lead to relatively poor performance on trajectory 1.

          Error        Uncertainty  Mahalanobis Dist.
Traj. 1   3.84 ± 2.78  1.84 ± 1.12  8.74 ± 10.03
Traj. 2   1.75 ± 0.78  1.32 ± 0.76  2.75 ± 1.88
Traj. 3   1.68 ± 0.70  1.39 ± 0.61  2.92 ± 1.91

As seen in Figure 13, while the filter performed well on trajectories 2 and 3, it was less performant on the trajectory 1 test case. Figure 12 illustrates the performance of the filter on trajectory 1 for two different random seeds as the rover starts from the northern edge of the orbital map and moves southward. During the middle portion of this traverse, the craters are out of sight of the rover and, as seen in Figure 11, false positive observations led to increases in the error and uncertainty of the filter. As the crater in the southern portion of the orbital map became observable to the rover, the estimate quickly improved in case A (Fig. 12a) but continued to carry a residual error in case B (Fig. 12b). This poor convergence behavior is also explained by false positive observations, wherein the filter had difficulty reconciling the front edge of the rim with the back edge, an issue that requires further investigation.

Figure 13: The final ground truth error distribution over 25 Monte Carlo simulations showed filter convergence to ≤4 m error in all cases for trajectories 2 and 3 and in the majority of cases for trajectory 1. (a) Trajectory 1. (b) Trajectory 2. (c) Trajectory 3.
+ C. Debugging
1240
+ When testing the particle filter, we found it helpful to generate
1241
+ “perfect” datasets where ground truth depth was generated
1242
+ directly from the simulator as shown in Figure 14b) and crater
1243
+ edges were plotted into the rover frame using their exact
1244
+ known world coordinates (see Fig. 8a-8c).
1245
+ This approach
1246
+ uncovered bugs with our perception and projection pipeline
1247
+ as well as the particle filter pipeline and it is highly recom-
1248
+ mended to build such a dataset for all similar work.
1249
+ 7. CONCLUSIONS
1250
+ In this work we present a system to perform autonomous
1251
+ absolute localization on a Lunar rover while it is in darkness.
1252
+ This system entails using a stereo camera and illuminator. We
1253
+ enhanced a Blender based simulation with a custom Lunar
1254
+ texture and an implementation of the Hapke model to model
1255
+ surface reflectance as accurately as possible.
1256
+ We further
1257
+ demonstrate both geometric and learning based techniques
1258
+ for detecting the leading edge of a crater with ability to
1259
+ detect some craters out to 20 m range. We propose a method
1260
+ of matching the detected leading crater rims with known
1261
+ craters within an orbital map and using these matches to score
1262
+ (a)
1263
+ Simulated
1264
+ image
1265
+ from
1266
+ Blender.
1267
+ (b) Perfect depth: blue is close,
1268
+ red is far
1269
+ Figure 14: Crater 1 viewed from 5 m away from front rim.
1270
+ observations with our Q-Score. Finally we demonstrate abso-
1271
+ lute localization within our simulation environment with less
1272
+ than 4 m error, and an absolute error reduction of 4 m upon
1273
+ detecting craters.
1274
+ These results show promise for further
1275
+ investigation in the future on more simulation environments
1276
+ as well as on to be collected real analogue datasets.
1277
+ D. Future Work
1278
+ In the future, we seek to perform several updates and addi-
1279
+ tional evaluations. The primary focus is to experimentally
1280
+ collect a nighttime dataset using representative hardware in
1281
+ an analogue Lunar environment with negative obstacles to
1282
+ evaluate the system.
1283
+ Additional evaluation is planned to
1284
+ evaluate the performance of the proposed approach along
1285
+ longer trajectories, on more varied Lunar type locales, and
1286
+ for different rover specific parameters such as camera height
1287
+ off of the ground. Finally, we plan to validate our proposed
1288
+ approach on a flight-like embedded computer (e.g., a Snap-
1289
+ dragon) to demonstrate that it is computationally feasible for
1290
+ use onboard a Lunar rover.
1291
+ ACKNOWLEDGMENTS
1292
+ The research was carried out at the Jet Propulsion Labo-
1293
+ ratory, California Institute of Technology, under a contract
1294
+ with the National Aeronautics and Space Administration
1295
+ (80NM0018D0004). The authors would like to thank Yang
1296
+ Cheng, Olivier Lamarre, and Scott Tepsuporn for their dis-
1297
+ cussions during the development of this work.
1298
+ 10
1299
+
1300
+ 14
1301
+ 12
1302
+ 10
1303
+ Count
1304
+ 8
1305
+ 6
1306
+ 4
1307
+ 2
1308
+ 0
1309
+ 0
1310
+ 2
1311
+ 4
1312
+ 6
1313
+ 8
1314
+ 10
1315
+ Final GT Error [m]14
1316
+ 12
1317
+ 10
1318
+ Count
1319
+ 8
1320
+ 6
1321
+ 4
1322
+ 2
1323
+ 0
1324
+ 0
1325
+ 2
1326
+ 4
1327
+ 6
1328
+ 8
1329
+ 10
1330
+ Final GT Error [m]14
1331
+ 12
1332
+ 10
1333
+ Count
1334
+ 8
1335
+ 6
1336
+ 4
1337
+ 2
1338
+ 0
1339
+ 0
1340
+ 2
1341
+ 4
1342
+ 6
1343
+ 8
1344
+ 10
1345
+ Final GT Error [m]REFERENCES
1346
+ [1]
1347
+ K. Ennico-Smith, A. Colaprete, R. Elphic, J. Captain,
1348
+ J. Quinn, and K. Zachny, “The Volatiles Investigating
1349
+ Polar Exploration Rover payload,” in Lunar and Plane-
1350
+ tary Science Conference, 2020.
1351
+ [2]
1352
+ A. Colaprete, R. C. Elphic, M. Shirley, K. Ennico-
1353
+ Smith, D. S. S. Lim, Z. Zacny, and J. Captain,
1354
+ “The Volatiles Investigating Polar Exploration Rover
1355
+ (VIPER) mission – measurements and constraints,” in
1356
+ Lunar and Planetary Science Conference, 2021.
1357
+ [3]
1358
+ National Academies of Sciences, Engineering, and
1359
+ Medicine, “Origins, worlds, and life: A decadal strat-
1360
+ egy for planetary science and astrobiology 2023–2032.”
1361
+ National Academy Press, Tech. Rep., 2022.
1362
+ [4]
1363
+ J. T. Keane, S. M. Tikoo, and J. Elliott, “Endurance: Lu-
1364
+ nar South Pole-Atken Basin traverse and sample return
1365
+ rover,” National Academy Press, Tech. Rep., 2022.
1366
+ [5]
1367
+ L. Matthies, S. Daftry, S. Tepsuporn, Y. Cheng, D. Atha,
1368
+ R. M. Swan, S. Ravichandar, and M. Ono, “Lunar
1369
+ rover localization using craters as landmarks,” in IEEE
1370
+ Aerospace Conference, 2022.
1371
+ [6]
1372
+ S. Daftry, Z. Chen, Y. Cheng, S. Tepsuporn, B. Coltin,
1373
+ U. Naam, M. M. Lanssie, S. Khattak, M. Deans,
1374
+ and L. Matthies, “LunarNav: Crater-based localization
1375
+ for long-range autonomous rover navigation,” in IEEE
1376
+ Aerospace Conference, 2023.
1377
+ [7]
1378
+ H. Hiesinger, C. H. van der Bogert, J. H. Pasckert,
1379
+ L. Funcke, L. Giacomini, L. R. Ostrach, and M. S.
1380
+ Robinson, “How old are young lunar craters?” Journal
1381
+ of Geophysical Research, vol. 117, no. 12, pp. 1–15,
1382
+ 2012.
1383
+ [8]
1384
+ M. S. Robinson,
1385
+ S. M. Brylow,
1386
+ M. Tschimmel,
1387
+ D. Humm, S. J. Lawrence, P. C. Thomas, B. W. Denevi,
1388
+ E. Bowman-Cisneros, J. Zerr, M. A. Ravine, M. A.
1389
+ Caplinger, F. T. Ghaemi, J. A. Schaffner, M. C. Malin,
1390
+ P. Mahanti, A. Bartels, J. Anderson, T. N. Tran, E. M.
1391
+ Eliason, A. S. McEwen, E. Turtle, B. L. Jolliff, and
1392
+ H. Hiesinger, “Lunar Reconnaissance Orbiter (LROC)
1393
+ camera instrument overview,” Space Science Reviews,
1394
+ vol. 150, 2010.
1395
+ [9]
1396
+ E. Cisneros, A. Awumah, H. M. Brown, A. C. Mar-
1397
+ tin, K. N. Paris, R. Z. Povilaitis, A. K. Boyd, M. S.
1398
+ Robinson, and LROC Team, “Lunar Reconnaissance
1399
+ Orbiter camera permanently shadowed region imaging
1400
+ – atlas and controlled mosaics,” in Lunar and Planetary
1401
+ Science Conference, 2017.
1402
+ [10] Blender - a 3D modelling and rendering package. Soft-
1403
+ ware available from https://www.blender.org/.
1404
+ [11] J. V. Hook, R. Schwartz, K. Ebadi, K. Coble, and
1405
+ C. Padgett, “Topographical landmarks for ground-level
1406
+ terrain relative navigation on Mars,” in IEEE Aerospace
1407
+ Conference, 2022.
1408
+ [12] K. Ebadi, K. Coble, D. Atha, R. Schwartz, C. Padgett,
1409
+ and J. V. Hook, “Semantic mapping in unstructured en-
1410
+ vironments: Toward autonomous localization of plane-
1411
+ tary robotic explorers,” in IEEE Aerospace Conference,
1412
+ 2022.
1413
+ [13] J. W. Hwangbo, K. Di, and R. Li, “Integration of orbital
1414
+ and ground image networks for the automation of rover
1415
+ localization,” in American Society for Photogrammetry
1416
+ and Remote Sensing Annual Conference, 2009.
1417
+ [14] D. Hurley, P. Prem, A. Stickle, C. Hibbitts, A. Deutsch,
1418
+ A. Colaprete, R. Elphic, S. Li, P. Lucey, Y. Liu,
1419
+ S. Hosseini, K. D. Retherford, K. Zacny, J. Atkin-
1420
+ son, M. Benna, W. Farrell, D. Needham, L. Gertsch,
1421
+ M. Delitsky, and P. Hayne, “Science from the lunar per-
1422
+ manently shadowed regions,” in Lunar and Planetary
1423
+ Science Conference, 2020.
1424
+ [15] B. Wu, R. W. K. Potter, P. Ludivig, A. S. Chung,
1425
+ and T. Seabrook, “Absolute localization through orbital
1426
+ maps and surface perspective imagery: A synthetic lu-
1427
+ nar dataset and neural network approach,” in IEEE/RSJ
1428
+ Int. Conf. on Intelligent Robots & Systems, 2019.
1429
+ [16] V. Franchi and E. Ntagiou, “Planetary rover localisa-
1430
+ tion via surface and orbital image matching,” in IEEE
1431
+ Aerospace Conference, 2022.
1432
+ [17] A. Howard, A. I. Ansar, T. E. Litwin, and S. B. Gold-
1433
+ berg. (2009) Jet Propulsion Laboratory Stereo Vision
1434
+ software suite (JPLV). Software available from https:
1435
+ //software.nasa.gov/software/NPO-18593-1T.
1436
+ [18] H. Hirschmuller, “Stereo processing by semiglobal
1437
+ matching and mutual information,” IEEE Transactions
1438
+ on Pattern Analysis & Machine Intelligence, vol. 30,
1439
+ no. 2, pp. 328–341, 2008.
1440
+ [19] J. Canny, “A computational approach to edge detection,”
1441
+ IEEE Transactions on Pattern Analysis & Machine In-
1442
+ telligence, vol. 8, no. 6, pp. 679–698, 1986.
1443
+ [20] S. Xie and Z. Tu, “Holistically-nested edge detection,”
1444
+ in IEEE Int. Conf. on Computer Vision, 2015.
1445
+ [21] M. S. Arulampalam, S. Maskell, N. Gordon, and
1446
+ T. Clapp,
1447
+ “A tutorial on particle filters for on-
1448
+ line nonlinear/non-Gaussian Bayesian tracking,” IEEE
1449
+ Transactions on Signal Processing, vol. 50, no. 2, pp.
1450
+ 174–188, 2002.
1451
+ [22] C. Gentner, S. Zhang, and T. Jost, “Log-PF: Particle
1452
+ filtering in logarithm domain,” Journal of Electrical and
1453
+ Computer Engineering, vol. 2018, 2018.
1454
+ [23] B. Hapke, Theory of reflectance and emittance spec-
1455
+ troscopy.
1456
+ Cambridge Univ. Press, 2012.
1457
+ [24] ——, “Bidirectional reflectance spectroscopy: 5. the
1458
+ coherent backscatter opposition effect and anisotropic
1459
+ scattering,” Icarus, vol. 157, no. 2, pp. 523–534, 2002.
1460
+ [25] F. Schmidt and S. Bourguignon, “Efficiency of BRDF
1461
+ sampling and bias on the average photometric behav-
1462
+ ior,” Icarus, vol. 317, pp. 10–26, 2019.
1463
+ [26] X. Xu, J. Liu, D. Liu, B. Liu, and R. Shu, “Photo-
1464
+ metric correction of Chang’E-1 interference imaging
1465
+ spectrometer’s (IIM) limited observing geometries data
1466
+ with Hapke model,” Remote Sensing, vol. 12, no. 22, p.
1467
+ 3676, 2020.
1468
+ [27] S. Joshi and S. Boyd, “Sensor selection via convex
1469
+ optimization,” IEEE Transactions on Signal Processing,
1470
+ vol. 57, no. 2, 2009.
1471
+ [28] L. Carlone and S. Karaman, “Attention and anticipation
1472
+ in fast visual-inertial navigation,” IEEE Transactions on
1473
+ Robotics, vol. 35, no. 1, 2019.
1474
+ [29] T. Li, M. Boli´c, and P. M. Djuri´c, “Resampling meth-
1475
+ ods for particle filtering,” IEEE Transactions on Signal
1476
+ Processing, vol. 32, no. 3, pp. 70–86, 2015.
1477
+ [30] C. Kuptametee and N. Aunsri, “A review of resampling
1478
+ techniques in particle filtering framework,” Measure-
1479
+ ment, vol. 193, 2022.
1480
+ 11
1481
+
1482
+ BIOGRAPHY[
1483
+ Abhishek Cauligi is a Robotics Tech-
1484
+ nologist in the Robotic Surface Mobility
1485
+ Group at NASA Jet Propulsion Labo-
1486
+ ratory, California Institute of Technol-
1487
+ ogy.
1488
+ Abhishek received his B.S. in
1489
+ Aerospace Engineering from the Uni-
1490
+ versity of Michigan - Ann Arbor and
1491
+ his PhD. in Aeronautics and Astronau-
1492
+ tics from Stanford University.
1493
+ His re-
1494
+ search interests lie in leveraging recent
1495
+ advances in nonlinear optimization, machine learning, and
1496
+ control theory towards planning and control for complex
1497
+ robotic systems.
1498
+ R. Michael Swan is a Robotics Systems
1499
+ Engineer at NASA Jet Propulsion Lab-
1500
+ oratory, California Institute of Technol-
1501
+ ogy. He received his B.S. in Computer
1502
+ Engineering from Walla Walla Univer-
1503
+ sity and his M.S. in Computer Science
1504
+ from the University of Southern Califor-
1505
+ nia. He is interested in robotic surface
1506
+ and aerial autonomy, perception, simu-
1507
+ lation, and robotic system architecture.
1508
+ Hiro Ono is the Group Leader of
1509
+ the Robotic Surface Mobility Group at
1510
+ NASA Jet Propulsion Laboratory, Cal-
1511
+ ifornia Institute of Technology.
1512
+ Since
1513
+ he joined JPL in 2013, he has led a
1514
+ number of research projects on Mars
1515
+ rover autonomy, as well as three NIAC
1516
+ studies on Enceladus Vent Explorer and
1517
+ Comet Hitchhiker. Hiro was a flight soft-
1518
+ ware developer of M2020’s Enhanced
1519
+ AutoNav and the lead of M2020 Landing Site Traversability
1520
+ Analysis. He also led the development of a machine learning-
1521
+ based Martian terrain classifier, SPOC (Soil Property and
1522
+ Object Classification), which won JPL’s Software of the Year
1523
+ Award in 2020.
1524
+ Shreyansh Daftry is a Robotics Tech-
1525
+ nologist at NASA Jet Propulsion Labora-
1526
+ tory, California Institute of Technology.
1527
+ He received his M.S. degree in Robotics
1528
+ from the Robotics Institute, Carnegie
1529
+ Mellon University, and his B.S. degree
1530
+ in Electronics and Communication En-
1531
+ gineering.
1532
+ His research interest lies
1533
+ at the intersection of space technology
1534
+ and autonomous robotic systems, with
1535
+ an emphasis on machine learning applications to percep-
1536
+ tion, planning, and decision making.
1537
+ At JPL, he is the
1538
+ Group Leader of the Perception Systems group, is working
1539
+ on the Mars Sample Recovery Helicopter mission, and has
1540
+ led/contributed to technology development for autonomous
1541
+ navigation of ground, airborne, and subterranean robots.
1542
+ John Elliott is a principal engineer
1543
+ in JPL’s Mission Concept Systems De-
1544
+ velopment group.
1545
+ He currently serves
1546
+ as Program Engineer for the Planetary
1547
+ Science Formulation office. His recent
1548
+ tasks have included serving as study
1549
+ lead for the Planetary Decadal Survey’s
1550
+ three lunar rover mission concept stud-
1551
+ ies, Intrepid, INSPIRE, and Endurance,
1552
+ and performing systems engineering and
1553
+ leadership roles on a number of recent Discovery and New
1554
+ Frontiers mission proposals. Mr. Elliott’s past experience
1555
+ includes six years in the terrestrial nuclear power industry
1556
+ with Bechtel Corporation in addition to 30 years in aerospace
1557
+ systems at TRW and JPL.
1558
+ Larry Matthies is the technology co-
1559
+ ordinator in the Mars Exploration Pro-
1560
+ gram Office at JPL. He received B.S.,
1561
+ M. Math,and PhD degrees in Computer
1562
+ Science from the University of Regina
1563
+ (1979), University of Waterloo (1981),
1564
+ and Carnegie Mellon University (1989).
1565
+ He has been with JPL for more than
1566
+ 32 years. He has conducted technology
1567
+ development in perception systems for
1568
+ autonomous navigation of robotic vehicles for land, sea,
1569
+ air, and space.
1570
+ He supervised the JPL Computer Vision
1571
+ group for 21 years. He led development of computer vision
1572
+ algorithms for Mars rovers, landers, and helicopters. He is a
1573
+ Fellow of the IEEE and a member of the editorial boards of
1574
+ Autonomous Robots and the Journal of Field Robotics.
1575
+ Deegan Atha is a Robotics Technologist
1576
+ within the Perception Systems Group of
1577
+ the Mobility and Robotic Systems Sec-
1578
+ tion at the Jet Propulsion Laboratory.
1579
+ He received his B.S. degree from Purdue
1580
+ University in Electrical Engineering and
1581
+ his M.S. in Computer Science from the
1582
+ Georgia Institute of Technology. He is
1583
+ currently the Principal Investigator for
1584
+ the ShadowNav task and leading the se-
1585
+ mantic perception effort for the DARPA RACER project. His
1586
+ interests are in the infusion of machine learning and robotic
1587
+ perception into autonomous systems operating in unstruc-
1588
+ tured environments.
1589
+ 12
1590
+
29E3T4oBgHgl3EQfoAqh/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4NE0T4oBgHgl3EQfvQFL/content/tmp_files/2301.02615v1.pdf.txt ADDED
@@ -0,0 +1,996 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Silent Killer: Optimizing Backdoor Trigger Yields a Stealthy and Powerful Data
2
+ Poisoning Attack
3
+ Tzvi Lederer1 , Gallil Maimon2 , Lior Rokach1
4
+ 1Ben Gurion University , 2The Hebrew University of Jerusalem
5
+ {tzvil, liorrk}@post.bgu.ac.il
6
+ Abstract
7
+ We propose a stealthy and powerful backdoor attack
8
+ on neural networks based on data poisoning (DP).
9
+ In contrast to previous attacks, both the poison and
10
+ the trigger in our method are stealthy. We are able to
11
+ change the model’s classification of samples from a
12
+ source class to a target class chosen by the attacker.
13
+ We do so by using a small number of poisoned train-
14
+ ing samples with nearly imperceptible perturbations,
15
+ without changing their labels. At inference time,
16
+ we use a stealthy perturbation added to the attacked
17
+ samples as a trigger. This perturbation is crafted as
18
+ a universal adversarial perturbation (UAP), and the
19
+ poison is crafted using gradient alignment coupled
20
+ to this trigger. Our method is highly efficient in craft-
21
+ ing time compared to previous methods and requires
22
+ only a trained surrogate model without additional re-
23
+ training. Our attack achieves state-of-the-art results
24
+ in terms of attack success rate while maintaining
25
+ high accuracy on clean samples.
26
+ 1
27
+ Introduction
28
+ Deep learning models have been shown to be susceptible
29
+ to data poising attacks [Chen et al., 2017; Gu et al., 2017;
30
+ Shafahi et al., 2018]. In these attacks, the attacker manipu-
31
+ lates training examples and adds them to the victim’s training
32
+ set. The victim trains the model on the poisoned dataset and
33
+ unknowingly adds unwanted functionality to the model. One
34
+ typical example of such functionalities is a backdoor. The
35
+ goal of backdoor attacks is to be able to change the model’s
36
+ prediction at inference time by adding a trigger to the sam-
37
+ ple (e.g., a small colorful patch in the image), such that the
38
+ model will misclassify it in the way the attacker planned.
39
+ (e.g. replacing stop signs prediction with speed limits [Gu
40
+ et al., 2017]). In a world where many models are trained on
41
+ large datasets scraped from the internet [Ramesh et al., 2022;
42
+ Kuznetsova et al., 2020], the threat of DP becomes more rel-
43
+ evant than ever. A malicious attacker can generate poisoned
44
+ samples and put them on public sites such as Wikipedia or
45
+ even on his own site. In high probability, those samples will
46
+ end up in the training set of a large company and can stealthily
47
+ damage the model.
48
+ Figure 1: Illustration of the Silent-Killer attack. In the first step
49
+ (1), the attacker uses a surrogate model to craft a targeted universal
50
+ adversarial perturbation that will be used as a trigger. (2) The attacker
51
+ creates poisoned samples using gradient alignment. The attacker
52
+ releases the poisoned samples into the victim’s dataset (3), and the
53
+ victim trains the model on the poisoned dataset. After the victim
54
+ deploys the model, the attacker can add the trigger to his own images
55
+ and activate the unwanted behavior of the model (4).
56
+ However, it is difficult to carry out a successful poisoning
57
+ attack on a model without being noticed. Earlier work in this
58
+ field [Gu et al., 2017] has shown that embedding the trigger
59
+ directly into the training set can poison the model, but this
60
+ method is not stealthy because the trigger is visible and can be
61
+ removed. Additionally, these methods typically involve flip-
62
+ ping the labels of the poisoned samples which can be identified
63
+ during re-labeling. Other works [Saha et al., 2020] introduce a
64
+ method to craft poisoned examples with small stealthy pertur-
65
+ bations that do not change their label, but this method, which
66
+ is based on features collision, does not achieve high perfor-
67
+ mance when training the model from scratch, but only in the
68
+ fine-tuning scenario. A recent work, named Sleeper-Agent
69
+ [Souri et al., 2022] has demonstrated a gradient-based op-
70
+ timization method for poison crafting. In their work, they
71
+ show that using the gradient alignment method, they can craft
72
+ stealthy and effective poisoned samples, even in the challeng-
73
+ ing scenario that previous methods struggled to handle. For
74
+ example, this method succeeds under the clean label settings,
75
+ when the attacker is not allowed to change the sample label,
76
+ and when the attacker trains the model from scratch. However,
77
+ this method uses as a trigger a striking, colorful patch, which is
78
+ arXiv:2301.02615v1 [cs.CR] 5 Jan 2023
79
+
80
+ Trigger
81
+ Poison
82
+ Victim
83
+ Attack
84
+ Crafting
85
+ Crafting
86
+ .Training
87
+ .Realization
88
+ X
89
+ (1)
90
+ (2)
91
+ (3)
92
+ (4)not stealthy and can be detected by the victim at inference time.
93
+ Examples of this trigger can be found in figure 3. Furthermore,
94
+ the patch is arbitrary. As we show in this work, optimizing
95
+ a trigger toward the attacker’s objective, rather than using an
96
+ arbitrary patch, can yield a significant improvement in attacks
97
+ like this.
98
+ Another line of research in the field of adversarial attacks
99
+ is evasion attacks, also known as adversarial examples. In
100
+ these, the attacker uses gradient-based optimization to care-
101
+ fully perturb the input samples at test time and have them
102
+ misclassified. Specifically, some works [Moosavi-Dezfooli
103
+ et al., 2017] show that a single perturbation, which can be
104
+ interpreted as a trigger, can change model predictions on many
105
+ samples. However, as noted in their work, the transferability
106
+ of the attack is not completely reliable. This means that in
107
+ many cases, the attack performance will significantly drop
108
+ when it is applied to other models. In today’s reality, when
109
+ companies often don’t publicly release their models, assuming
110
+ access to the exact model is implausible.
111
+ In this work, we show that a universal adversarial pertur-
112
+ bation (UAP)[Moosavi-Dezfooli et al., 2017] can be used
113
+ as a stealthy and powerful trigger. This trigger, combined
114
+ with the gradient alignment method to craft poisoned sam-
115
+ ples for training [Souri et al., 2022], yields a strong, stealthy
116
+ attack that is more robust and powerful than existing meth-
117
+ ods. Specifically, we show two attacks. In the first attack, we
118
+ craft a universal perturbation that is bounded in magnitude
119
+ and use it as a trigger. The optimization process is inspired
120
+ by UAP [Moosavi-Dezfooli et al., 2017] which iteratively
121
+ updates the perturbation of a set of input samples in the di-
122
+ rection that minimizes the attacker loss on a large number
123
+ of samples. The main differences are that we use a targeted
124
+ attack rather than an untargeted attack, and at each iteration,
125
+ we clip the perturbations up to the upper bound rather than
126
+ projecting it on a l2 norm ball. Examples of this method
127
+ are shown in figure 2. In our second attack, we use a small
128
+ square patch similar to existing methods [Gu et al., 2017;
129
+ Souri et al., 2022] and optimize it without constraints (ex-
130
+ cept patch size). This optimization yields a patch that is sim-
131
+ ilar to triggers used in previous works [Saha et al., 2020;
132
+ Souri et al., 2022].
133
+ Examples can be found in figure 3.
134
+ In both cases, our approach achieves state-of-the-art results
135
+ in terms of attack success rate and robustness under our
136
+ setup. Furthermore, our method does not require retraining
137
+ the model during poison crafting nor using an ensemble of
138
+ models as was done in previous works [Huang et al., 2020;
139
+ Souri et al., 2022]. We demonstrate the success of our method
140
+ in a black-box scenario and its high transferability to unseen
141
+ architectures. The attack scheme is shown in figure 1. Code is
142
+ available at this link.
143
+ Our main contributions are as follows:
144
+ • We demonstrate for the first time a stealthy data-
145
+ poisoning-based black-box backdoor attack on NN image
146
+ classifiers, in which both the poison and the trigger are
147
+ nearly imperceptible, and the label of the poisoned sam-
148
+ ples is not changed (clean-label).
149
+ • Our attack achieves state-of-the-art results in terms of at-
150
+ tack success rate without harming the accuracy on benign
151
+ Figure 2: Samples of our attack on ImageNet. The images on the left
152
+ are poisoned samples sampled from the target class, and the images
153
+ on the right are test samples with the trigger embedded into them. A
154
+ successful attack will result in shark images being classified as cocks.
155
+ samples.
156
+ • This attack is simple and effective. It does not require
157
+ model optimization and does not need ensembles or other
158
+ hand-crafted solutions to achieve effective results.
159
+ 2
160
+ Related Work
161
+ Backdoor Attacks.
162
+ The concept of a backdoor attack or
163
+ Trojan attack involves the embedding of unexpected behavior
164
+ into a neural network by an attacker. This behavior is not
165
+ anticipated by the network’s developer and may only mani-
166
+ fest in specific, pre-defined cases. For instance, a classifica-
167
+ tion network may exhibit high accuracy on most samples but
168
+ be designed to misclassify a specific, chosen sample [Geip-
169
+ ing et al., 2020]. Backdoor attacks can be implemented in
170
+ several ways, such as by modifying the victim network di-
171
+ rectly [Gu et al., 2017; Zhang et al., 2021], contaminating the
172
+ pre-trained network used by the victim [Kurita et al., 2020;
173
+ Gu et al., 2017], poisoning the training dataset [Yang et al.,
174
+ 2017], or even modifying the training process or loss function
175
+ [Bagdasaryan and Shmatikov, 2021]. In some cases, a com-
176
+ bination of these methods may be used, such as in [Qi et al.,
177
+ 2021], where the poisoned training set and network weights
178
+ are learned together. A comprehensive review of backdoor
179
+ attacks against neural networks can be found in [Li et al.,
180
+ 2022]. In this work, we focus on backdoor attacks via DP.
181
+ This allows the attacker to control the output of the model by
182
+ using a trigger added to samples at inference time.
183
+ Data Poisoning.
184
+ DP is a type of attack on neural networks,
185
+ in which the attacker manipulates the training dataset in order
186
+ to influence the behavior of models trained on this data [Biggio
187
+ et al., 2011; Geiping et al., 2020; Gu et al., 2017]. As machine
188
+ learning models require a large amount of data for training,
189
+ many organizations gather data from the internet. This data
190
+ may not always be verified and can therefore be exploited by
191
+ attackers. In DP attacks, the attacker alters or adds samples to
192
+ the training dataset. This is in order to change the behavior
193
+ of the model when it is trained on this modified dataset. Cin`a
194
+ et al. categorize DP attacks into three categories based on
195
+ their purpose: (1) indiscriminate attacks, which increase the
196
+ test error on all samples in the test set [Biggio et al., 2011;
197
+
198
+ Poisoned Training
199
+ Clean Training
200
+ Poison
201
+ Test Samples with
202
+ Test Samples
203
+ Trigger
204
+ Perturbations
205
+ Samples
206
+ Trigger
207
+ without Trigger
208
+ Perturbations
209
+ SamplesCin`a et al., 2022], (2) targeted attacks, which increase the test
210
+ error on specific samples [Geiping et al., 2020; Shafahi et al.,
211
+ 2018], and (3) backdoor attacks, which increase the test error
212
+ on samples that contain a specific trigger embedded within
213
+ them [Gu et al., 2017; Souri et al., 2022]. In this work, we
214
+ focus on implementing a DP-based backdoor attack.
215
+ Backdoor Attacks with Data Poisoning.
216
+ Backdoor attacks
217
+ have often been conducted with unrealistic assumptions for
218
+ the attacker, such as the ability to modify the victim model
219
+ or its weights. There are few works that demonstrate the suc-
220
+ cessful backdooring of a model using only a small portion of
221
+ the poisoned training set. Gu et al. blended a patch into a
222
+ portion of the training set and showed that inserting this patch
223
+ into samples in the test set would cause the model to classify
224
+ them as the attacker planned. Subsequent works investigated
225
+ other heuristic approaches, but they did not find a systematic
226
+ optimization-based approach to find stealthy poison. Li et al.
227
+ described an attack that uses a stealthy bounded norm trigger,
228
+ but it does not work under the clean label setting and is not
229
+ applicable to from-scratch training. Saha et al. described a
230
+ method based on feature collusion that crafts a clean label
231
+ stealthy poison, but it does not work on from-scratch training.
232
+ Souri et al. proposed using gradient alignment on a surrogate
233
+ network to craft a poisoned training set, but the trigger in their
234
+ work was noticeable and the poisoning process is hard for the
235
+ attacker, especially in large models, due to the need for retrain-
236
+ ing the model and use of ensemble during crafting. In this
237
+ work, we present a stealthy data-poisoning-based backdoor
238
+ attack that achieves state-of-the-art results.
239
+ Gradient Alignment.
240
+ Gradient alignment, or gradient
241
+ matching, is a relatively recent method that was proven
242
+ suitable to DP [Souri et al., 2022; Geiping et al., 2020;
243
+ Fowl et al., 2021]. The idea was borrowed from the data
244
+ condensation domain, where the goal is to find ”informative
245
+ samples”, samples producing rich gradients during model
246
+ training. In other words, the goal is to find a few samples
247
+ where an optimization upon them minimizes the loss over a
248
+ large number of samples [Zhao et al., 2021]. Recent works
249
+ show that this method can be used for DP [Geiping et al.,
250
+ 2020] and backdoor attacks [Souri et al., 2022]. For example,
251
+ in a backdoor attack, the attacker wants the model to classify
252
+ samples with a certain trigger as a specific target class. How-
253
+ ever, the attacker is not able to directly optimize the model to
254
+ do this. The gradient alignment method allows the attacker to
255
+ adjust the gradients of the model’s weights when it is being
256
+ optimized on poisoned samples. This is so that the gradients
257
+ are as close as possible to the gradients of the attacker’s de-
258
+ sired classification loss on the trigger samples. The attacker
259
+ tries to maximize the cosine similarity between the gradients
260
+ of the poisoned samples and the gradients of the attacker’s
261
+ loss during the optimization process. A more comprehensive
262
+ formulation can be found in section 3.3.
263
+ Evasion Attacks.
264
+ Evasion attack, also called adversarial
265
+ examples, is a type of attack at test time on a machine learning
266
+ model, aiming to change the model’s input so that the model
267
+ will predict wrongly on the manipulated example [Goodfellow
268
+ et al., 2014; Kurakin et al., 2018; Madry et al., 2017; Carlini
269
+ and Wagner, 2017]. An important attack for our method is the
270
+ Figure 3: An illustration of our experiment with optimized patches.
271
+ From left to right: pre-defined arbitrary patch [Souri et al., 2022],
272
+ optimized patch (ours), poisoned training samples, and clean training
273
+ samples. The poisoned samples were crafted with the optimized
274
+ patch, but the poison crafted with the pre-defined patch looked very
275
+ similar. By optimizing the trigger, we achieve superior results in
276
+ terms of ASR.
277
+ Algorithm 1 Trigger Crafting
278
+ Input: {xi}M
279
+ i=1 ∈ Ds, Fs(·; θs), lt, Tt(·, ·)
280
+ Parameter: Rt, αt, ϵt, σ2
281
+ Output: δt
282
+ 1: Initialize δt ∼ N(0, σ2)
283
+ 2: for r = 1, 2, ..., Rt optimization steps do
284
+ 3:
285
+ Xt = {Tt(xi, δt)}M
286
+ i=1
287
+ 4:
288
+ ˆYt = {Fs(xt
289
+ i; θs) : xt
290
+ i ∈ Xt}M
291
+ i=1
292
+ 5:
293
+ ∆ = sign( 1
294
+ N
295
+
296
+ ˆyt
297
+ i∈ ˆYt ∇δtL(ˆyt
298
+ i, lt))
299
+ 6:
300
+ δt ← δt − αt · ∆
301
+ 7:
302
+ δt ← clip(δt, −ϵt, ϵt)
303
+ 8: end for
304
+ 9: return δt
305
+ UAP attack [Moosavi-Dezfooli et al., 2017], which optimizes
306
+ a single perturbation on all samples. Inspired by this method,
307
+ we use a targeted iterative optimization [Kurakin et al., 2018]
308
+ to craft a universal perturbation on several samples from the
309
+ source label and use it as a trigger for the backdoor attack. In
310
+ combination with gradient-alignment-based poison crafting,
311
+ this results in a powerful DP attack.
312
+ 3
313
+ Method
314
+ 3.1
315
+ Threat Model
316
+ Our threat model is as follows. We conducted experiments
317
+ under two scenarios: gray-box, where the attacker knows the
318
+ victim’s architecture but not the weights, and black-box, where
319
+ the attacker has no information about the architecture and the
320
+ weights. In either case, the attacker has access to data drawn
321
+ from the same distribution as the victim. The attacker can
322
+ train a surrogate model, or use a pre-trained model without the
323
+ need to perform training himself. In addition, the attacker can
324
+
325
+ pre-defined
326
+ optimized
327
+ poisoned
328
+ clean
329
+ Trigger
330
+ PoisonAlgorithm 2 Poison Crafting
331
+ Input: Fs(·; θs), Tp(·; ·), Da = {(xj, yj)}N
332
+ j=1 ∈ Dt,
333
+ Dδt = {Tt(xk; δt) : xk ∼ Ds}K
334
+ k=1, lt
335
+ Parameter: Rp, αp, ϵp, σ2
336
+ Output: δp
337
+ 1: Initialize δp ← {δp
338
+ j ∼ N(0, σ2)}N
339
+ j=1
340
+ 2: La = 1
341
+ K
342
+
343
+ xt
344
+ k∈Dδt L(Fs(xt
345
+ k, θs), lt)
346
+ 3: for r = 1, 2, ..., Rp optimizations steps do
347
+ 4:
348
+ for j = 1, 2, ..., N do
349
+ 5:
350
+ Lv,j = L(Fs(xj + δp
351
+ j , θs), yj)
352
+ 6:
353
+ Aj = 1 −
354
+ ∇θsLv,j·∇θsLa
355
+ ||∇θsLv,j||·||∇θsLa||
356
+ 7:
357
+ δp
358
+ j ← δp
359
+ j − αp · ∂Aj
360
+ ∂δj
361
+ 8:
362
+ δp
363
+ j ← clip(δp
364
+ j , −ϵp, ϵp)
365
+ 9:
366
+ end for
367
+ 10: end for
368
+ 11: return δp
369
+ poison a small portion (e.g., 1%) of poisoned training samples
370
+ of the victim’s training set. The attack has to be clean label,
371
+ which means that the attacker cannot change the labels of the
372
+ poisoned samples, which is more challenging [Schwarzschild
373
+ et al., 2021]. Finally, the attacker can embed a stealthy trigger
374
+ into a sample and feed it to the model at inference time. The
375
+ attack is successful if the model predicts this sample as the
376
+ target class specified by the attacker. The victim can train a
377
+ model from-scratch, which is more challenging than the case
378
+ when we assume a fine-tuning regime [Schwarzschild et al.,
379
+ 2021]. Training a model from-scratch is very challenging for
380
+ some methods based on feature-collision [Saha et al., 2020].
381
+ 3.2
382
+ Notation and Problem Setup
383
+ The victim has a model F with parameters θ, a dataset D =
384
+ {(xi, yi)}i and a loss function L (e.g. Cross-entropy loss).
385
+ The attacker has access to a surrogate model Fs(·; θs), which
386
+ can have the same architecture as F (gray-box) or a different
387
+ one (black-box). While preliminary results showed that using
388
+ a different dataset from the same distribution would also work,
389
+ we assume that the attacker has access to the victim dataset
390
+ D, to be compatible with the setup in Souri et al.. We refer to
391
+ the subset containing the poisoned samples as Da. |Da| = N,
392
+ when N is the number of samples that the attacker is allowed
393
+ to modify in the training set. Dc is the portion of the data
394
+ that the attacker cannot modify. Dp = Dc ∪ Da is the dataset
395
+ containing the poisoned samples with the clean samples. The
396
+ victim trains a model to find θ = arg min L(Dp, F(·; θ)).
397
+ The attacker aims to find perturbations δp = {δp
398
+ i }N
399
+ i=1 to
400
+ embed into training samples xp
401
+ i = Tp(xi, δp
402
+ i ), and create the
403
+ poisoned set Da = {(xp
404
+ i , yi)}N
405
+ i=1. In addition, the attacker has
406
+ to find a single trigger δt which will be embedded into samples
407
+ at test time xt = Tt(x, δt); x ∼ Ds which will make the
408
+ model predict the sample as target class F(xt; θ) = lt, when
409
+ ls ̸= lt. We refer to the distribution of samples from the source
410
+ class and the target class as Ds and Dt respectively. ls is the
411
+ source label and lt is the target label. Tp(xi, δp
412
+ i ) and Tt(xi, δt)
413
+ insert δp
414
+ i and δt to xi as a poison and a trigger respectively. In
415
+ our work, Tp is an additive function Tp(xj; δp
416
+ j ) = Clip(xj +
417
+ δp
418
+ j , 0, 1), and Tt can be an additive function, or alternatively
419
+ embed a patch into the sample. The details of these functions
420
+ are described in section 3.3.
421
+ The attacker aims to minimize E(xi,yi)∼DsLa when
422
+ La = L(F(xt
423
+ i; θ(δp)), lt), as the victim minimizes his loss
424
+ 1
425
+ |Dp|
426
+
427
+ (xi,yi)∼Dp Lv; Lv = L(F(xi; θ), yi). In general, the
428
+ perturbations and the trigger have to solve the following bi-
429
+ level optimization problem:
430
+ arg
431
+ min
432
+ δp
433
+ i |N
434
+ i=1∈∆p,δt∈∆t
435
+ E(x,y)∼DsL(F(Tt(x, δt); θ(δp)), lt)
436
+ s.t. θ(δp) = arg min
437
+ ˆθ
438
+ E(xi,yi)∼DpL(F(xi(δp); ˆθ), yi)
439
+ Where ∆p = {δ : ||δ||∞ < ϵp} and ∆t = {δ : ||δ||∞ < ϵt}.
440
+ ϵp and ϵt control the maximal norm of the perturbation of the
441
+ poison and the trigger respectively. The larger they are, the
442
+ simpler it is to perform the attack, but it makes the attack less
443
+ stealthy. Alternatively, ∆p can be the set of small patches with
444
+ pre-defined size, as we describe later.
445
+ The bilevel optimization problem is known to be difficult to
446
+ solve [Huang et al., 2020]. Our approach to dealing with this
447
+ problem is described in section 3.3.
448
+ Previous methods [Souri et al., 2022; Saha et al., 2020]
449
+ also used hidden poison in their attacks, but their trigger was
450
+ a visible colorful patch. We are the first to use both a stealthy
451
+ poison and a stealthy trigger. Furthermore, in these works,
452
+ only the poison was optimized, but not the trigger. In our
453
+ method, we aim to find an optimized trigger that outperforms
454
+ the use of simple arbitrary triggers.
455
+ 3.3
456
+ Our Approach
457
+ Trigger Crafting
458
+ Our attack consists of two stages: trigger crafting and poi-
459
+ son crafting. In the first stage, inspired by [Kurakin et al.,
460
+ 2018] and [Moosavi-Dezfooli et al., 2017], we craft a univer-
461
+ sal perturbation δt that tries to change the surrogate model
462
+ Fs(·; θs) prediction of samples from the source class ls to
463
+ the target class lt. This process is described in algorithm
464
+ 1. We initialize δt ∈ N(0, σ2) when σ2 = 0.01, but uni-
465
+ form noise or initialization with zeros works as well. In our
466
+ work, we use samples from victim training set, but in general,
467
+ any samples drawn from the source label distribution can be
468
+ used. In our main attack, the trigger is an additive perturbation
469
+ with the same dimensions as the images, added to the samples
470
+ Tt(x; δt) = Clip(x+δt, 0, 1), bounded such that ||δt||∞ ≤ ϵt.
471
+ We follow [Souri et al., 2022] that uses ϵ = 16/255 for the
472
+ poison perturbation and use this threshold for ϵt. Our second
473
+ attack was designed to compare our method to [Souri et al.,
474
+ 2022], which used a small patch as a trigger. We optimize a
475
+ trigger of size 8 × 8 pixels instead of selecting a pre-defined
476
+ one. We use algorithm 1 with Tt as a patch insertion function:
477
+ Tt(x, δt)[i, j] =
478
+ �δt[i′, j′]
479
+ (i, j) ∈ R
480
+ x[i, j]
481
+ else
482
+
483
+ Attack success rate [%]
484
+ Architecture
485
+ ResNet18
486
+ MobileNet-V2
487
+ VGG11
488
+ Hidden Trigger Backdoor [Saha et al., 2020]
489
+ 3.50
490
+ 3.76
491
+ 5.02
492
+ Clean-Label Backdoor [Turner et al., 2019]
493
+ 2.78
494
+ 3.50
495
+ 4.70
496
+ Sleeper Agent [Souri et al., 2022]
497
+ 78.84
498
+ 75.96
499
+ 86.60
500
+ Sleeper Agent (base)
501
+ 57.42
502
+ 15.00
503
+ 25.25
504
+ Silent Killer (patch) (ours)
505
+ 95.92
506
+ 41.68
507
+ 97.35
508
+ Silent Killer (perturbation) (ours)
509
+ 89.85
510
+ 97.91
511
+ 97.86
512
+ Table 1: Silent Killer vs other popular clean-label poisoning attacks, in terms of ASR. The comparison is made in gray-box settings, trained
513
+ from scratch on CIFAR-10, with 1% of poisoned samples. We evaluate our method with both additive stealthy trigger (perturbation), bound by
514
+ l∞ < 16/255, and optimized colorful patch (patch). The results of the three first methods were taken from [Souri et al., 2022]. Sleeper Agent
515
+ (base) is our implementation of Sleeper Agent without retraining and ensemble (to be comparable to our method which does not use it).
516
+ Architecture
517
+ Attack success rate (ASR) [%]
518
+ Clean accuracy [%]
519
+ Clean
520
+ Silent Killer
521
+ Clean
522
+ Silent Killer
523
+ ResNet18
524
+ 46.07 ± 20.52
525
+ 89.85 ± 6.46
526
+ 83.76 ± 0.17
527
+ 83.61 ± 0.17
528
+ VGG11
529
+ 74.47 ± 20.54
530
+ 97.86 ± 2.10
531
+ 86.00 ± 0.10
532
+ 85.86 ± 0.12
533
+ MobileNetV2
534
+ 55.84 ± 20.18
535
+ 97.91 ± 01.72
536
+ 82.05 ± 0.38
537
+ 82.64 ± 1.44
538
+ Table 2: A comparison of the results of our trigger, both with and without data poisoning, on CIFAR10. The table shows the mean and standard
539
+ deviation of the ASR and the clean accuracy. One can notice that the clean accuracy in the poisoned model is very close to the clean model.
540
+ This means that the poison does not harm model performance on clean data.
541
+ When x[i, j] is the pixel value of the image in pixel i, j,
542
+ R = {(i, j) : imin ≤ i ≤ imax, jmin ≤ j ≤ jmax}
543
+ is the set of pixel indexes between the patch boundaries
544
+ (imin, imax, jmin, jmax). In our experiment, we chose a patch
545
+ size of 8 × 8, which means that imax = imin + 8, jmax =
546
+ jmin + 8. Finally, i′ = i − imin, j′ = j − jmin. The location
547
+ of the patch was randomly chosen both during training and
548
+ testing. Souri et al. showed that optimization in this way
549
+ gives better results. During crafting, instead of computing the
550
+ gradients with respect to all the input dimensions, we optimize
551
+ only the pixels in the position of the patch. Additionally, in
552
+ this case, we choose ϵt = 1 which effectively means we do
553
+ not clip the trigger and allow it to have any norm. Examples
554
+ of the two types of triggers can be found in figures 2 and 3.
555
+ Poison Crafting
556
+ The second stage of our attack is poison crafting. We use
557
+ the trigger computed in the previous stage to craft poison
558
+ examples. We follow the method proposed by Souri et al.
559
+ and align the gradients achieved from the victim training loss
560
+ ∇θLv which is computed on the poisoned dataset, to the gra-
561
+ dients of the weights computed from the attacker loss ∇θLa.
562
+ First, we compute the predictions of the model on source
563
+ label samples embedded with the trigger ˆyt
564
+ i = Fs(xt
565
+ i; θs)
566
+ when xt
567
+ i = Tt(xi; δt). Next, we compute the loss function of
568
+ this prediction with respect to the target class Li = L(ˆyt
569
+ i, lt)
570
+ and backpropagate it to the weights θs obtaining ∇θsL =
571
+ 1
572
+ N
573
+
574
+ i ∇θsLi. ∇θsL is the average gradient of the labels with
575
+ the trigger to which we want the gradients of the poisoned
576
+ samples to be aligned. Similarly, we compute the gradients
577
+ of the chosen poison samples xp
578
+ j = Tp(xj; δp
579
+ j ) and obtain
580
+ ∇θsLj. In this step, our goal is to ��nd the perturbation δp
581
+ j that
582
+ will make ∇θsLj to be aligned with ∇θsL. To this end, we
583
+ compute the alignment loss:
584
+ Aj = 1 −
585
+ ∇θsLj · ∇θsL
586
+ ||∇θsLj|| · ||∇θsL||
587
+ (1)
588
+ This term quantifies the distance between the gradients ob-
589
+ tained from the poisoned sample (xp
590
+ j, lt) and the average gra-
591
+ dients obtained from the samples {(xt
592
+ i, lt)}i. To minimize this
593
+ distance, one can derive this term with respect to the poison
594
+ ∂Aj
595
+ ∂δp
596
+ j and perform a standard gradient descent algorithm to
597
+ {δp
598
+ j }. In our work, we use signed SGD for both poison craft-
599
+ ing and trigger crafting. For the selection of samples to poison,
600
+ we follow [Souri et al., 2022] which shows that selecting sam-
601
+ ples with the largest gradients yields a higher attack success
602
+ rate. The complete method is summarized in Algorithm 2.
603
+ 4
604
+ Experiments
605
+ 4.1
606
+ Setup
607
+ We follow [Souri et al., 2022] and conducted experiments
608
+ on CIFAR10 using ResNet18, VGG11, and MobileNetV2
609
+ architectures. Unless specified otherwise, we used 500 poison
610
+ samples (1% of the data) in all experiments. The main metric
611
+ for evaluating the attack’s performance was the attack success
612
+ rate (ASR), defined as ASR = #Success
613
+ #T otal , where #Success
614
+ is the number of successful classifications of source samples
615
+ as the target class after the trigger was embedded into them,
616
+ and #Total is the total number of samples. We did not count
617
+ samples where the predicted label was altered to a class other
618
+ than the target class. We also measured the clean accuracy or
619
+ the accuracy of the model on benign samples from the test set.
620
+
621
+ Architecture
622
+ Attack success rate (ASR) [%]
623
+ Clean Accuracy [%]
624
+ Predefined
625
+ Silent Killer (patch)
626
+ Clean
627
+ Predefined
628
+ Silent Killer (patch)
629
+ ResNet18
630
+ 57.42 ± 25.13
631
+ 95.92 ± 3.86
632
+ 83.76 ± 0.17
633
+ 83.19 ± 0.51
634
+ 83.31 ± 0.32
635
+ VGG11
636
+ 25.25 ± 21.51
637
+ 97.35 ± 4.91
638
+ 86.00 ± 0.10
639
+ 85.97 ± 0.24
640
+ 85.69 ± 0.21
641
+ MobileNetV2
642
+ 15.00 ± 11.57
643
+ 41.68 ± 25.63
644
+ 82.05 ± 0.38
645
+ 81.97 ± 0.91
646
+ 81.89 ± 0.65
647
+ Table 3: Mean and STD of the ASR and the clean accuracy of our second attack (square patch as a trigger) on CIFAR-10 with 1% poisoned
648
+ samples. Predefined means using a random trigger as in Souri et al., whereas Silent Killer (patch) creates it using algorithm 1. The tables show
649
+ the advantages of the optimized patch.
650
+ Surrogate\Target
651
+ ResNet18
652
+ VGG11
653
+ MobileNetV2
654
+ ResNet18
655
+ -
656
+ 81.60 ± 14.63
657
+ 90.92 ± 11.89
658
+ VGG11
659
+ 76.97 ± 20.67
660
+ -
661
+ 95.56 ± 6.74
662
+ MobileNetV2
663
+ 32.44 ± 18.27
664
+ 77.53 ± 14.43
665
+ -
666
+ Table 4: Mean ASR and STD for black-box attacks. The rows
667
+ indicate the surrogate architecture that was used for crafting the
668
+ poison and the trigger, and the columns the target architecture, on
669
+ which we train (using the poisoned dataset) and evaluate the results.
670
+ Each result was evaluated on 24 runs of source-target pairs.
671
+ We use the same 24 source-label pairs for all of our exper-
672
+ iments and report the average results. In the baseline experi-
673
+ ment (perturbation as a trigger) we run the evaluation step on
674
+ the training set, e.g. the victim training phase, five times with
675
+ different seeds ending up with 24 × 5 = 120 runs.
676
+ In the first experiment, we compared our method to a base-
677
+ line without DP in the gray-box setting (surrogate and victim
678
+ architectures are the same) to measure the impact of DP on
679
+ the attack success rate. The results in Table 2 show signifi-
680
+ cantly better ASR with poisoning compared to the baseline
681
+ and almost no change in clean accuracy.
682
+ On the other hand, to confirm that the improvement in
683
+ attack success rate is not only due to the DP but also to the
684
+ optimization of the trigger, we compare our results to gradient
685
+ alignment with a pre-defined arbitrary patch. We perform
686
+ poison crafting with both the optimized and non-optimized
687
+ patches using the same checkpoints of the surrogate models
688
+ for poison crafting. For the trigger, we use a 8 × 8 colorful
689
+ patch. In the baseline, we use the same patch that was applied
690
+ in previous works [Souri et al., 2022; Saha et al., 2020], and
691
+ for the trigger optimization, we start from a random patch
692
+ and optimize it with algorithm 1 for 500 optimization steps.
693
+ We use the trigger to craft poisoned examples with gradient
694
+ alignment (algorithm 2), and evaluate the success of the attack.
695
+ Some samples from this experiment are shown in figure 3.
696
+ As shown in table 3, the optimization of the trigger has a
697
+ significant impact on the attack success rate. On average,
698
+ the ASR with an optimized trigger was significantly higher
699
+ than the ASR without trigger optimization by 45.76%. This
700
+ demonstrates the importance of carefully crafting the trigger
701
+ to improve the effectiveness of the attack.
702
+ Note that the results of MobileNet-V2 are significantly
703
+ lower than those of the other architectures. When we in-
704
+ vestigated it, we noticed that the success of the poisoning
705
+ attack using gradient alignment was highly correlated with the
706
+ success of the trigger crafting using the algorithm 1. This sug-
707
+ Figure 4: Sensitivity analysis of the number of poisoned samples
708
+ added to the training set. The dotted lines represent the results of
709
+ using the trigger without poisoning. We can see that even with a
710
+ small amount of data (e.g. 100, which is 0.2%), reasonable results
711
+ can be obtained.
712
+ gests that the effectiveness of the poisoning attack is closely
713
+ tied to the use of an optimized trigger that performs well
714
+ as an evasion attack on the surrogate model. Therefore we
715
+ estimate that different evasion attacks [Madry et al., 2017;
716
+ Carlini and Wagner, 2017] may yield more effective triggers
717
+ and improve the results on this architecture.
718
+ 4.2
719
+ Black-Box
720
+ Our attack is highly effective in black-box settings compared
721
+ to other data-poisoning-based clean-label backdoor attacks.
722
+ To demonstrate this, we compared the efficiency of a poisoned
723
+ dataset crafted using a surrogate model to attack a model with
724
+ a different architecture (Fs ̸= F). The comparison was made
725
+ for all pairs of the three models: ResNet18, VGG11, and
726
+ MobileNetV2. The results are presented in table 4.
727
+ For comparison, Souri et al. achieved ASR of 29.10%
728
+ and 31.96% on MobileNet-V2 and VGG11 with ResNet18 as
729
+ a surrogate model, while we achieved ASR of 90.92% and
730
+ 81.60% on the same models. It is worth saying that [Souri et
731
+ al., 2022] retrained the model during poison crafting, whereas
732
+ we did not. Furthermore, even when [Souri et al., 2022] uses
733
+ an ensemble of four different architectures, he does not reach
734
+ our results. This suggests that our attack is more effective
735
+ than previous approaches in black-box settings. Note that,
736
+ as shown in [Souri et al., 2022], the attack can be improved
737
+ even further by using an ensemble of multiple models and
738
+ retraining the models several times during crafting.
739
+
740
+ mobilenet
741
+ resnet18
742
+ vggllFigure 5: Sensitivity analysis of the magnitude of the perturbation
743
+ (l∞ norm) to the poisoned samples in the training set and of the
744
+ trigger. The units on the x-axis are in the range (0, 255). The larger
745
+ the norm, the better the performance.
746
+ Method
747
+ ASR [%]
748
+ Accuracy [%]
749
+ Activation Clustering
750
+ 71.28 ± 20.73
751
+ 73.13 ± 2.07
752
+ DPSGD
753
+ 91.16 ± 5.34
754
+ 84.36 ± 0.32
755
+ MixUp
756
+ 94.24 ± 4.44
757
+ 81.39 ± 0.57
758
+ Table 5: ASR and clean accuracy after applying defenses. Clean
759
+ accuracy without defense is 82.64%. As we can see, the success of
760
+ the attack does not significantly deteriorate after using these defenses.
761
+ 4.3
762
+ Sensitivity Analysis
763
+ One challenge of poisoning attacks in the real world is in-
764
+ serting poisoned samples into the victim’s training set. If the
765
+ attacker can use a small number of samples, it is easier to bring
766
+ these samples to the training set, and it will be difficult for the
767
+ victim to find them. Therefore, we investigated the sensitivity
768
+ of the ASR to the choice of N, the number of poisoned sam-
769
+ ples. In this experiment, we evaluate our perturbation-trigger
770
+ attack when selecting N = {50, 100, 200, 300, 400} and test
771
+ the results on the three architectures. As shown in figure 4,
772
+ our method was successful with as few as 50 samples, which
773
+ is only 0.1% of the size of the training set. This indicates that
774
+ our method is highly efficient and can be successful with a
775
+ limited number of training samples.
776
+ Furthermore, we analyzed ϵt, ϵp, the upper bound of the
777
+ perturbation of the trigger and the poison. We chose ϵt =
778
+ ϵp = ϵ ∈ {4/255, 8/255, 12/255, 16/255}. As we expected,
779
+ there is a tradeoff between the stealthiness of the attack, i.e.
780
+ the value of ϵ, and its performance. The results and some
781
+ perturbation examples are shown in figures 5 and 6.
782
+ 4.4
783
+ ImageNet Evaluation
784
+ To evaluate a more realistic scenario of DP, when the images
785
+ have a larger size, we evaluate our method on ImageNet. Due
786
+ to computational limits, we use only the 10 first classes of
787
+ ImageNet to train and evaluate the results. We chose one
788
+ source-target pair, crafted poison to 185 samples from the
789
+ target class, and performed a partial training of ImageNet on
790
+ ResNet18. Some samples are shown in figure 2. We found
791
+ that the ASR was 76% for this preliminary experiment, which
792
+ indicates that the danger is also present in real-world datasets.
793
+ Figure 6: Samples of images with triggers with different l∞ norms.
794
+ 4.5
795
+ Defences
796
+ We evaluate the effectiveness of three different types of de-
797
+ fenses against our method, based on three different concepts:
798
+ gradient shaping, data filtering, and data augmentation. We
799
+ chose methods that were found effective at evading gradient-
800
+ alignment-based DP [Souri et al., 2022]. The first, Activation
801
+ Clustering [Chen et al., 2018], filters out samples based on
802
+ their activations in the second to last layer. We follow this
803
+ method, perform K-Means clustering with K = 2 on each of
804
+ the labels in the training set, and filter out the small cluster.
805
+ The second method, named DP-SGD [Hong et al., 2020], is
806
+ based on reshaping the gradients of the weights during training.
807
+ In this method, the victim clips the training gradients and adds
808
+ some noise to them, to mitigate the effect of poisoned samples.
809
+ We tried several values for the clipping rate and the noise taken
810
+ from them to evaluate this defense. We found that gradient
811
+ clipping of 4 and additive gaussian noise with a variance of
812
+ 10−5 is reasonable. More strict values make the model not
813
+ converge. The last defense is based on [Borgnia et al., 2021]
814
+ which shows that data augmentations can be a viable defense
815
+ against backdoor attacks. We follow [Souri et al., 2022] and
816
+ evaluate the defense using MixUp augmentation [Zhang et al.,
817
+ 2017] during victim training. All of these methods did not
818
+ significantly deteriorate the attack’s effectiveness. The results
819
+ can be found in table 5.
820
+ 5
821
+ Conclusion
822
+ In this paper, we present a novel data poisoning attack on
823
+ neural networks that is stealthy and difficult to detect. Our
824
+ proposed method is highly efficient in crafting time and only
825
+ requires a trained surrogate model, without the need for ad-
826
+ ditional retraining. While our attack achieves state-of-the-art
827
+ results in terms of attack success rate, it may not perform as
828
+ well on certain model architectures. In future research, we
829
+ could investigate the impact of more complex triggers on at-
830
+ tack performance. We could also optimize both the poison
831
+ and the trigger together to find a better trigger that is more
832
+ effective at activating the poison. It is also critical to continue
833
+ developing robust defenses against DP attacks and to prioritize
834
+ the security of training data.
835
+
836
+ 16/255
837
+ 12/255
838
+ 8/255
839
+ 4/255References
840
+ [Bagdasaryan and Shmatikov, 2021] Eugene
841
+ Bagdasaryan
842
+ and Vitaly Shmatikov. Blind backdoors in deep learning
843
+ models. In 30th USENIX Security Symposium (USENIX
844
+ Security 21), pages 1505–1521, 2021.
845
+ [Biggio et al., 2011] Battista Biggio, Blaine Nelson, and
846
+ Pavel Laskov. Support vector machines under adversar-
847
+ ial label noise. In Asian conference on machine learning,
848
+ pages 97–112. PMLR, 2011.
849
+ [Borgnia et al., 2021] Eitan Borgnia, Jonas Geiping, Valeriia
850
+ Cherepanova, Liam Fowl, Arjun Gupta, Amin Ghiasi,
851
+ Furong Huang, Micah Goldblum, and Tom Goldstein. Dp-
852
+ instahide: Provably defusing poisoning and backdoor at-
853
+ tacks with differentially private data augmentations. arXiv
854
+ preprint arXiv:2103.02079, 2021.
855
+ [Carlini and Wagner, 2017] Nicholas Carlini and David Wag-
856
+ ner. Towards evaluating the robustness of neural networks.
857
+ In 2017 ieee symposium on security and privacy (sp), pages
858
+ 39–57. Ieee, 2017.
859
+ [Chen et al., 2017] Xinyun Chen, Chang Liu, Bo Li, Kim-
860
+ berly Lu, and Dawn Song. Targeted backdoor attacks on
861
+ deep learning systems using data poisoning. arXiv preprint
862
+ arXiv:1712.05526, 2017.
863
+ [Chen et al., 2018] Bryant Chen, Wilka Carvalho, Nathalie
864
+ Baracaldo, Heiko Ludwig, Benjamin Edwards, Taesung
865
+ Lee, Ian Molloy, and Biplav Srivastava. Detecting backdoor
866
+ attacks on deep neural networks by activation clustering.
867
+ arXiv preprint arXiv:1811.03728, 2018.
868
+ [Cin`a et al., 2022] Antonio Emanuele Cin`a, Kathrin Grosse,
869
+ Ambra Demontis, Sebastiano Vascon, Werner Zellinger,
870
+ Bernhard A Moser, Alina Oprea, Battista Biggio, Marcello
871
+ Pelillo, and Fabio Roli. Wild patterns reloaded: A survey of
872
+ machine learning security against training data poisoning.
873
+ arXiv preprint arXiv:2205.01992, 2022.
874
+ [Fowl et al., 2021] Liam Fowl, Ping-yeh Chiang, Micah
875
+ Goldblum, Jonas Geiping, Arpit Bansal, Wojtek Czaja, and
876
+ Tom Goldstein. Preventing unauthorized use of proprietary
877
+ data: Poisoning for secure dataset release. arXiv preprint
878
+ arXiv:2103.02683, 2021.
879
+ [Geiping et al., 2020] Jonas Geiping, Liam Fowl, W Ronny
880
+ Huang, Wojciech Czaja, Gavin Taylor, Michael Moeller,
881
+ and Tom Goldstein.
882
+ Witches’ brew: Industrial scale
883
+ data poisoning via gradient matching.
884
+ arXiv preprint
885
+ arXiv:2009.02276, 2020.
886
+ [Goodfellow et al., 2014] Ian J Goodfellow, Jonathon Shlens,
887
+ and Christian Szegedy. Explaining and harnessing adver-
888
+ sarial examples. arXiv preprint arXiv:1412.6572, 2014.
889
+ [Gu et al., 2017] Tianyu Gu, Brendan Dolan-Gavitt, and Sid-
890
+ dharth Garg. Badnets: Identifying vulnerabilities in the
891
+ machine learning model supply chain.
892
+ arXiv preprint
893
+ arXiv:1708.06733, 2017.
894
+ [Hong et al., 2020] Sanghyun Hong, Varun Chandrasekaran,
895
+ Yi˘gitcan Kaya, Tudor Dumitras¸, and Nicolas Papernot. On
896
+ the effectiveness of mitigating data poisoning attacks with
897
+ gradient shaping. arXiv preprint arXiv:2002.11497, 2020.
898
+ [Huang et al., 2020] W Ronny Huang, Jonas Geiping, Liam
899
+ Fowl, Gavin Taylor, and Tom Goldstein. Metapoison: Prac-
900
+ tical general-purpose clean-label data poisoning. Advances
901
+ in Neural Information Processing Systems, 33:12080–
902
+ 12091, 2020.
903
+ [Kurakin et al., 2018] Alexey Kurakin, Ian J Goodfellow, and
904
+ Samy Bengio. Adversarial examples in the physical world.
905
+ In Artificial intelligence safety and security, pages 99–112.
906
+ Chapman and Hall/CRC, 2018.
907
+ [Kurita et al., 2020] Keita Kurita, Paul Michel, and Graham
908
+ Neubig. Weight poisoning attacks on pre-trained models.
909
+ arXiv preprint arXiv:2004.06660, 2020.
910
+ [Kuznetsova et al., 2020] Alina Kuznetsova, Hassan Rom,
911
+ Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset,
912
+ Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander
913
+ Kolesnikov, et al. The open images dataset v4. Inter-
914
+ national Journal of Computer Vision, 128(7):1956–1981,
915
+ 2020.
916
+ [Li et al., 2020] Shaofeng Li, Minhui Xue, Benjamin Zi Hao
917
+ Zhao, Haojin Zhu, and Xinpeng Zhang. Invisible backdoor
918
+ attacks on deep neural networks via steganography and
919
+ regularization.
920
+ IEEE Transactions on Dependable and
921
+ Secure Computing, 18(5):2088–2105, 2020.
922
+ [Li et al., 2022] Yiming Li, Yong Jiang, Zhifeng Li, and Shu-
923
+ Tao Xia. Backdoor learning: A survey. IEEE Transactions
924
+ on Neural Networks and Learning Systems, 2022.
925
+ [Madry et al., 2017] Aleksander
926
+ Madry,
927
+ Aleksandar
928
+ Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian
929
+ Vladu.
930
+ Towards deep learning models resistant to
931
+ adversarial attacks.
932
+ arXiv preprint arXiv:1706.06083,
933
+ 2017.
934
+ [Moosavi-Dezfooli et al., 2017] Seyed-Mohsen
935
+ Moosavi-
936
+ Dezfooli, Alhussein Fawzi, Omar Fawzi, and Pascal
937
+ Frossard.
938
+ Universal adversarial perturbations.
939
+ In Pro-
940
+ ceedings of the IEEE conference on computer vision and
941
+ pattern recognition, pages 1765–1773, 2017.
942
+ [Qi et al., 2021] Fanchao Qi, Yuan Yao, Sophia Xu, Zhiyuan
943
+ Liu, and Maosong Sun. Turn the combination lock: Learn-
944
+ able textual backdoor attacks via word substitution. arXiv
945
+ preprint arXiv:2106.06361, 2021.
946
+ [Ramesh et al., 2022] Aditya Ramesh, Prafulla Dhariwal,
947
+ Alex Nichol, Casey Chu, and Mark Chen. Hierarchical
948
+ text-conditional image generation with clip latents. arXiv
949
+ preprint arXiv:2204.06125, 2022.
950
+ [Saha et al., 2020] Aniruddha Saha, Akshayvarun Subra-
951
+ manya, and Hamed Pirsiavash. Hidden trigger backdoor
952
+ attacks. In Proceedings of the AAAI conference on artificial
953
+ intelligence, volume 34, pages 11957–11965, 2020.
954
+ [Schwarzschild et al., 2021] Avi Schwarzschild, Micah Gold-
955
+ blum, Arjun Gupta, John P Dickerson, and Tom Goldstein.
956
+ Just how toxic is data poisoning? a unified benchmark for
957
+ backdoor and data poisoning attacks. In International Con-
958
+ ference on Machine Learning, pages 9389–9398. PMLR,
959
+ 2021.
960
+
961
+ [Shafahi et al., 2018] Ali Shafahi, W Ronny Huang, Mahyar
962
+ Najibi, Octavian Suciu, Christoph Studer, Tudor Dumitras,
963
+ and Tom Goldstein. Poison frogs! targeted clean-label
964
+ poisoning attacks on neural networks. Advances in neural
965
+ information processing systems, 31, 2018.
966
+ [Souri et al., 2022] Hossein Souri, Liam H Fowl, Rama Chel-
967
+ lappa, Micah Goldblum, and Tom Goldstein.
968
+ Sleeper
969
+ agent: Scalable hidden trigger backdoors for neural net-
970
+ works trained from scratch. In Alice H. Oh, Alekh Agarwal,
971
+ Danielle Belgrave, and Kyunghyun Cho, editors, Advances
972
+ in Neural Information Processing Systems, 2022.
973
+ [Turner et al., 2019] Alexander Turner, Dimitris Tsipras, and
974
+ Aleksander Madry.
975
+ Label-consistent backdoor attacks.
976
+ arXiv preprint arXiv:1912.02771, 2019.
977
+ [Yang et al., 2017] Chaofei Yang, Qing Wu, Hai Li, and Yi-
978
+ ran Chen. Generative poisoning attack method against
979
+ neural networks. arXiv preprint arXiv:1703.01340, 2017.
980
+ [Zhang et al., 2017] Hongyi
981
+ Zhang,
982
+ Moustapha
983
+ Cisse,
984
+ Yann N Dauphin, and David Lopez-Paz.
985
+ mixup: Be-
986
+ yond empirical risk minimization.
987
+ arXiv preprint
988
+ arXiv:1710.09412, 2017.
989
+ [Zhang et al., 2021] Xinyang Zhang, Zheng Zhang, Shouling
990
+ Ji, and Ting Wang. Trojaning language models for fun and
991
+ profit. In 2021 IEEE European Symposium on Security and
992
+ Privacy (EuroS&P), pages 179–197. IEEE, 2021.
993
+ [Zhao et al., 2021] Bo Zhao, Konda Reddy Mopuri, and
994
+ Hakan Bilen. Dataset condensation with gradient matching.
995
+ ICLR, 1(2):3, 2021.
996
+
4NE0T4oBgHgl3EQfvQFL/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4NFQT4oBgHgl3EQfHjWk/content/tmp_files/2301.13249v1.pdf.txt ADDED
@@ -0,0 +1,2064 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Extraction of neutron density distributions from high-statistics coherent elastic neutrino-nucleus
2
+ scattering data
3
+ D. Aristizabal Sierra1, ∗
4
+ 1Universidad T´ecnica Federico Santa Mar´ıa - Departamento de F´ısica
5
+ Casilla 110-V, Avda. Espa˜na 1680, Valpara´ıso, Chile
6
+ Forthcoming fixed-target coherent elastic neutrino-nucleus scattering experiments aim at measurements with
7
+ O(tonne)-scale detectors and substantially reduced systematic and statistical uncertainties. With such high
8
+ quality data, the extraction of point-neutron distributions mean-square radii requires a better understanding of
9
+ possible theoretical uncertainties. We quantify the impact of single-nucleon electromagnetic mean-square radii
10
+ on the weak-charge form factor and compare results from weak-charge form factor parametrizations and weak-
11
+ charge form factor decompositions in terms of elastic vector proton and neutron form factors, including nucleon
12
+ form factors Q-dependent terms up to order Q2. We assess as well the differences arising from results derived
13
+ using weak-charge form factor decompositions in terms of elastic vector proton and neutron form factors and a
14
+ model-independent approach based solely on the assumption of spherically symmetric nuclear ground state. We
15
+ demonstrate the impact of the main effects by assuming pseudo-data from a one-tonne LAr detector and find that,
16
+ among the effects and under the assumptions considered in this paper, weak-charge form factor parametrizations
17
+ and weak-charge form factor decompositions in terms of elastic vector proton and neutron form factors enable
18
+ the extraction of the 40Ar point-neutron distribution mean-square radius with a ∼ 15% accuracy. With a sub-
19
+ stantial reduction of the beam-related neutron and steady-state backgrounds a ∼ 1% precision extraction seems
20
+ feasible, using either of the two approaches.
21
+ CONTENTS
22
+ I. Introduction
23
+ 1
24
+ II. Nuclear charge and weak-charge form factors
25
+ 2
26
+ A. Electromagnetic and weak charge radii
27
+ 3
28
+ III. Theoretical and phenomenological uncertainties
29
+ 3
30
+ A. Uncertainties due to leading-order point-proton
31
+ distribution mean-square radius and leading-order
32
+ nucleon form factors
33
+ 4
34
+ B. Uncertainties due to elastic vector proton and
35
+ neutron form factor parametrizations
36
+ 6
37
+ C. Model-independent versus form factor
38
+ parametrizations approaches
39
+ 6
40
+ IV. CEνNS cross section and event rate
41
+ 8
42
+ V. Extraction of the argon neutron distribution
43
+ root-mean-square radius using different approaches
44
+ 11
45
+ VI. Conclusions
46
+ 13
47
+ Acknowledgments
48
+ 13
49
+ References
50
+ 13
51
+ I.
52
+ INTRODUCTION
53
+ Since its first observation by the COHERENT collabora-
54
+ tion in 2017 [1], CEνNS has become a powerful tool for the
55
+ ∗ daristizabal@ulg.ac.be
56
+ determination of nuclear and SM properties as well as for con-
57
+ straining new physics. Current datasets involve measurements
58
+ in CsI and LAr detectors with fiducial volumes in the order of
59
+ 10kg [1–3]. With such active volumes, the datasets comprise
60
+ about ∼ 100 events, with—to a certain degree—sizable sys-
61
+ tematics and statistical uncertainties. Using these data a wide
62
+ range of analyses have been carried out. They include the
63
+ determination of cesium, iodide and argon point-neutron dis-
64
+ tributions root-mean-square (rms) radii [4–9], measurements
65
+ of the weak mixing angle at renormalization scales of the or-
66
+ der of 50 MeV [5, 6, 9, 10], constraints on new interactions
67
+ in the neutrino sector [11–24], constraints on neutrino elec-
68
+ tromagnetic properties [25–27] and limits on sterile neutrinos
69
+ [10].
70
+ Plans for enhancing statistics by deploying an O(tonne)
71
+ LAr detector in the future second target station (STS) at the
72
+ Oak Ridge National Laboratory have been discussed in Refs.
73
+ [28, 29] (see Ref.
74
+ [30] as well)1.
75
+ With improvements on
76
+ statistical and systematic uncertainties, such detector will de-
77
+ liver high quality data with which improvements on the dif-
78
+ ferent analyses that have been done so far are expected. This
79
+ calls for a better understanding of theoretical uncertainties.
80
+ For instance—depending on the experimental uncertainties—
81
+ the inclusion of one-loop order electroweak corrections [34]
82
+ might become necessary, regardless of the physics case the
83
+ data will be used for.
84
+ As far as we know, the extraction of point-neutron distri-
85
+ butions mean-square radii with existing or forecasted data has
86
+ been done considering only leading order (LO) effects. With
87
+ this in this paper we mean the following: The measured nu-
88
+ clear charge radius and the point-proton distribution rms ra-
89
+ 1 The Coherent Captain-Mills Experiment at Los Alamos National Labora-
90
+ tory aims at measurements with an O(tonne) LAr detector as well [31] (see
91
+ e.g. Refs. [32, 33] for discussions of its physics reach).
92
+ arXiv:2301.13249v1 [hep-ph] 30 Jan 2023
93
+
94
+ 2
95
+ dius have been assumed to be equal, and only LO nucleon
96
+ form factor terms (Q independent terms) have been considered
97
+ [6, 35–37]. Furthermore, analyses using nuclear weak-charge
98
+ form factor parametrizations, nuclear weak-charge form fac-
99
+ tor decompositions in terms of elastic vector proton and neu-
100
+ tron form factors and power series expansions of the nuclear
101
+ weak-charge form factor have been used [4, 8, 36–38]. With
102
+ all of them implying—in principle—different precision levels.
103
+ In this paper we quantify the uncertainties implied by con-
104
+ sidering only LO effects—defined as we have done in the pre-
105
+ vious paragraph—in the extraction of point-neutron distribu-
106
+ tions mean-square radii. The analysis is split in two parts. In
107
+ the first part, we quantify uncertainties at the nuclear weak-
108
+ charge level for heavy and light nuclei (cesium and argon,
109
+ taken as representative examples of heavy and light nuclei).
110
+ To do so we compare results from calculations using only
111
+ LO effects with calculations where: (a) The point-proton dis-
112
+ tribution radius is corrected with single-nucleon electromag-
113
+ netic mean-square radii, (b) nucleon form factor Q-dependent
114
+ terms (up to order Q2) are included 2. We then determine the
115
+ uncertainties implied by the most commonly used form fac-
116
+ tor parametrizations and by expansions in terms of even mo-
117
+ ments.
118
+ In the second part, we quantify the precision at which the
119
+ point-neutron distribution rms radius can be extracted, includ-
120
+ ing the main effects found in the first part. In this case we
121
+ proceed by assuming pseudo-data from a one-tonne LAr de-
122
+ tector with assumptions on the beam-related neutron (BRN)
123
+ and steady-state (SS) backgrounds as well as systematic un-
124
+ certainties extrapolated from current COHERENT measure-
125
+ ments.
126
+ The remainder of this paper is organized as follows. In Sec.
127
+ II we provide a general discussion of the nuclear charge and
128
+ weak-charge form factors as well as of the nuclear charge and
129
+ weak-charge radii. In Sec. III we determine nuclear weak-
130
+ charge form factor uncertainties, while in Sec. IV we briefly
131
+ discuss the CEνNS differential cross section and differential
132
+ event rate. In Sec. V, based on the results from Sec. III, we
133
+ extract the 40Ar neutron distribution mean-square radius. Our
134
+ conclusions are presented in Sec. VI
135
+ II.
136
+ NUCLEAR CHARGE AND WEAK-CHARGE FORM
137
+ FACTORS
138
+ The single-nucleon electromagnetic and weak currents can
139
+ be expressed in terms of the Sachs form factors [41]. For a
140
+ nucleon N (N = n, p) they read
141
+ Jµ,N
142
+ EM = us′(p′)
143
+
144
+ GN
145
+ Eγµ +
146
+ �GN
147
+ M −GN
148
+ E
149
+ 1+τ
150
+ ��
151
+ τγµ +iσµν qν
152
+ 2m
153
+ ��
154
+ us(p) ,
155
+ (1)
156
+ Jµ,N
157
+ NC = us′(p′)
158
+
159
+ �GN
160
+ Eγµ +
161
+ � �GN
162
+ M − �GN
163
+ E
164
+ 1+τ
165
+ ��
166
+ τγµ +iσµν qν
167
+ 2m
168
+ ��
169
+ us(p) .
170
+ (2)
171
+ where m is a universal nucleon mass, q = p′ − p the trans-
172
+ ferred four momentum and τ = Q2/4/m2 (with Q2 = −q2
173
+ a timelike vector) and us(p) and us′(p′) on-shell nucleon
174
+ spinors. GN
175
+ E,M = GN
176
+ E,M(Q2) and �GN
177
+ E,M = �GN
178
+ E,M(Q2) are the
179
+ single-nucleon form factors for N.
180
+ The weak neutral cur-
181
+ rent (NC) involves as well axial and pseudoscalar form fac-
182
+ tors, which are sensitive to the nucleon spin distribution in the
183
+ nuclear medium. For elastic scattering their contribution can
184
+ then be regarded as a subleading effect (actually vanishing in
185
+ nuclei with even number of protons and neutrons), and so are
186
+ not considered in Eq. (2).
187
+ The nuclear charge and weak-charge form factors follow
188
+ from: (i) Trading the on-shell nucleon spinors to spinor wave-
189
+ functions that account for nucleons in a potential, (ii) assum-
190
+ ing the impulse approximation (i.e.
191
+ assuming that single-
192
+ nucleon form factors are valid as well in the nuclear medium),
193
+ (iii) integration over nuclear volume, (iv) taking into account
194
+ contributions from protons and neutrons (for details see e.g.
195
+ 2 Other subleading effects involve as well relativistic Darwin-Foldy and spin-
196
+ orbit corrections [39, 40], which our analysis does not account for.
197
+ [40]). Explicitly one finds
198
+ ZFC = ∑
199
+ N=p,n
200
+
201
+ GN
202
+ EFN
203
+ V +
204
+ �GN
205
+ M −GN
206
+ E
207
+ 1+τ
208
+ ��
209
+ τFN
210
+ V + q
211
+ 2mFN
212
+ T
213
+ ��
214
+ ,
215
+ (3)
216
+ QWFW = ∑
217
+ N=p,n
218
+
219
+ �GN
220
+ EFN
221
+ V +
222
+ � �GN
223
+ M − �GN
224
+ E
225
+ 1+τ
226
+ ��
227
+ τFN
228
+ V + q
229
+ 2mFN
230
+ T
231
+ ��
232
+ ,
233
+ (4)
234
+ with QW determined by the couplings of the proton and neu-
235
+ tron to the Z gauge boson, QW = N gn
236
+ V + Z gp
237
+ V (gp
238
+ V = 1/2 −
239
+ 2sin2 θW and gn
240
+ V = −1/2) 3. Note that the form factors writ-
241
+ ten as in Eqs. (3) and (4) satisfy the normalization condition
242
+ FC(Q2 = 0) = FW(Q2 = 0) = 1. These expressions contain in-
243
+ formation on nucleon and nuclear structure, the latter encoded
244
+ in the vector and tensor nuclear form factors. Spin-orbit ef-
245
+ fects governed by FT are subleading compared with nuclear
246
+ 3 One-loop corrected proton and neutron electroweak couplings are given by
247
+ gp
248
+ V = 0.0721 and gn
249
+ V = −0.988 [42]. These are the values we use in the
250
+ calculations presented in Secs. III and V.
251
+
252
+ 3
253
+ spin-independent effects controlled by FV [40]. Thus keeping
254
+ only LO effects and taking into account that τ ≪ 1 for the typ-
255
+ ical transferred momentum in neutrino stopped-pion sources
256
+ (Q2 ≲ m2
257
+ µ/2), the charge and weak-charge form factors reduce
258
+ to a rather simple form
259
+ FC(q2) = 1
260
+ Z
261
+
262
+ Gp
263
+ E(q2)F p
264
+ V (q2)+Gn
265
+ E(q2)Fn
266
+ V (q2)
267
+
268
+ ,
269
+ (5)
270
+ FW(q2) =
271
+ 1
272
+ QW
273
+
274
+ �Gp
275
+ E(q2)F p
276
+ V (q2)+ �Gn
277
+ E(q2)Fn
278
+ V (q2)
279
+
280
+ .
281
+ (6)
282
+ Numerically one finds that spin-orbit effects are of the order
283
+ of 0.1% [40], while terms proportional to τ contribute at the
284
+ order of 0.01% in stopped-pion neutrino experiments where
285
+ Q2 ≃ (10MeV)2. Eqs. (5) and (6) are thus precise enough at
286
+ the percent level.
287
+ A.
288
+ Electromagnetic and weak charge radii
289
+ LO expressions for the electromagnetic and weak-charge
290
+ radii of the nucleus follow from Eqs. (5) and (6), as we now
291
+ discuss. Assuming spherical symmetry, the nucleon and spin-
292
+ independent nucleon structure functions can be expanded in
293
+ terms of their moments. For nucleons one has
294
+ Gp,n
295
+ E
296
+ =
297
+
298
+
299
+ i=0,1
300
+ (−1)i
301
+ Q2i
302
+ (2i+1)!r2i
303
+ X ,
304
+ (7)
305
+ �GX
306
+ E = gX
307
+ V
308
+
309
+
310
+ i=0
311
+ (−1)i
312
+ Q2i
313
+ (2i+1)! ˜r2i
314
+ X ,
315
+ (8)
316
+ where the order Q2 terms in the expansion in Eq.
317
+ (7) in-
318
+ volve the single-nucleon electromagnetic mean-square radii,
319
+ r2
320
+ X (X = p,n), while those in Eq. (8) the single-nucleon weak-
321
+ charge mean-square radii, ˜r2
322
+ X. In Eq. (7) the sum starts at
323
+ i = 0 (i = 1) for protons (neutrons). For the spin-independent
324
+ nuclear form factors one instead has (see e.g. [38])
325
+ F p
326
+ V = Z
327
+
328
+
329
+ i=0
330
+ (−1)i
331
+ Q2i
332
+ (2i+1)!R2i
333
+ p ,
334
+ (9)
335
+ Fn
336
+ V = N
337
+
338
+
339
+ i=0
340
+ (−1)i
341
+ Q2i
342
+ (2i+1)!R2i
343
+ n .
344
+ (10)
345
+ In this case—at order Q2—terms involve the point-proton and
346
+ point-neutron distributions mean-square radii, R2
347
+ p and R2
348
+ n (sec-
349
+ ond moment of the distributions)4. The nuclear charge and
350
+ weak-charge radii, R2
351
+ C and R2
352
+ W, follow from
353
+ R2
354
+ C = − 6 dFC
355
+ dQ2
356
+ ����
357
+ Q2=0
358
+ ,
359
+ R2
360
+ W = −6 dFW
361
+ dQ2
362
+ ����
363
+ Q2=0
364
+ ,
365
+ (11)
366
+ 4 Here following standard conventions we use lower-case for nucleons and
367
+ upper-case for nuclei.
368
+ after expansions in Eqs. (7), (8), (9) and (10) are inserted in
369
+ Eqs. (5) and (6). Their explicit expressions are given by
370
+ R2
371
+ C = R2
372
+ p +r2
373
+ p + N
374
+ Z r2
375
+ n ,
376
+ (12)
377
+ R2
378
+ W = Zgp
379
+ V
380
+ QW
381
+
382
+ R2
383
+ p + ˜r2
384
+ p
385
+
386
+ + Ngn
387
+ V
388
+ QW
389
+
390
+ R2
391
+ n + ˜r2
392
+ n
393
+
394
+ .
395
+ (13)
396
+ The single-nucleon weak-charge mean-square radii in RW can
397
+ be reexpressed in terms of the single-nucleon electromagnetic
398
+ mean-square radii as follows [40]
399
+ ˜r2
400
+ p = r2
401
+ p + gn
402
+ V
403
+ gp
404
+ V
405
+ r2
406
+ n +ξ(0)
407
+ V r2
408
+ s ,
409
+ ˜r2
410
+ n = r2
411
+ p + gp
412
+ V
413
+ gn
414
+ V
415
+ r2
416
+ n +ξ(0)
417
+ V r2
418
+ s , (14)
419
+ where ξ(0)
420
+ V
421
+ = gu
422
+ V + gd
423
+ V + gs
424
+ V (with gq
425
+ V the quark q weak-vector
426
+ charge) and r2
427
+ s the mean-square strange radius. Numerically,
428
+ ξ(0)
429
+ V = −0.988 and r2
430
+ s = −0.00430fm2 with the latter obtained
431
+ from lattice QCD calculations [43]. Thus, the strange quark
432
+ contribution provides per mille corrections to the proton and
433
+ neutron LO expressions in Eq. (14). It can therefore be ne-
434
+ glected in the following analysis.
435
+ With the aid of these equations the weak-charge mean-
436
+ square radius can finally be rewritten as
437
+ R2
438
+ W = Z
439
+ QW
440
+
441
+ gp
442
+ VR2
443
+ p +gp
444
+ Vr2
445
+ p +gn
446
+ Vr2
447
+ n
448
+
449
+ + N
450
+ QW
451
+
452
+ gn
453
+ VR2
454
+ n +gn
455
+ Vr2
456
+ p +gp
457
+ Vr2
458
+ n
459
+
460
+ ,
461
+ (15)
462
+ which in turn can be recast in terms of the electromagnetic
463
+ charge radius and the neutron skin, R2
464
+ n −R2
465
+ p, [8]
466
+ R2
467
+ W = R2
468
+ C + Ngn
469
+ V
470
+ QW
471
+
472
+ (R2
473
+ n −R2
474
+ p)+ Z2 −N2
475
+ N Z
476
+ r2
477
+ n
478
+
479
+ .
480
+ (16)
481
+ Due to the small transferred momentum, stopped-pion
482
+ CEνNS experiments are not particularly sensitive to nucleon
483
+ structure. Thus the nucleon form factors in Eqs. (5) and (6)
484
+ can be truncated at order Q2, resulting in the following ex-
485
+ pression for the weak-charge form factor [8]
486
+ FW ≃
487
+ 1
488
+ QW
489
+
490
+ Z
491
+
492
+ gp
493
+ V − gp
494
+ V
495
+ 6 r2
496
+ pQ2 − gn
497
+ V
498
+ 6 r2
499
+ nQ2
500
+
501
+ F p
502
+ V (Q2)
503
+ +N
504
+
505
+ gn
506
+ V − gn
507
+ V
508
+ 6 r2
509
+ pQ2 − gp
510
+ V
511
+ 6 r2
512
+ nQ2
513
+
514
+ Fn
515
+ V (Q2)
516
+
517
+ ,
518
+ (17)
519
+ where the nuclear form factors have been normalized,
520
+ F p
521
+ V (Q2 = 0) = 1 and Fn
522
+ V (Q2 = 0) = 1.
523
+ III.
524
+ THEORETICAL AND PHENOMENOLOGICAL
525
+ UNCERTAINTIES
526
+ In this Section we determine the numerical variations to
527
+ which the weak-charge form factor can be subject to. For that
528
+ aim we consider the Helm parametrization [44] for the elastic
529
+ vector proton and neutron form factors (see Sec. III A). We
530
+
531
+ 4
532
+ first quantify FW by considering LO expressions for the point-
533
+ proton distribution mean-square radius and the nucleon form
534
+ factors, which we compare with FW obtained by considering
535
+ the full expression in Eq. (17), including the single-nucleon
536
+ electromagnetic mean-square radii in the determination of Rp.
537
+ We compare as well those results with those obtained assum-
538
+ ing the Helm parametrization for the weak-charge form fac-
539
+ tor. We then proceed to calculate FW by assuming different
540
+ parametrizations for the elastic vector proton and neutron nu-
541
+ clear form factors. Our analysis relies on the Fourier trans-
542
+ form of the symmetrized Fermi function [39] and the Klein-
543
+ Nystrand [45] parametrizations, in addition to the Helm ap-
544
+ proach. In doing so, we quantify the dependence of FW on
545
+ parametrization choice. Finally, we compare the parametriza-
546
+ tion approach with the model-independent treatment based on
547
+ series expansions of the elastic vector proton and neutron form
548
+ factors.
549
+ A.
550
+ Uncertainties due to leading-order point-proton
551
+ distribution mean-square radius and leading-order nucleon
552
+ form factors
553
+ We start by considering three different cases, two limits and
554
+ the full case:
555
+ 1) Limit 1 (Case 1):
556
+ LO nucleon form factors (Q-
557
+ independent terms) and point-proton distribution mean-
558
+ square radius equal to the nuclear charge mean-square ra-
559
+ dius, R2
560
+ p = R2
561
+ C.
562
+ 2) Limit 2 (Case 2): LO nucleon form factors and full point-
563
+ proton distribution mean-square radius given by Eq. (12).
564
+ 3) Full case (Case 3): Full nucleon form factors, up to Q2 as
565
+ given in Eq. (17) and full point-proton distribution mean-
566
+ square radius given by Eq. (12).
567
+ These limits are motivated as follows. In most analyses in
568
+ which the point-neutron distribution rms radius is extracted
569
+ from CEνNS data (see e.g. Ref. [37]) the point-proton distri-
570
+ bution mean-square radius is fixed to be equal to the nuclear
571
+ charge radius. As can be seen from Eq. (12), doing so over-
572
+ estimates Rp. A larger value for Rp means a smaller value
573
+ for Fp at a given Q. Under such simplification, an uncertainty
574
+ in the extraction of R2
575
+ n from CEνNS data is implied. Order
576
+ Q2 corrections in the nucleon form factors are also typically
577
+ ignored and, though suppressed, are potentially a source of
578
+ sizable uncertainties. Ignoring order Q2 terms enhance FW
579
+ [see Eq. (17)], and so the CEνNS event rate, resulting in a
580
+ potential underestimation of R2
581
+ n.
582
+ To proceed, we adopt the Helm form factor parametrization
583
+ for the elastic vector proton and neutron form factors, F p
584
+ V and
585
+ Fn
586
+ V , namely [44]
587
+ FH(Q2) = 3 j1(QR0)
588
+ QR0
589
+ e−(Qs)2/2 .
590
+ (18)
591
+ Here R0 refers to the diffraction radius, related with the point-
592
+ nucleon distribution mean-square radius through the skin
593
+ thickness s = 0.9fm [46] according to
594
+ R0 =
595
+
596
+ 5
597
+ 3
598
+
599
+ R2
600
+ X −3s2�
601
+ (X = p,n) .
602
+ (19)
603
+ To avoid mixing nuclear physics effects with detector ef-
604
+ fects we use argon and cesium as target materials. For ar-
605
+ gon this means one can assume the detector to be 100% com-
606
+ posed of 40Ar (up to per mille corrections), while for cesium
607
+ of 133Cs. Parameters used in our calculation are displayed in
608
+ Tab. I, along with the values for the single-nucleon electro-
609
+ magnetic mean-square radii central values [47].
610
+ Isotope
611
+ 40Ar
612
+ 133Cs
613
+ Abundance (Xi)
614
+ 99.6%
615
+ 100.0%
616
+ RC [fm]
617
+ 3.4274
618
+ 4.0414
619
+ Nucleon
620
+ Proton
621
+ Neutron
622
+ r2
623
+ X [fm2]
624
+ 0.7071
625
+ -0.1155
626
+ TABLE I. 40Ar and 133Cs relative abundances along with their nu-
627
+ clear charge radii used in our calculation. Nuclear charge radii taken
628
+ from Ref. [48]. Single-nucleon electromagnetic mean-square radii
629
+ central values taken from Ref. [47].
630
+ To quantify deviations implied by the limits in items 1 and
631
+ 2 with the full result in item 3 the following percentage differ-
632
+ ence factor is employed
633
+ ∆FW [%] = FW|Ci −FW|C j
634
+ FW|Ci
635
+ ×100% ,
636
+ (20)
637
+ where FW|Ci > FW|C j and Ci, j refer to the cases used for the
638
+ calculation of the weak-charge form factor. The results are
639
+ shown in Fig. 1. We have fixed the point-neutron distribution
640
+ rms radius according to Rn = Rp+0.1fm and Rn = Rp+0.2fm
641
+ for argon and cesium, respectively. Note that these choices are
642
+ not intended to have any particular meaning, they are chosen
643
+ just to illustrate the deviations implied by the different limits
644
+ we are considering.
645
+ From Fig. 1 (left graph) one can see that by assuming that
646
+ the point-proton rms radius distribution amounts to the mea-
647
+ sured nuclear charge rms radius has a small effect. All over the
648
+ relevant transferred momentum range (Q2 ≲ m2
649
+ µ) deviations
650
+ are at (or below) the per mille level, regardless of nuclide. In-
651
+ cluding the single-nucleon electromagnetic mean-square radii
652
+ in the determination of R2
653
+ p becomes relevant only if experi-
654
+ mental uncertainties reach that level of precision, otherwise
655
+ the R2
656
+ p = R2
657
+ C approximation is fairly accurate. Differences
658
+ due to nucleon form factors Q-dependent terms are instead
659
+ slightly more relevant. Results in Fig. 1 (right graph) show
660
+ that they can reach values up to 3%, basically regardless of
661
+ nuclide. Nucleon form factors effects should then accounted
662
+ for in high-statistics CEνNS experiments with low systematic
663
+ and statistical uncertainties.
664
+ Rather than calculating the weak-charge form factor using
665
+ Eq. (17), one could instead use a form factor parametrization
666
+ for FW—e.g. the Helm parametrization—and fix R2
667
+ X = R2
668
+ W in
669
+ Eq. (19), with R2
670
+ W as given in Eq. (16) (as done e.g. in Ref.
671
+
672
+ 5
673
+ 0
674
+ 20
675
+ 40
676
+ 60
677
+ 80
678
+ 100
679
+ Q[MeV]
680
+ 0.00
681
+ 0.05
682
+ 0.10
683
+ 0.15
684
+ 0.20
685
+ FW [%]
686
+ Percentage uncertainties from limits 1 and 2
687
+ Argon: Limit 1 - Limit 2
688
+ Cesium: Limit 1 - Limit 2
689
+ 0
690
+ 20
691
+ 40
692
+ 60
693
+ 80
694
+ 100
695
+ Q[MeV]
696
+ 0.0
697
+ 0.5
698
+ 1.0
699
+ 1.5
700
+ 2.0
701
+ 2.5
702
+ 3.0
703
+ FW [%]
704
+ Percentage uncertainties from limit 1 and NLO nucleon form factor terms
705
+ Argon: Limit 1 - Full
706
+ Cesium: Limit 1 - Full
707
+ FIG. 1. Nuclear weak-charge form factor percentage difference calculated according to Eq. (20) and for: Nucleon form factors LO terms
708
+ (momentum-independent terms only) and R2p = R2
709
+ C (Limit 1, item 1), nucleon form factor LO terms and R2p = R2
710
+ C − r2p − (N/Z)r2n (Limit 2,
711
+ item 2), full nucleon form factors up to order Q2 (Full, item 3) and R2p = R2
712
+ C −r2p −(N/Z)r2n. Left graph: Percentage difference obtained by
713
+ comparing limits 1 and 2, Right graph: percentage difference obtained by comparing limit 1 with full nucleon form factor expressions and
714
+ R2p = R2
715
+ C − r2p − (N/Z)r2n. For the point-neutron distribution rms radii we have taken Rn = Rp + 0.1fm and Rn = Rp + 0.2fm for argon and
716
+ cesium, respectively. These values taken just for the sake of illustrating the deviations implied by the different limits compared with the case
717
+ in which full expressions are considered.
718
+ 0
719
+ 20
720
+ 40
721
+ 60
722
+ 80
723
+ 100
724
+ Q[MeV]
725
+ 0.0
726
+ 1.5
727
+ 3.0
728
+ 4.5
729
+ 6.0
730
+ 7.5
731
+ 9.0
732
+ FW [%]
733
+ Argon
734
+ Percentage uncertainties from Helm parametrization and full FW
735
+ FW = FH vs FW: Limit 1
736
+ FW = FH vs FW: Full
737
+ 0
738
+ 20
739
+ 40
740
+ 60
741
+ 80
742
+ 100
743
+ Q[MeV]
744
+ 0
745
+ 3
746
+ 6
747
+ 9
748
+ 12
749
+ 15
750
+ 18
751
+ FW [%]
752
+ Cesium
753
+ Percentage uncertainties from Helm parametrization and full FW
754
+ FW = FH vs FW: Limit 1
755
+ FW = FH vs FW: Full
756
+ FIG. 2. Nuclear weak-charge form factor percentage uncertainty for argon (left graph) and cesium (right graph) calculated using the Helm
757
+ parametrization with the diffraction radius R0 fixed through the weak-charge mean-square radius R2
758
+ W and by using Eq. (17). Percentage
759
+ uncertainties are calculated in limit 1 (item 1) and in the full case (item 3). The point-neutron distribution rms radii have been fixed according
760
+ to Rn = Rp +0.1fm and Rn = Rp +0.2fm.
761
+ [8]). Since R2
762
+ W involves the point-neutron distribution mean-
763
+ square radius, from such procedure one could as well extract
764
+ from CEνNS data a range for its value at a certain confidence
765
+ level. To show what are the expected percentage differences
766
+ following this procedure with that dictated by Eq. (17), we
767
+ have calculated FW as we have described above and compared
768
+ with FW calculated using Eq. (17) in limit 1 (item 1) and in
769
+ the full case (item 3). Deviations are quantified with the aid of
770
+ Eq. (20) where in this case Ci refers to FW calculated through
771
+ the Helm parametrization and Cj to FW calculated using Eq.
772
+ (17) in the aforementioned cases.
773
+ The result is displayed in Fig. 2, left (right) graph for ar-
774
+ gon (cesium). One can see that differences between both ap-
775
+ proaches are more pronounced for heavier nuclei. And grow
776
+ depending on whether one considers the most simplified as-
777
+ sumptions (limit 1) or the full weak-charge form factor in-
778
+ cluding nucleon form factors corrections and single-nucleon
779
+ electromagnetic mean-square radii. For argon, the percentage
780
+ uncertainty can raise up to ∼ 9% while for cesium—instead—
781
+ up to ∼ 18%. Some caution, however, is required. Differences
782
+ grow with transferred momentum and so at their peak (in the
783
+ relevant range) the stopped-pion neutrino flux is expected to
784
+ be fading away. Thus, although substantially large, these un-
785
+ certainties might not have the same effect they exhibit here on
786
+ the CEνNS event rate. We will come back to this issue later
787
+ in Sec. V.
788
+
789
+ 6
790
+ 0
791
+ 20
792
+ 40
793
+ 60
794
+ 80
795
+ 100
796
+ Q[MeV]
797
+ 0.0
798
+ 0.2
799
+ 0.4
800
+ 0.6
801
+ 0.8
802
+ 1.0
803
+ FW [%]
804
+ Argon
805
+ Percentage uncertainties from FF parametrizations
806
+ KN-Helm
807
+ Fermi-Helm
808
+ KN-Fermi
809
+ 0
810
+ 20
811
+ 40
812
+ 60
813
+ 80
814
+ 100
815
+ Q[MeV]
816
+ 0.0
817
+ 0.5
818
+ 1.0
819
+ 1.5
820
+ 2.0
821
+ FW [%]
822
+ Cesium
823
+ Percentage uncertainties from FF parametrizations
824
+ KN-Helm
825
+ Fermi-Helm
826
+ KN-Fermi
827
+ FIG. 3. Left graph: Weak-charge form factor percentage difference for argon obtained by using for the elastic vector proton and neutron
828
+ form factors the Helm, Klein-Nystrand and the Fourier transform of the symmetrized Fermi distribution parametrizations. The results include
829
+ nucleon form factors Q-dependent terms (up to order Q2) and single-nucleon electromagnetic mean-square radii. The argon point-neutron
830
+ distribution rms radius has been fixed according to Rn = Rp +0.1fm. Right graph: Same as left graph but for cesium, with the point-neutron
831
+ distribution rms radius fixed according to Rn = Rp +0.2fm.
832
+ B.
833
+ Uncertainties due to elastic vector proton and neutron form
834
+ factor parametrizations
835
+ Event rate spectra derived from form factor parametriza-
836
+ tions are expected to depend on the parametrization used, with
837
+ the dependence increasing with increased Q [36]. To deter-
838
+ mine the size of these dependences we calculate F p
839
+ V and Fn
840
+ V
841
+ using as well the Klein-Nystrand form factor [45] and the
842
+ Fourier transform of the symmetrized Fermi distribution [39].
843
+ The Klein-Nystrand form factor follows from folding a
844
+ Yukawa potential (range ak) over a hard sphere distribution
845
+ of radius RA, namely [45]
846
+ FKN = 3 j1(QRA)
847
+ QRA
848
+ 1
849
+ 1+Q2a2
850
+ k
851
+ .
852
+ (21)
853
+ The range of the potential ak is 0.7 fm [45] and the hard
854
+ sphere radius is determined through the point-proton and
855
+ point-neutron distributions mean-square radii according to
856
+ RA =
857
+
858
+ 5
859
+ 3
860
+
861
+ R2
862
+ X −6a2
863
+ k
864
+
865
+ (X = p,n) .
866
+ (22)
867
+ The Fourier transform of the symmetrized Fermi distribution
868
+ follows instead from fSF = fF(r)+ fF(−r)−1, where fF(r) is
869
+ the conventional Fermi function
870
+ fF =
871
+ 1
872
+ 1+e(r−c)/a ,
873
+ (23)
874
+ where c refers to the half-density radius and a (a = 0.52fm
875
+ [49]) to the surface diffuseness. The Fourier transform can be
876
+ analytically integrated resulting in
877
+ FSF = 3
878
+ Qc
879
+ �sin(Qc)
880
+ (Qc)2
881
+ ��
882
+ πQa
883
+ tanh(πQa)
884
+
885
+ − cos(Qc)
886
+ Qc
887
+
888
+ ×
889
+
890
+ πQa
891
+ sinh(πQa)
892
+
893
+ 1
894
+ 1+(πa/c)2 .
895
+ (24)
896
+ In this case the half-density radius c proceeds from the point-
897
+ proton and point-neutron distributions mean-square radii and
898
+ the surface diffuseness a
899
+ c =
900
+
901
+ 5
902
+ 3R2
903
+ X − 7
904
+ 3(πa)2
905
+ (X = p,n) .
906
+ (25)
907
+ We now calculate the weak-charge form factor percentage dif-
908
+ ference obtained by calculating FW according to Eq. (17),
909
+ using for the elastic vector proton and neutron form factors
910
+ the three different parametrizations already discussed. The
911
+ result is displayed in Fig. 3. The left graph shows results
912
+ for argon, while the right graph results for cesium. Differ-
913
+ ences are slightly more pronounced for the latter, thus demon-
914
+ strating they are more relevant for heavy nuclides. As Q in-
915
+ creases, the Helm form factor decreases more steeply. This
916
+ effect is less pronounced for the Fourier transform of the
917
+ symmetrized Fermi distribution and even less for the Klein-
918
+ Nystrand parametrization. This means that event rates cal-
919
+ culated with the Helm form factor will produce slightly less
920
+ events than those calculated with the Fermi parametrization,
921
+ and even less than those obtained with the Klein-Nystrand
922
+ form factor. This translates into uncertainties of the order of
923
+ 1% (or below) for argon and 2% (or below) for cesium. In
924
+ summary, uncertainties due to form factor parametrizations of
925
+ the elastic vector proton and neutron form factors are compa-
926
+ rable to those implied by the Q-dependent nucleon form factor
927
+ terms discussed in the previous Section.
928
+ C.
929
+ Model-independent versus form factor parametrizations
930
+ approaches
931
+ Assuming the nucleon distributions to be spherically sym-
932
+ metric (i.e. assuming that the charge density distribution de-
933
+ pends solely on the distribution radius) leads to the series ex-
934
+ pansions of the elastic vector proton and neutron form fac-
935
+
936
+ 7
937
+ tors, Eqs. (9) and (10). These expansions, subject only to
938
+ the spherical symmetry hypothesis, involve the point-nucleon
939
+ mean-square radii (second radial moment) at order Q2. So
940
+ rather than sticking to a form factor parametrization or a nu-
941
+ clear physics model one can use these expansions to fit R2
942
+ n to
943
+ data [38, 50]. The question is of course whether a Q2-order
944
+ description suffices, or whether higher order terms should be
945
+ included to increase convergence. Ref. [38] addressed this
946
+ question and ended up concluding that order Q4 terms are re-
947
+ quired (for argon for which the analysis was done). Following
948
+ this approach, this implies the introduction of a new param-
949
+ eter, the fourth radial moment of the nucleon distributions,
950
+ ⟨R4
951
+ X⟩ (X = p,n) 5 .
952
+ To compare the accuracy of the model-independent analysis
953
+ and the form factor parametrization approach, we first calcu-
954
+ late the fourth radial moment for the three parametrizations
955
+ we are using. We do so by taking into account that the series
956
+ expansions in Eqs. (9) and (10) can be compared term by term
957
+ to the Taylor expansions of the F p
958
+ V (Q2) and Fn
959
+ V (Q2) functions.
960
+ Up to order Q4 this reduces to
961
+ R2
962
+ X = −6 dFX
963
+ V
964
+ dQ2
965
+ ����
966
+ Q2=0
967
+ and
968
+ ⟨R4
969
+ X⟩ = 60 d2FX
970
+ V
971
+ dQ4
972
+ ����
973
+ Q2=0
974
+ . (26)
975
+ More generally, the 2i-th moment can be written according to
976
+ ⟨R2i
977
+ X ⟩ = (−1)i (2i+1)!
978
+ i!
979
+ diFX
980
+ V
981
+ dQ2i
982
+ ����
983
+ Q2=0
984
+ (X = p,n) .
985
+ (27)
986
+ The first relation in Eq. (26) leads to the expressions for the
987
+ diffraction, hard sphere and half density radii (R0, RA and c)
988
+ for the Helm, Klein-Nystrand and Fourier transform of the
989
+ symmetrized Fermi distribution, Eqs. (19), (22) and (25). The
990
+ second relation in Eq. (26) leads instead to
991
+ Helm:
992
+ ⟨R4
993
+ X⟩ =3
994
+ 7R4
995
+ 0 +6R2
996
+ 0s2 +15s4 ,
997
+ KN:
998
+ ⟨R4
999
+ X⟩ =3
1000
+ 7R4
1001
+ A +12a2
1002
+ kR2
1003
+ A +120a4
1004
+ k ,
1005
+ Fermi:
1006
+ ⟨R4
1007
+ X⟩ =3
1008
+ 7c4 + 18
1009
+ 7 c2(aπ)2 + 31
1010
+ 7 (aπ)4 .
1011
+ (28)
1012
+ Of course the higher the order in Q the expansion extends to,
1013
+ the better the convergence and so the reliability of the result.
1014
+ Inclusion of terms up to order Q6 require the sixth radial mo-
1015
+ ment. From Eq. (27) explicit expressions for each case read
1016
+ Helm: ⟨R6
1017
+ X⟩ =1
1018
+ 3R6
1019
+ 0 +9R2
1020
+ 0s2 +63R2
1021
+ 0s4 +105s6 ,
1022
+ KN:
1023
+ ⟨R6
1024
+ X⟩ =1
1025
+ 3R6
1026
+ A +18R4
1027
+ Aa2
1028
+ k +504R2
1029
+ ka4
1030
+ k +5040a6
1031
+ k ,
1032
+ Fermi:⟨R6
1033
+ X⟩ =1
1034
+ 3c6 + 11
1035
+ 3 c4(πa)2 + 239
1036
+ 15 c2(πa)4 + 127
1037
+ 5 (πa)6 .
1038
+ (29)
1039
+ 5 Note that we have simplified our notation for the point-nucleon mean-
1040
+ square radii R2
1041
+ X ≡ ⟨R2
1042
+ X⟩. For ⟨R4
1043
+ X⟩ we do not do so to avoid confusion.
1044
+ Using Eqs. (19), (22) and (25) for R0, RA and c one can write
1045
+ the fourth and sixth radial moments for each parametrization
1046
+ solely in terms of R2
1047
+ X. In doing so one can then compare the
1048
+ level of convergence of the Q2, Q4 and Q6 expansions with
1049
+ the full expressions for each form factor. Results are shown in
1050
+ Figs. 4 and 5 for argon and cesium, respectively (representa-
1051
+ tive of light and heavy nuclides).
1052
+ Top graphs in Fig. 4 show results for the three form factor
1053
+ parametrizations calculated for argon. Bottom graphs show
1054
+ percentage uncertainties for each case. One can see that in-
1055
+ clusion of only the second moment leads to deviations above
1056
+ �� 5% for transferred momenta of the order of 80MeV. At
1057
+ 100MeV those deviations can reach ∼ 20%. These uncer-
1058
+ tainties, however, should be taken with care as those found
1059
+ in the previous Section for the same reason. They increase at
1060
+ relatively large Q, where the neutrino flux is expected to be
1061
+ fading away to a certain degree. They will—of course—have
1062
+ an impact when comparing results from both approaches, but
1063
+ the best way to understand their actual impact is through the
1064
+ calculation of the CEνNS event rate, something that will be
1065
+ done and discussed in Sec. V.
1066
+ Top and bottom graphs in Fig. 5 show—instead—results
1067
+ for cesium. In this case a few percentage convergence requires
1068
+ the inclusion of the sixth moment. As can be seen in the bot-
1069
+ tom graphs, up to order Q4 the expansion involves uncertain-
1070
+ ties that can readily reach ∼ 10% at transferred momenta of
1071
+ the order of 80MeV, values for which the neutrino flux is still
1072
+ sizable enough for the uncertainty to have an impact on the
1073
+ CEνNS event rate. Aiming at few percent level precision thus
1074
+ requires the inclusion of the sixth moment, inline with Ref.
1075
+ [38].
1076
+ The adoption of form factor parametrizations suffers from
1077
+ the model dependence implied by the different assumptions
1078
+ those parametrizations come along with.
1079
+ However, as we
1080
+ have discussed in Sec. III B, using one or the other (which
1081
+ can be understood as a variation of the underlying nuclear
1082
+ physics hypotheses) introduces uncertainties at the few per-
1083
+ cent level. The same level of precision can be achieved with
1084
+ the model-independent power expansion approach, which re-
1085
+ lies only on the assumption of a spherical symmetric nuclear
1086
+ ground state. However to achieve that level of precision new
1087
+ parameters should be considered. Thus, take for instance the
1088
+ case of cesium. Given a CEνNS dataset an analysis relying
1089
+ on form factor parametrizations is expected to imply a few
1090
+ percent uncertainty (in addition to systematic and statistical
1091
+ uncertainties due to e.g. quenching factor, neutrino flux, BRN
1092
+ and SS backgrounds). An analysis based on radial moments
1093
+ expansions as well, but then the dataset has to be used to fit not
1094
+ only the neutron distribution rms radius but also the fourth and
1095
+ sixth moments (depending on the nuclide the detector is built
1096
+ with). The extraction procedure then becomes a multiparame-
1097
+ ter problem, which might worsen the precision with which R2
1098
+ n
1099
+ can be determined. We will come back to that discussion in
1100
+ Sec. V.
1101
+
1102
+ 8
1103
+ 0
1104
+ 20
1105
+ 40
1106
+ 60
1107
+ 80
1108
+ 100
1109
+ Q [MeV]
1110
+ 0.2
1111
+ 0.4
1112
+ 0.6
1113
+ 0.8
1114
+ 1.0
1115
+ FW
1116
+ Argon
1117
+ Weak-charge FF: Full vs moments expansion
1118
+ Helm: Full
1119
+ Helm: Order Q2
1120
+ Helm: Order Q4
1121
+ 0
1122
+ 20
1123
+ 40
1124
+ 60
1125
+ 80
1126
+ 100
1127
+ Q [MeV]
1128
+ 0.2
1129
+ 0.4
1130
+ 0.6
1131
+ 0.8
1132
+ 1.0
1133
+ FW
1134
+ Argon
1135
+ Weak-charge FF: Full vs moments expansion
1136
+ KN: Full
1137
+ KN: Order Q2
1138
+ KN: Order Q4
1139
+ 0
1140
+ 20
1141
+ 40
1142
+ 60
1143
+ 80
1144
+ 100
1145
+ Q [MeV]
1146
+ 0.2
1147
+ 0.4
1148
+ 0.6
1149
+ 0.8
1150
+ 1.0
1151
+ FW
1152
+ Argon
1153
+ Weak-charge FF: Full vs moments expansion
1154
+ Fermi: Full
1155
+ Fermi: Order Q2
1156
+ Fermi: Order Q4
1157
+ 0
1158
+ 20
1159
+ 40
1160
+ 60
1161
+ 80
1162
+ 100
1163
+ Q [MeV]
1164
+ 0
1165
+ 5
1166
+ 10
1167
+ 15
1168
+ 20
1169
+ | FW| [%]
1170
+ Argon
1171
+ Weak-charge FF: Full vs moments expansion
1172
+ Helm at Q2
1173
+ Helm at Q4
1174
+ 0
1175
+ 20
1176
+ 40
1177
+ 60
1178
+ 80
1179
+ 100
1180
+ Q [MeV]
1181
+ 0
1182
+ 5
1183
+ 10
1184
+ 15
1185
+ 20
1186
+ | FW| [%]
1187
+ Argon
1188
+ Weak-charge FF: Full vs moments expansion
1189
+ KN at Q2
1190
+ KN at Q4
1191
+ 0
1192
+ 20
1193
+ 40
1194
+ 60
1195
+ 80
1196
+ 100
1197
+ Q [MeV]
1198
+ 0
1199
+ 5
1200
+ 10
1201
+ 15
1202
+ 20
1203
+ | FW| [%]
1204
+ Argon
1205
+ Weak-charge FF: Full vs moments expansion
1206
+ Fermi at Q2
1207
+ Fermi at Q4
1208
+ FIG. 4. Top graphs: Convergence level for the weak-charge form factor series expansions at order Q2, Q4 and Q6 (in argon) for the Helm,
1209
+ Klein-Nystrand and the Fourier transform of the symmetrized Fermi distribution. Expansion at order Qn involve radial moments up to n-th
1210
+ order (see text). Bottom graphs: Percentage uncertainty in each case. From these results one can see that if one relies on elastic vector form
1211
+ factor power expansions and demands precision below a few percent the fourth radial moment should be included for light nuclides. Inline
1212
+ with Ref. [38].
1213
+ IV.
1214
+ CEνNS CROSS SECTION AND EVENT RATE
1215
+ The CEνNS differential cross section follows from a neutral
1216
+ current process. In terms of nuclear recoil energy it is given
1217
+ by [51–54]
1218
+
1219
+ dEr
1220
+ = G2
1221
+ FmN
1222
+
1223
+ Q2
1224
+ W F2
1225
+ W
1226
+
1227
+ 2− mNEr
1228
+ E2ν
1229
+
1230
+ ,
1231
+ (30)
1232
+ where subleading kinematic terms have been neglected and
1233
+ mN here refers to nuclear mass. The strength at which the
1234
+ Z boson couples to the nucleus is determined by the Q-
1235
+ dependent coupling QW ×FW(Q2). The coupling is such that
1236
+ with increasing transferred momentum (increasing incoming
1237
+ neutrino energy) the “effective” weak charge decreases and so
1238
+ the interaction probability. Uncertainties in FW, as those we
1239
+ have discussed in the previous Sections and as those discussed
1240
+
1241
+ 9
1242
+ 0
1243
+ 20
1244
+ 40
1245
+ 60
1246
+ 80
1247
+ 100
1248
+ Q [MeV]
1249
+ 0.2
1250
+ 0.4
1251
+ 0.6
1252
+ 0.8
1253
+ 1.0
1254
+ FW
1255
+ Cesium
1256
+ Weak-charge FF: Full vs moments expansion
1257
+ Helm: Full
1258
+ Helm: Order Q2
1259
+ Helm: Order Q4
1260
+ Helm: Order Q6
1261
+ 0
1262
+ 20
1263
+ 40
1264
+ 60
1265
+ 80
1266
+ 100
1267
+ Q [MeV]
1268
+ 0.2
1269
+ 0.4
1270
+ 0.6
1271
+ 0.8
1272
+ 1.0
1273
+ FW
1274
+ Cesium
1275
+ Weak-charge FF: Full vs moments expansion
1276
+ KN: Full
1277
+ KN: Order Q2
1278
+ KN: Order Q4
1279
+ KN: Order Q6
1280
+ 0
1281
+ 20
1282
+ 40
1283
+ 60
1284
+ 80
1285
+ 100
1286
+ Q [MeV]
1287
+ 0.2
1288
+ 0.4
1289
+ 0.6
1290
+ 0.8
1291
+ 1.0
1292
+ FW
1293
+ Cesium
1294
+ Weak-charge FF: Full vs moments expansion
1295
+ Fermi: Full
1296
+ Fermi: Order Q2
1297
+ Fermi: Order Q4
1298
+ Fermi: Order Q6
1299
+ 0
1300
+ 20
1301
+ 40
1302
+ 60
1303
+ 80
1304
+ 100
1305
+ Q [MeV]
1306
+ 0
1307
+ 5
1308
+ 10
1309
+ 15
1310
+ 20
1311
+ 25
1312
+ 30
1313
+ | FW| [%]
1314
+ Cesium
1315
+ Weak-charge FF: Full vs moments expansion
1316
+ Helm at Q2
1317
+ Helm at Q4
1318
+ Helm at Q6
1319
+ 0
1320
+ 20
1321
+ 40
1322
+ 60
1323
+ 80
1324
+ 100
1325
+ Q [MeV]
1326
+ 0
1327
+ 5
1328
+ 10
1329
+ 15
1330
+ 20
1331
+ 25
1332
+ 30
1333
+ FW [%]
1334
+ Cesium
1335
+ Weak-charge FF: Full vs moments expansion
1336
+ KN at Q2
1337
+ KN at Q4
1338
+ KN at Q6
1339
+ 0
1340
+ 20
1341
+ 40
1342
+ 60
1343
+ 80
1344
+ 100
1345
+ Q [MeV]
1346
+ 0
1347
+ 5
1348
+ 10
1349
+ 15
1350
+ 20
1351
+ 25
1352
+ 30
1353
+ FW [%]
1354
+ Cesium
1355
+ Weak-charge FF: Full vs moments expansion
1356
+ Fermi at Q2
1357
+ Fermi at Q4
1358
+ Fermi at Q6
1359
+ FIG. 5. Top graphs: Convergence level for the weak-charge form factor series expansions at order Q2, Q4 and Q6 (in cesium) for the Helm,
1360
+ Klein-Nystrand and the Fourier transform of the symmetrized Fermi distribution. Expansion at order Qn involve radial moments up to n-th
1361
+ order (see text). Bottom graphs: Percentage uncertainty in each case. From these results one can see that if one relies on elastic vector form
1362
+ factor power expansions and demands precision below a few percent the sixth radial moment should be included. Inline as well with Ref. [38].
1363
+ in Ref. [36] 6, translate into uncertainties in the CEνNS cross
1364
+ section. They are indeed responsible for the nuclear physics
1365
+ uncertainties the process involves and so are entirely respon-
1366
+ sible for the theoretical uncertainties of the CEνNS event
1367
+ rate, unless one-loop electroweak corrections are accounted
1368
+ for [34].
1369
+ 6 Uncertainties on the weak-charge form factor using the large-scale nuclear
1370
+ shell model for a long list of nuclei of interest have been discussed in Ref.
1371
+ [55].
1372
+ The CEνNS differential event rate, as any other rate, fol-
1373
+ lows from a convolution of the differential cross section in Eq.
1374
+ (30) and the incoming neutrino flux. For CEνNS to be used as
1375
+ a tool for the extraction of neutron distributions mean-square
1376
+ radii the neutrino flux should lie in either the “intermediate”
1377
+ or “high” energy windows. The former defined by pion decay
1378
+ at rest, while the latter by pion decay in flight. Note that from
1379
+ this perspective COHERENT experiments operate in the in-
1380
+ termediate energy window and the νBDX-DRIFT experiment
1381
+ will in the high energy regime [37, 56]. Because of the large
1382
+ statistics our analysis is interested in we focus on the interme-
1383
+
1384
+ 10
1385
+ 10
1386
+ 20
1387
+ 30
1388
+ 40
1389
+ 50
1390
+ 60
1391
+ 70
1392
+ 80
1393
+ 90
1394
+ 100
1395
+ 110
1396
+ 120
1397
+ 130
1398
+ Er [keV]
1399
+ 0
1400
+ 500
1401
+ 1000
1402
+ 1500
1403
+ 2000
1404
+ 2500
1405
+ 3000
1406
+ 3500
1407
+ Events
1408
+ Events in 1 year-ton LAr detector (Helm with RW)
1409
+ 10
1410
+ 20
1411
+ 30
1412
+ 40
1413
+ 50
1414
+ 60
1415
+ 70
1416
+ 80
1417
+ 90
1418
+ 100
1419
+ 110
1420
+ 120
1421
+ 130
1422
+ Er [keV]
1423
+ 0
1424
+ 500
1425
+ 1000
1426
+ 1500
1427
+ 2000
1428
+ 2500
1429
+ 3000
1430
+ Events
1431
+ Events in 1 year-ton LAr detector (with vector elastic form p and n form factors)
1432
+ 2.8
1433
+ 3.0
1434
+ 3.2
1435
+ 3.4
1436
+ 3.6
1437
+ 3.8
1438
+ 4.0
1439
+ 4.2
1440
+ RW [fm]
1441
+ 0.0
1442
+ 0.5
1443
+ 1.0
1444
+ 1.5
1445
+ 2.0
1446
+ 2.5
1447
+ 3.0
1448
+ 3.5
1449
+ 4.0
1450
+ 4.5
1451
+ 2
1452
+ 95% CL
1453
+ 90% CL
1454
+ Helm with RW in 1 tonne-year LAr detector
1455
+ BRN+SS=10×CE NS
1456
+ BRN+SS=1.0×CE NS
1457
+ BRN+SS=0.1×CE NS
1458
+ 2.6
1459
+ 2.8
1460
+ 3.0
1461
+ 3.2
1462
+ 3.4
1463
+ 3.6
1464
+ 3.8
1465
+ 4.0
1466
+ 4.2
1467
+ Rn [fm]
1468
+ 0.0
1469
+ 0.5
1470
+ 1.0
1471
+ 1.5
1472
+ 2.0
1473
+ 2.5
1474
+ 3.0
1475
+ 3.5
1476
+ 4.0
1477
+ 4.5
1478
+ 2
1479
+ 95% CL
1480
+ 90% CL
1481
+ Weak-charge FF with elastic vector FFs in 1 tonne-year LAr detector
1482
+ BRN+SS=10×CE NS
1483
+ BRN+SS=1.0×CE NS
1484
+ BRN+SS=0.1×CE NS
1485
+ FIG. 6. Top graphs: Toy experiment signals calculated by: (i) Using the Helm form factor and fixing the diffraction radius with RW (left
1486
+ graph), (ii) Using the weak-charge form factor as given in Eq. (17) (right graph). In both cases Rn = Rp +0.1fm, with Rp calculated from the
1487
+ nuclear charge radius taken from Ref. [48] and Eq. (12). Bottom graphs: Least-square function as a function of the weak-charge radius RW
1488
+ (left graph) and the point-neutron distribution rms radius (right graph). In both graphs results for two additional analyses with two different
1489
+ BRN and SS background hypotheses are shown as well.
1490
+ diate energy window, where O(tonne) detectors are expected
1491
+ in the near future [28, 29]. In this case the neutrino spec-
1492
+ trum consist of a monochromatic component (prompt com-
1493
+ ponent) and two continuous spectra (delayed components).
1494
+ Their spectral functions follow from the π+ and µ+ energy
1495
+ distributions and read
1496
+ Φνµ = δ
1497
+
1498
+ Eν − m2
1499
+ π −m2
1500
+ µ
1501
+ 2mπ
1502
+
1503
+ ,
1504
+ Φνµ = 192
1505
+
1506
+ � Eν
1507
+
1508
+ �2 �1
1509
+ 2 − Eν
1510
+
1511
+
1512
+ ,
1513
+ Φνe = 64
1514
+
1515
+ � Eν
1516
+
1517
+ �2 �3
1518
+ 4 − Eν
1519
+
1520
+
1521
+ .
1522
+ (31)
1523
+ Since neutrinos are isotropically produced, normalization of
1524
+ the neutrino flux is given by nν = r ×POT/4/π2/L2. We use
1525
+ POT = 2.1 × 1023/year, L = 28.0 m and r = 8.0 × 10−2, in-
1526
+ spired by recent COHERENT measurements and future plans
1527
+ [1–3, 28]. Thus taking into account the three neutrino com-
1528
+ ponents, the differential recoil spectrum in a detector with a
1529
+ fiducial volume composed of different stable isotopes is writ-
1530
+ ten as
1531
+ dR
1532
+ dEr
1533
+ = mdetNA
1534
+ mmol,i
1535
+ nν∑
1536
+ i
1537
+ Xi
1538
+
1539
+ α=νµ,νµ,νe
1540
+ � Emax
1541
+ ν
1542
+ Emin
1543
+ ν
1544
+ Φα
1545
+ dσi
1546
+ dEr
1547
+ Eν .
1548
+ (32)
1549
+ Here mdet refers to the detector fiducial volume mass, NA =
1550
+ 6.022×1023/mol to the Avogadro number, mmol,i to the molar
1551
+ mass of the ith isotope and Xi to its relative abundance. Lower
1552
+ and upper integration limits are given by Emin
1553
+ ν
1554
+ =
1555
+
1556
+ ErmN/2
1557
+ and Eν = mµ/2, respectively. Assuming uniform bin size ∆Er,
1558
+ the total event rate evaluated in the kth bin central value Ek
1559
+ r
1560
+ follows from integration, namely
1561
+ N =
1562
+ � Ekr +∆Er/2
1563
+ Ekr −∆Er/2
1564
+ dR
1565
+ dEr
1566
+ dEr .
1567
+ (33)
1568
+ With the results from Secs. II and III, along with those
1569
+ discussed in this section, we are now in a position to study
1570
+ the precision with which the point-neutron distribution rms
1571
+ can be extracted from data under different weak-charge form
1572
+ factor approaches. For that aim we consider a one-tonne LAr
1573
+ detector as we now discuss.
1574
+
1575
+ 11
1576
+ V.
1577
+ EXTRACTION OF THE ARGON NEUTRON
1578
+ DISTRIBUTION ROOT-MEAN-SQUARE RADIUS USING
1579
+ DIFFERENT APPROACHES
1580
+ We now proceed with the determination of the neutron dis-
1581
+ tribution rms radius in some of the different approaches we
1582
+ have discussed. We focus on three cases: (i) The weak-charge
1583
+ form factor parametrized `a la Helm, (ii) the weak-charge form
1584
+ factor written as in Eq. (17) with the elastic vector form fac-
1585
+ tors parametrized as well `a la Helm, (iii) model-independent
1586
+ approach based on expansions in terms of even-moments as
1587
+ discussed in Sec. III C.
1588
+ Rather than using existing CsI and LAr data [1–3] we in-
1589
+ stead assume a one-tonne LAr detector with specifications as
1590
+ explained in Sec. IV. This choice provides flexibility with
1591
+ background and so enable us to highlight the main differences
1592
+ arising in each case. As we have already stressed plans for de-
1593
+ ploying an O(tonne)-scale LAr detector at the STS [28] have
1594
+ been discussed in Ref. [29].
1595
+ To proceed we define a simplistic spectral least-square
1596
+ function that accounts only for energy spectral data, but en-
1597
+ capsulates the main features of such an experimental envi-
1598
+ ronment: Uncertainties from quenching factor, neutrino flux
1599
+ and efficiency as well as Beam Related Neutron (BRN) and
1600
+ Steady State (SS) backgrounds. We, however, do not consider
1601
+ systematic uncertainties due to energy calibration and pulse-
1602
+ shape discrimination and assume that the neutrino-induced
1603
+ neutron (NIN) background is subdominant compared with the
1604
+ BRN and SS backgrounds. Note that such assumptions do not
1605
+ aim whatsoever at representing the actual experimental setup,
1606
+ they are just choices that enable pointing out the effects we
1607
+ are aiming at. The spectral least-square function then reads
1608
+ χ2 =
1609
+ 5
1610
+
1611
+ i=1
1612
+
1613
+ NExp
1614
+ i
1615
+ −(1+α)NTh
1616
+ i (P)
1617
+ σi
1618
+ �2
1619
+ +
1620
+ � α
1621
+ σα
1622
+ �2
1623
+ .
1624
+ (34)
1625
+ Here NExp
1626
+ i
1627
+ refers to the number of events in the ith bin gen-
1628
+ erated in a toy experiment and α a nuisance parameter. The
1629
+ least-square function becomes a function of α, with its actual
1630
+ value following from minimization over it. NTh
1631
+ i
1632
+ the number
1633
+ of events generated by varying the set P = {R2
1634
+ n,⟨R4
1635
+ n⟩} over
1636
+ a certain range, where ⟨R4
1637
+ n⟩ is only present in case (ii). For
1638
+ the statistical uncertainty we assume σ2
1639
+ i = NTh
1640
+ i
1641
+ + ∑j B j, with
1642
+ ∑j B j the BRN and SS related backgrounds which we assume
1643
+ to follow the same energy dependence of the signal.
1644
+ The
1645
+ latter assumption is certainly not the case as can be seen in
1646
+ the LAr CEνNS measurement release [3]. However, rather
1647
+ than extrapolating that background to the case we are inter-
1648
+ ested in (multi-ton LAr detector) we believe this assumption
1649
+ represents a better choice. At present BRCEνNS ≃ 3% while
1650
+ BRBRN ≃ 14% and BRSS ≃ 83%, but reduction of that back-
1651
+ ground to lower levels seems achievable in the future [57].
1652
+ We then assume Bj = 10 × NExp
1653
+ j
1654
+ 7. For the systematical un-
1655
+ 7 Note that at present BRN plus SS backgrounds in the LAr data release
1656
+ amount to about 29 times the CEνNS signal [3].
1657
+ certainty encoded in σα we take σα = 0.11 from a 10% uncer-
1658
+ tainty due to neutrino flux, 5% uncertainty due to efficiency
1659
+ and 1% due to quenching factor. For the efficiency, and fol-
1660
+ lowing once again the CENNS-10 LAr detector, we assume
1661
+ a Heaviside function at 5keVnr and a 10keVnr bin size. To
1662
+ generate the toy experiment data, we fix the point-proton dis-
1663
+ tribution rms radius with the aid of Eq. (12) with the values for
1664
+ the different quantities given in Tab. I. For the point-neutron
1665
+ distribution rms radius we use Rn = Rp + 0.1fm, as we have
1666
+ done in the previous Sections.
1667
+ Results for cases (i) and (ii) are shown in Fig. 6. Top graphs
1668
+ display results for the toy experiments, while those at the bot-
1669
+ tom the results of the χ2 analyses. Case (i) leads to a fit for
1670
+ RW from which we find at the 90%CL the following result
1671
+ RW = 3.522+0.449
1672
+ −0.474 fm .
1673
+ (35)
1674
+ The point-neutron distribution rms radius can then be ex-
1675
+ tracted from this result with the aid of Eq. (16). To do so
1676
+ one should—in principle—take into account uncertainties on
1677
+ the single-nucleon electromagnetic mean-square radii as well
1678
+ as on the 40Ar nuclear charge radius. These uncertainties are
1679
+ given by ∆
1680
+
1681
+ r2p = 4.0 × 10−4 fm, ∆r2
1682
+ n = 1.7 × 10−3 fm2 [47]
1683
+ and ∆RC = 1.8×10−2 fm [48] and so can be safely neglected
1684
+ when compared with those from RW in Eq. (35). Results for
1685
+ Rn at the 90%CL thus read
1686
+ Rn = 3.428+0.434
1687
+ −0.458 fm .
1688
+ (36)
1689
+ In case (ii), c’est-`a-dire using Eq. (17) for the weak-charge
1690
+ form factor, allows fitting Rn directly. In this case the diffrac-
1691
+ tion radius in the proton and neutron elastic vector form fac-
1692
+ tors are fixed from Rp and Rn. The former fixed from the
1693
+ nuclear-charge radius and Eq. (12) (using central values for
1694
+ the relevant quantities), while the latter varying over the range
1695
+ [2.6,4.1]fm. At the 90%CL we find
1696
+ Rn = 3.457+0.492
1697
+ −0.611 fm .
1698
+ (37)
1699
+ Since the systematic uncertainty as well as the BRN and SS
1700
+ backgrounds for both analyses have been equally fixed and
1701
+ the toy experiments signals have been generated under the as-
1702
+ sumptions that define each case, differences in the results in
1703
+ Eqs. (36) and (37) can be only attributed to theoretical as-
1704
+ sumptions. Parametrizing the weak-charge form factor `a la
1705
+ Helm, Rn can be determined with a ∼ 14% precision. Us-
1706
+ ing Eq. (17)—including nucleon form factor terms up to or-
1707
+ der Q2—allows a determination at the 18% level. We then
1708
+ conclude that both procedures seem to produce results with
1709
+ comparable levels of precision, though with a slight improve-
1710
+ ment if a parametrization of the weak-charge form factor is
1711
+ used. In Sec. III we found that numerical deviations from one
1712
+ procedure or the other could be as large as ∼ 5% in the rel-
1713
+ evant transferred momentum range. We attribute the differ-
1714
+ ences found here in the extraction of Rn to that fact.
1715
+ There is of course the question of whether precision can be
1716
+ improved by improving upon the BRN and SS backgrounds.
1717
+ To assess that question we have run to extra analyses, assum-
1718
+ ing Bj = 1.0×NExp
1719
+ j
1720
+ and Bj = 0.1×NExp
1721
+ j
1722
+ . The results can be
1723
+
1724
+ 12
1725
+ 10
1726
+ 20
1727
+ 30
1728
+ 40
1729
+ 50
1730
+ 60
1731
+ 70
1732
+ 80
1733
+ 90
1734
+ 100
1735
+ 110
1736
+ 120
1737
+ 130
1738
+ Er [keV]
1739
+ 0
1740
+ 500
1741
+ 1000
1742
+ 1500
1743
+ 2000
1744
+ 2500
1745
+ 3000
1746
+ 3500
1747
+ Events
1748
+ Events in 1 year-ton LAr detector (model-independent approach)
1749
+ 1.5
1750
+ 2.0
1751
+ 2.5
1752
+ 3.0
1753
+ 3.5
1754
+ 4.0
1755
+ 4.5
1756
+ 5.0
1757
+ 5.5
1758
+ 6.0
1759
+ R2
1760
+ n
1761
+ 1/2 [fm]
1762
+ 3.6
1763
+ 3.8
1764
+ 4.0
1765
+ 4.2
1766
+ 4.4
1767
+ 4.6
1768
+ 4.8
1769
+ 5.0
1770
+ 5.2
1771
+ R4
1772
+ n
1773
+ 1/4 [fm]
1774
+ 95% CL
1775
+ 90% CL
1776
+ 90% CL and 95% CL limits in a 1-tonne LAr detector
1777
+ FIG. 7. Left graph: Toy experiment data used in the model-independent analysis generated by fixing Rn = Rp +0.1fm and ⟨R4n⟩ = 210.3fm4,
1778
+ the latter obtained from the results in Eq. (28) in the Helm parametrization case. Right graph: 90% and 95% CL isocontours in the neutron
1779
+ distribution rms radius, Rn =
1780
+
1781
+ ⟨R2n⟩, and fourth moment,
1782
+
1783
+ ⟨R4n⟩, plane.
1784
+ seen in both graphs in Fig. 6. In the case in which the Helm
1785
+ parametrization is used for the weak-charge form factor, preci-
1786
+ sion in the extraction of Rn improves at the 5% and 1% levels,
1787
+ respectively. In the case in which FW is decomposed in terms
1788
+ of elastic vector proton and neutron form factors, instead, at
1789
+ the 8% and 5% levels.
1790
+ One might wonder whether this conclusion holds as well for
1791
+ heavy nuclei. Although we have not done an analysis for such
1792
+ a case (e.g. for cesium), we find no reason why this should
1793
+ not be the case. Of course nuclear parameters will change, but
1794
+ given the discussion in the previous Sections we expect the
1795
+ trend to be valid in this case too.
1796
+ We now turn to case (iii), for which for the calculation we
1797
+ do not include nucleon form factors corrections in Eq. (17)
1798
+ and expand the elastic vector proton and neutron form factors
1799
+ in terms of even moments up to order Q4 (as required from the
1800
+ discussion in Sec. III C). Such procedure leads to
1801
+ FW =
1802
+ 1
1803
+ QW
1804
+
1805
+ Zgp
1806
+ V
1807
+
1808
+ 1− Q2
1809
+ 3! R2
1810
+ p + Q4
1811
+ 5! ⟨R4
1812
+ p⟩
1813
+
1814
+ + Ngn
1815
+ V
1816
+
1817
+ 1− Q2
1818
+ 3! R2
1819
+ n + Q4
1820
+ 5! ⟨R4
1821
+ n⟩
1822
+ ��
1823
+ .
1824
+ (38)
1825
+ This expression depends—in principle—upon three free pa-
1826
+ rameters: The point-neutron distribution mean-square radius
1827
+ and the proton and neutron fourth moments. However, the
1828
+ contribution controlled by ⟨R4
1829
+ p⟩ can be safely neglected. This
1830
+ can be readily checked by sticking—for this matter—to the
1831
+ Helm parametrization, using then the corresponding expres-
1832
+ sion in Eq. (28) and evaluating the proton Q4-to-Q2 ratio. Do-
1833
+ ing so one finds 2 × 10−5(Q/MeV)2, which combined with
1834
+ the fact that Q ≲ 50MeV and that the cross section is domi-
1835
+ nated by the neutron contribution reduces the calculation to a
1836
+ two-parameter problem.
1837
+ Dropping the proton fourth moment one can then determine
1838
+ from data the neutron distribution mean-square radius and its
1839
+ fourth moment. To generate the toy experiment data we fix
1840
+ ⟨R4
1841
+ n⟩ with the aid of Eq. (28) assuming the Helm parametriza-
1842
+ tion. Results for that pseudo-data are displayed in the left
1843
+ graph in Fig. 7. We then proceed with the least-square analysis
1844
+ by varying Rn ≡
1845
+
1846
+ ⟨R2n⟩ and ⟨R4
1847
+ n⟩. Results of this calculation
1848
+ in the
1849
+
1850
+ ⟨R2n⟩-⟨R4
1851
+ n⟩1/4 plane are shown in the right graph in
1852
+ Fig. 7. One can see that under the same background and de-
1853
+ tector assumptions that those used in the previous two cases,
1854
+ only an upper limit on Rn can be derived. At the 90%CL one
1855
+ gets
1856
+ Rn ≲ 5.9fm .
1857
+ (39)
1858
+ The result for the fourth moment is instead constrained to
1859
+ an interval (at both the 90% and 95% CLs), although wide.
1860
+ Fitting Rn through the model-independent approach has—of
1861
+ course—the advantage of not relying on particular nuclear
1862
+ physics assumptions, but implies fitting two parameters rather
1863
+ than one. That is why, with the statistics and background as-
1864
+ sumptions we have adopted, only an upper limit on Rn can be
1865
+ placed.
1866
+ In some respect this result differs from the one reported
1867
+ in Ref. [38], in particular in the shape of the available 90%
1868
+ and 95% CLs contours. It seems to us these differences are
1869
+ expected because of the following reasons: (a) Our analysis
1870
+ includes the proton contribution (which might become im-
1871
+ portant in regions of parameter space where Rn/⟨R4
1872
+ n⟩1/2 ≃
1873
+ Q/
1874
+
1875
+ 20), (b) our analysis involves a 3.5 less statistics (a 1-
1876
+ tonne rather than 3.5-tonne LAr detector), (c) our systematic
1877
+ and statistical uncertainties are much larger (in particular the
1878
+ statistical uncertainty which includes a background hypothe-
1879
+ sis that exceeds the signal by a factor 10), (d) the statistical
1880
+ methods employed in the extraction of the relevant quantities.
1881
+ Results from the three different approaches we have em-
1882
+ ployed here thus seem to favor the inclusion of nucleon
1883
+ form factors Q-dependent terms and the decomposition of the
1884
+ weak-charge form factor in terms of elastic vector proton and
1885
+ neutron form factors, as given in Eq. (17). With such an ap-
1886
+ proach a percent determination of the neutron distribution rms
1887
+ radius seems achievable, as shown in Eq. (37). Improving the
1888
+ statistical uncertainty by improving upon BRN and SS back-
1889
+ grounds may lead to ∼ 1% determination of Rn.
1890
+
1891
+ 13
1892
+ VI.
1893
+ CONCLUSIONS
1894
+ We have quantified uncertainties on the extraction of point-
1895
+ neutron distributions mean-square radii from CEνNS data.
1896
+ Our analysis is motivated by future O(tonne)-scale CEνNS
1897
+ detectors which will deliver thousands of events per year with
1898
+ well-controlled systematic and statistical uncertainties. The-
1899
+ oretically, uncertainties are encoded in the weak-charge form
1900
+ factor, for which a variety of effects and parametrization ap-
1901
+ proaches have different impact.
1902
+ Here we have quantified at the weak-charge form factor
1903
+ level uncertainties due to: (i) The absence/presence of single-
1904
+ nucleon electromagnetic mean-square radii in the determina-
1905
+ tion of the point-proton distribution mean-square radius, (ii)
1906
+ Q-dependent nucleon form factor terms up to order Q2, (iii)
1907
+ parametrizations of the elastic vector proton and neutron form
1908
+ factors, (iv) parametrizations of the weak-charge form factor,
1909
+ (v) model-independent series expansions of the elastic vector
1910
+ proton and neutron form factors.
1911
+ At the weak-charge level we have found that the inclusion
1912
+ of single-nucleon electromagnetic mean-square radii in the
1913
+ determination of the point-proton distribution mean-square ra-
1914
+ dius has a per mille impact. Thus showing that taking the
1915
+ point-proton distribution mean-square radius equal to the nu-
1916
+ clear charge radius is precise enough. Inclusion of nucleon
1917
+ form factors Q-dependent terms, instead, has a percent ef-
1918
+ fect. Comparison of results from decompositions of the weak-
1919
+ charge form factor in terms of elastic vector proton and neu-
1920
+ tron form factors with weak-charge form factor parametriza-
1921
+ tions shows a wider uncertainty, that can rise up to the order
1922
+ of ∼ 5% (∼ 10%) in the relevant transferred momentum range
1923
+ for light (heavy) nuclei. Comparison of results from decom-
1924
+ positions of the weak-charge form factor in terms of elastic
1925
+ vector proton and neutron form factors with series expansions
1926
+ of the elastic vector proton and neutron form factors shows
1927
+ uncertainties of the same order.
1928
+ To better understand the impact of these effects we have
1929
+ considered pseudo-data from a one-tonne LAr detector with
1930
+ substantial BRN and SS backgrounds, though suppressed
1931
+ compared with those in current LAr data. Our results show
1932
+ that weak-charge form factor parametrizations and decompo-
1933
+ sitions of the weak-charge form factor in terms of elastic vec-
1934
+ tor proton and neutron form factors lead to the same level of
1935
+ accuracy. Though the weak-charge form factor parametriza-
1936
+ tion leads to a slightly better accuracy. Suppressed BRN and
1937
+ SS backgrounds seem to enable reaching ∼ 1% accuracies in
1938
+ the extraction of Rn.
1939
+ ACKNOWLEDGMENTS
1940
+ The author warmly thank Dimitrios Papoulias and Jorge
1941
+ Piekarewicz for very useful comments on the manuscript. He
1942
+ thank as well the “Universidad de Antioquia” Physics Depart-
1943
+ ment for its hospitality during the completion of this work and
1944
+ ANID for financial support through grant “Fondecyt Regular”
1945
+ 1221445.
1946
+ [1] D. Akimov et al. (COHERENT), Science (2017), 1708.01294.
1947
+ [2] D. Akimov et al. (COHERENT), Phys. Rev. Lett. 129, 081801
1948
+ (2022), 2110.07730.
1949
+ [3] D. Akimov et al. (COHERENT) (2020), 2006.12659.
1950
+ [4] M. Cadeddu, C. Giunti, Y. F. Li, and Y. Y. Zhang, Phys. Rev.
1951
+ Lett. 120, 072501 (2018), 1710.02730.
1952
+ [5] D. K. Papoulias, Phys. Rev. D 102, 113004 (2020), 1907.11644.
1953
+ [6] O. Miranda, D. Papoulias, G. Sanchez Garcia, O. Sanders,
1954
+ M. T´ortola, and J. Valle, JHEP 05, 130 (2020), 2003.12050.
1955
+ [7] M. Cadeddu, N. Cargioli, F. Dordei, C. Giunti, Y. F. Li, E. Pic-
1956
+ ciau, C. A. Ternes, and Y. Y. Zhang, Phys. Rev. C 104, 065502
1957
+ (2021), 2102.06153.
1958
+ [8] P. Coloma, I. Esteban, M. C. Gonzalez-Garcia, and J. Menen-
1959
+ dez, JHEP 08, 030 (2020), 2006.08624.
1960
+ [9] V. De Romeri, O. G. Miranda, D. K. Papoulias, G. Sanchez Gar-
1961
+ cia, M. T´ortola, and J. W. F. Valle (2022), 2211.11905.
1962
+ [10] D. K. Papoulias and T. S. Kosmas, Phys. Rev. D 97, 033003
1963
+ (2018), 1711.09773.
1964
+ [11] J. Liao and D. Marfatia, Phys. Lett. B775, 54 (2017),
1965
+ 1708.04255.
1966
+ [12] D. Aristizabal Sierra, V. De Romeri, and N. Rojas, Phys. Rev.
1967
+ D98, 075018 (2018), 1806.07424.
1968
+ [13] D. Aristizabal Sierra, V. De Romeri, and N. Rojas, JHEP 09,
1969
+ 069 (2019), 1906.01156.
1970
+ [14] D. Aristizabal Sierra, B. Dutta, S. Liao, and L. E. Strigari, JHEP
1971
+ 12, 124 (2019), 1910.12437.
1972
+ [15] B. Dutta, S. Liao, S. Sinha, and L. E. Strigari, Phys. Rev. Lett.
1973
+ 123, 061801 (2019), 1903.10666.
1974
+ [16] M. Cadeddu, N. Cargioli, F. Dordei, C. Giunti, Y. F. Li, E. Pic-
1975
+ ciau, and Y. Y. Zhang, JHEP 01, 116 (2021), 2008.05022.
1976
+ [17] H. Banerjee, B. Dutta, and S. Roy, Phys. Rev. D 104, 015015
1977
+ (2021), 2103.10196.
1978
+ [18] M. Abdullah, J. B. Dent, B. Dutta, G. L. Kane, S. Liao, and
1979
+ L. E. Strigari, Phys. Rev. D 98, 015005 (2018), 1803.01224.
1980
+ [19] I. M. Shoemaker, Phys. Rev. D95, 115028 (2017), 1703.05774.
1981
+ [20] S.-F. Ge and I. M. Shoemaker, JHEP 11, 066 (2018),
1982
+ 1710.10889.
1983
+ [21] P. B. Denton, Y. Farzan, and I. M. Shoemaker, JHEP 07, 037
1984
+ (2018), 1804.03660.
1985
+ [22] P. B. Denton and J. Gehrlein,
1986
+ JHEP 04,
1987
+ 266 (2021),
1988
+ 2008.06062.
1989
+ [23] P. Coloma, M. C. Gonzalez-Garcia, M. Maltoni, and T. Schwetz
1990
+ (2017), 1708.02899.
1991
+ [24] P. Coloma, I. Esteban, M. C. Gonzalez-Garcia, and M. Maltoni
1992
+ (2019), 1911.09109.
1993
+ [25] M. Cadeddu, C. Giunti, K. A. Kouzakov, Y.-F. Li, Y.-Y. Zhang,
1994
+ and A. I. Studenikin, Phys. Rev. D 98, 113010 (2018), [Erra-
1995
+ tum: Phys.Rev.D 101, 059902 (2020)], 1810.05606.
1996
+ [26] O. G. Miranda, D. K. Papoulias, M. T´ortola, and J. W. F. Valle,
1997
+ JHEP 07, 103 (2019), 1905.03750.
1998
+ [27] A. N. Khan, Nucl. Phys. B 986, 116064 (2023), 2201.10578.
1999
+ [28] D. Anderson et al., Technical Design Report Second Target
2000
+ Station,
2001
+ https://neutrons.ornl.gov/sites/default/
2002
+
2003
+ 14
2004
+ files/SNS_STS_Technical_Design_Report_2015-01.pdf
2005
+ (2015).
2006
+ [29] J. Asaadi et al., in 2022 Snowmass Summer Study (2022),
2007
+ 2209.02883.
2008
+ [30] D. Akimov et al. (COHERENT), Phys. Rev. D 102, 052007
2009
+ (2020), 1911.06422.
2010
+ [31] Coherent
2011
+ Captain-Mills
2012
+ (CCM)
2013
+ Experiment,
2014
+ https:
2015
+ //p25ext.lanl.gov/˜lee/CaptainMills/ (2018).
2016
+ [32] A. A. Aguilar-Arevalo et al. (CCM), Phys. Rev. D 106, 012001
2017
+ (2022), 2105.14020.
2018
+ [33] A. A. Aguilar-Arevalo et al. (CCM), Phys. Rev. Lett. 129,
2019
+ 021801 (2022), 2109.14146.
2020
+ [34] O. Tomalak, P. Machado, V. Pandey, and R. Plestid, JHEP 02,
2021
+ 097 (2021), 2011.05960.
2022
+ [35] D. K. Papoulias and T. S. Kosmas, Phys. Rev. D97, 033003
2023
+ (2018), 1711.09773.
2024
+ [36] D. Aristizabal Sierra, J. Liao, and D. Marfatia, JHEP 06, 141
2025
+ (2019), 1902.07398.
2026
+ [37] D. Aristizabal Sierra, B. Dutta, D. Kim, D. Snowden-Ifft, and
2027
+ L. E. Strigari, Phys. Rev. D 104, 033004 (2021), 2103.10857.
2028
+ [38] K. Patton, J. Engel, G. C. McLaughlin, and N. Schunck, Phys.
2029
+ Rev. C 86, 024612 (2012), 1207.0693.
2030
+ [39] J. L. Friar, J. Martorell, and D. W. L. Sprung, Phys. Rev. A 56,
2031
+ 4579 (1997), nucl-th/9707016.
2032
+ [40] C. J. Horowitz and J. Piekarewicz, Phys. Rev. C 86, 045503
2033
+ (2012), 1208.2249.
2034
+ [41] F. J. Ernst, R. G. Sachs, and K. C. Wali, Phys. Rev. 119, 1105
2035
+ (1960).
2036
+ [42] J. Liu, R. D. McKeown, and M. J. Ramsey-Musolf, Phys. Rev.
2037
+ C 76, 025202 (2007), 0706.0226.
2038
+ [43] R. S. Sufian, Y.-B. Yang, A. Alexandru, T. Draper, J. Liang, and
2039
+ K.-F. Liu, Phys. Rev. Lett. 118, 042001 (2017), 1606.07075.
2040
+ [44] R. H. Helm, Phys. Rev. 104, 1466 (1956).
2041
+ [45] S. Klein and J. Nystrand, Phys. Rev. C60, 014903 (1999), hep-
2042
+ ph/9902259.
2043
+ [46] J. D. Lewin and P. F. Smith, Astropart. Phys. 6, 87 (1996).
2044
+ [47] P. A. Zyla et al. (Particle Data Group), PTEP 2020, 083C01
2045
+ (2020).
2046
+ [48] I. Angeli and K. P. Marinova, Atom. Data Nucl. Data Tabl. 99,
2047
+ 69 (2013).
2048
+ [49] J. Piekarewicz, A. R. Linero, P. Giuliani, and E. Chicken, Phys.
2049
+ Rev. C 94, 034316 (2016), 1604.07799.
2050
+ [50] D. K. Papoulias, T. S. Kosmas, R. Sahu, V. K. B. Kota, and
2051
+ M. Hota, Phys. Lett. B 800, 135133 (2020), 1903.03722.
2052
+ [51] L. Stodolsky, Phys. Rev. 144, 1145 (1966).
2053
+ [52] D. Z. Freedman, Phys. Rev. D9, 1389 (1974).
2054
+ [53] V. B. Kopeliovich and L. L. Frankfurt, JETP Lett. 19, 145
2055
+ (1974).
2056
+ [54] D. Z. Freedman, D. N. Schramm, and D. L. Tubbs, Ann. Rev.
2057
+ Nucl. Part. Sci. 27, 167 (1977).
2058
+ [55] M. Hoferichter, J. Men´endez, and A. Schwenk, Phys. Rev. D
2059
+ 102, 074018 (2020), 2007.08529.
2060
+ [56] D. Aristizabal Sierra, J. L. Barrow, B. Dutta, D. Kim, L. Stri-
2061
+ gari, D. Snowden-Ifft, and M. H. Wood (νBDX-DRIFT), Phys.
2062
+ Rev. D 107, 013003 (2023), 2210.08612.
2063
+ [57] D. Akimov et al. (COHERENT) (2018), 1803.09183.
2064
+
4NFQT4oBgHgl3EQfHjWk/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
59E2T4oBgHgl3EQf7Ahe/content/tmp_files/2301.04205v1.pdf.txt ADDED
@@ -0,0 +1,1883 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Quantitative Verification of Scheduling Heuristics
2
+ Saksham Goel
3
+ UT Austin
4
+ Benjamin Mikek
5
+ Georgia Tech
6
+ Jehad Aly
7
+ Alexandria University
8
+ Venkat Arun
9
+ MIT CSAIL
10
+ Ahmed Saeed
11
+ Georgia Tech
12
+ Aditya Akella
13
+ UT Austin
14
+ Abstract
15
+ Computer systems use many scheduling heuristics to allo-
16
+ cate resources. Understanding their performance properties is
17
+ hard because it requires a representative workload and exten-
18
+ sive code instrumentation. As a result, widely deployed sched-
19
+ ulers can make poor decisions leading to unpredictable perfor-
20
+ mance. We propose a methodology to study their specification
21
+ using automated verification tools to search for performance
22
+ issues over a large set of workloads, system characteristics and
23
+ implementation details. Our key insight is that much of the
24
+ complexity of the system can be overapproximated without
25
+ oversimplification, allowing system and heuristic developers
26
+ to quickly and confidently characterize the performance of
27
+ their designs. We showcase the power of our methodology
28
+ through four case studies. First, we produce bounds on the
29
+ performance of two classical algorithms, SRPT scheduling
30
+ and work stealing, under practical assumptions. Then, we
31
+ create a model that identifies two bugs in the Linux CFS
32
+ scheduler. Finally, we verify a recently made observation that
33
+ TCP unfairness can cause some ML training workloads to
34
+ spontaneously converge to a state of high network utilization.
35
+ 1
36
+ Introduction
37
+ Modern software systems are expected to meet strict opera-
38
+ tional goals, such as high availability and good performance.
39
+ However, when subject to unforeseen workloads or operating
40
+ environments, these systems can be left exposed to corner
41
+ cases where they fall well short of their goals. The overall per-
42
+ formance of the system is critically impacted by scheduling
43
+ algorithms since they decide how system resources are allo-
44
+ cated. As a result, these algorithms have received significant
45
+ attention from academia and industry.
46
+ This paper argues that although scheduling algorithms con-
47
+ trol complex systems and exhibit complex behaviors, they
48
+ often have a simple mathematical description. This opens the
49
+ possibility of using formal verification techniques to guar-
50
+ antee their robustness under arbitrary, pathological, scenar-
51
+ ios. Conventional approaches to ensuring robustness, such
52
+ as testing and fuzzing, require workloads that can trigger all
53
+ potential problems. These can be difficult or even impossi-
54
+ ble to build (see §2.1). On the other hand, theoretical tools
55
+ (e.g., queueing theory and control theory) offer the ability of
56
+ deriving bounds on the performance of such algorithms. How-
57
+ ever, they require making lots of oversimplifying assumptions,
58
+ leading to results that don’t necessarily reflect the algorithm’s
59
+ performance in practice. In contrast, verification can guaran-
60
+ tee performance for all workloads with a simple but accurate
61
+ description of the algorithm under study.
62
+ Formal verification has grown in popularity in recent years,
63
+ with frameworks for verifying key-value stores [7,9,22], net-
64
+ work configurations [5,11,12,27], compilers [24,26,29,30],
65
+ and more. However, most efforts focus on verifying quali-
66
+ tative properties related to correctness, such as safety and
67
+ liveness. In contrast, proving quantitative properties that pro-
68
+ vide guarantees on performance has received less attention.
69
+ To that end, this paper presents a method for encoding
70
+ scheduling heuristics, the systems in which they operate, and
71
+ the performance property to be tested (e.g. work conservation
72
+ or near-optimality) in an SMT solver. The solver can then
73
+ either prove that a property always holds or provide a concrete
74
+ workload where it is violated. Since solvers can quickly and
75
+ efficiently explore a large number of workloads and system
76
+ behaviors, they can help us understand complex behaviors
77
+ that would be difficult or tedious for humans to analyze.
78
+ To limit complexity of the analysis while still maintain-
79
+ ing accuracy, we model only the parts of the surrounding
80
+ system that directly interact with the heuristic. Our models
81
+ overapproximate the system by capturing a superset of system
82
+ behaviors. Thus, if the model does not contain any behaviors
83
+ that cause the algorithm to violate the desired property, we
84
+ can be sure that the real system will not exhibit such behavior
85
+ either. The principles we adopt (see §3) allow us to reason
86
+ about complex systems in a clear and concise way.
87
+ To illustrate these principles concretely and demonstrate
88
+ generality, we include four case-studies. The first two prove
89
+ bounds on the optimality of shortest remaining processing
90
+ time (SRPT) schedulers and work stealing, demonstrating
91
+ that our tool can provide insights for well-studied algorithms
92
+ 1
93
+ arXiv:2301.04205v1 [cs.LO] 10 Jan 2023
94
+
95
+ by relaxing assumptions made in theoretical models. To go
96
+ beyond the classical theoretical results, we study the perfor-
97
+ mance of SRPT when preemption is not allowed and tasks
98
+ can block. Further, we study the performance of work stealing
99
+ algorithms while accounting for context switching costs.
100
+ Our third case study is concerned with the performance
101
+ of the Linux CFS load balancer. In particular, we check if
102
+ the algorithm is work conserving, reproducing a bug that was
103
+ recently identified by the community. Further, we identify a
104
+ new bug that can lead to the violation of the work conserva-
105
+ tion property. Finally, we show that our approach may not
106
+ be limited to CPU schedulers by verifying a recent observa-
107
+ tion [37] that TCP unfairness can cause flows to synchronize
108
+ to a schedule where they efficiently occupy a shared link.
109
+ This paper’s central thesis is that, using our modeling ap-
110
+ proach, it is possible to quickly and easily analyze scheduling
111
+ algorithms and draw useful conclusions about their behavior
112
+ in the real world. The most challenging aspect of scheduler
113
+ analysis is still asking the right questions and interpreting the
114
+ answers. However, automation can make this process easier
115
+ and more reliable. We invite the community to incorporate
116
+ performance verification as part of their workflow when de-
117
+ signing scheduling algorithms.
118
+ 2
119
+ Motivation
120
+ 2.1
121
+ The Problem of Analyzing Heuristics
122
+ Heuristics, by definition, are imperfect. However, experts de-
123
+ sign them to produce results with reasonable performance.
124
+ The typical way to evaluate the performance of a heuristic
125
+ is through thorough benchmarks and fuzzing, which show
126
+ that the heuristic performs well in a wide range of scenarios.
127
+ These benchmarks can give confidence in the heuristic, but
128
+ they do not provide guarantees on its worst-case performance.
129
+ Without guarantees, heuristics can have corner cases that can
130
+ lead to poor performance, wasted debugging efforts to iden-
131
+ tify the heuristic as the cause of the problem, and decreased
132
+ overall reliability of the system.
133
+ As an example, we will use the load balancer in the Linux
134
+ CFS scheduler to illustrate our argument. The Linux load
135
+ balancer implements complex heuristics to decide which tasks
136
+ should be moved to which CPU in order to balance the load
137
+ between CPUs, distribute idle CPU cycles, give each task a
138
+ fair share of CPU time, avoid moving tasks too much, and
139
+ respect cache and NUMA placement when moving tasks. Due
140
+ to its complexity, it is reasonable to question whether the load
141
+ balancer is work-conserving and fair.
142
+ It has been demonstrated repeatedly that the answer is “no”.
143
+ Many of the identified issues were caused by the algorithm
144
+ itself, not just the implementation. For example, prior work
145
+ found that CPUs may remain idle for a long time, even when
146
+ other cores are overloaded with a large number of active tasks,
147
+ due to an inadequate method for quantifying the load of a
148
+ group of CPUs [31]. While these were later fixed, other per-
149
+ formance bugs remain. For instance, we discovered a new
150
+ bug where CPUs can remain idle even with a large number
151
+ of runnable tasks, because of a spurious check that forces
152
+ the balancer to return early (see §6.2.2). This occurs despite
153
+ mechanisms designed apparently to prevent this scenario.
154
+ Identifying such performance bugs using conventional
155
+ methods is a major undertaking. Typically, experienced users
156
+ or developers observe anomalous behavior when using the
157
+ heuristic over an extended period of time. Such observations
158
+ prompt a more methodical effort by the community to repro-
159
+ duce the bug, leading to efforts that can require hours or even
160
+ days of experimentation. For example, the complete rewrite of
161
+ the Linux load balancer in v5.5 took several months of careful
162
+ manual scrutiny by the community to prevent bugs [15].
163
+ Not all heuristics are based solely on the designers’ intu-
164
+ ition. Many are supported by solid theoretical results that
165
+ prove the optimality of the heuristic or a version of it under
166
+ simplifying assumptions. For example, there are strong theo-
167
+ retical bounds on the performance of work stealing algorithms.
168
+ The models used to prove these bounds ignore practical con-
169
+ siderations like the context switching costs [6]. Translating
170
+ these algorithms to real-world systems results in much more
171
+ variable performance than is guaranteed by theory, and this
172
+ has become a active area of research [18,32,34]. Designers
173
+ need systematic guidance to decide whether work stealing is
174
+ right for them.
175
+ There is a tension between two objectives when evaluating
176
+ heuristics: 1) fidelity of the evaluated heuristics and workloads
177
+ with respect to the real system, and 2) confidence provided
178
+ by the evaluation methodology. Theoretical approaches have
179
+ low fidelity because they rely on simplifications that make
180
+ the problem tractable for human reasoning. Benchmarks and
181
+ fuzzers provide higher fidelity by evaluating the actual imple-
182
+ mentation of the heuristic, but they compromise on confidence
183
+ by not providing formal guarantees. In this work, we aim to
184
+ provide a practical tool that offers high confidence in the
185
+ performance of heuristics without sacrificing fidelity.
186
+ 2.2
187
+ Quantitative Verification
188
+ In deploying a scheduler in real-world settings, a system de-
189
+ signer or operator may be interested in several questions per-
190
+ taining to the performance of the scheduling heuristics. We
191
+ identify three broad classes of such questions:
192
+ Q1: Boolean questions about performance. Examples in-
193
+ clude whether a scheduler is work-conserving, or fair. An-
194
+ swers to these questions could either be that the scheduler
195
+ satisfies the property in question, or a counterexample show-
196
+ ing a valid schedule that breaks the property. These types of
197
+ questions help the designer ensure the correctness of their
198
+ design, and identify corner cases missed by the heuristic.
199
+ Q2: Comparison with an Oracular Scheduler. When de-
200
+ signing a heuristic, it’s typically helpful to know how the
201
+ heuristic compares to an optimal, oracular scheduler with
202
+ complete offline knowledge of inputs that can even solve NP-
203
+ 2
204
+
205
+ complete problems. Although such an oracular scheduler is
206
+ impractical, comparing to it helps identify room for improve-
207
+ ment and define bounds on the performance of the heuristic.
208
+ For example, we may desire to define a bound on the perfor-
209
+ mance of the studied heuristic (e.g., it takes at most twice as
210
+ long as optimal to finish all tasks).
211
+ Q3: Precise workload characterization. Heuristics based
212
+ on theoretical results will perform optimally when operating
213
+ in settings or on workloads that match the assumptions made
214
+ in the theoretical model. The same performance might hold
215
+ for a broader class of workloads, but might also break for other
216
+ classes of workloads. Similarly, benchmarking and fuzzing
217
+ can only evaluate a heuristic for specific and finite number of
218
+ workloads. A heuristic designer will typically be interested
219
+ in systematically characterizing workloads (as opposed to
220
+ experimenting with different canonical workloads on a trial-
221
+ and-error basis) for which the heuristic performs poorly.
222
+ Recent advances in verification tools provide an avenue
223
+ for efficient verification of a wide range of properties. In
224
+ our context, given a model of the heuristic in question, we
225
+ interpret “correctness” to mean achieving a quantitative per-
226
+ formance criterion, thereby helping answer questions of type
227
+ Q1. Further, existing verification tools can find assignments
228
+ that optimize a quantitative measure, allowing us to compare
229
+ the performance of an encoded heuristic to that of an oracular
230
+ scheduler. Finally, given a performance metric, we can an-
231
+ swer questions of type Q3 by using verification tools to find
232
+ workloads which perform poorly according to the metric in
233
+ each of several different specified workload classes.
234
+ 3
235
+ Methodology
236
+ Our main contribution is defining a clear methodology for
237
+ scheduling heuristic designers to use formal verification tools
238
+ to debug and better understand the performance of their de-
239
+ signs. A designer expresses environment constraints (i.e.,
240
+ CPUs and a task model), a scheduling algorithm (or heuris-
241
+ tic), and a performance query. Queries ask whether some
242
+ performance property is always true for the given scheduling
243
+ algorithm. If a counterexample exists, it helps the user iden-
244
+ tify corner cases that can break the studied algorithm. If there
245
+ is no counterexample, then we have successfully verified the
246
+ behavior encoded by the query. To answer queries we use Z3,
247
+ a Satisfiability Modulo Theories (SMT) solver [10]. To keep
248
+ our models tractable, we only use the theory of linear real
249
+ arithmetic. Table 1 provides a summary of how our method-
250
+ ology is applied to four different scheduling algorithms. We
251
+ adopt three design tenets for our models.
252
+ Overapproximation not oversimplification. Reasoning
253
+ about the complexity of a full system is intractable, whether
254
+ using automated tools or conventional analytical tools. Thus,
255
+ abstraction is a necessary step. Conventional analytical tools
256
+ tackle that abstraction with simplifying assumptions, favoring
257
+ human tractability over soundness. Formal verification, by
258
+ contrast, favors soundness. Formal verification models tend
259
+ to overapproximate systems; i.e., the set of possible model be-
260
+ haviors is a superset of the possible behaviors of the modeled
261
+ system. Hence, any theorems proved in the overapproximated
262
+ model are also true for the real system. However, counterex-
263
+ amples produced by the model require human inspection to
264
+ ensure that they are also possible in the underlying system. In
265
+ our experience, all counterexamples we encountered have a
266
+ real-world counterpart.
267
+ Our methodology is to overapproximate all behavior ex-
268
+ ternal to the scheduling algorithm. For example, all blocking
269
+ calls (e.g., networking, storage, or synchronization) that force
270
+ a thread to yield a CPU core can be treated collectively as one,
271
+ allowing a task’s blocking time to have a wide range of values.
272
+ The solver can then choose any value for the blocking time a
273
+ thread faces. Despite networking and storage calls typically
274
+ having bounded latencies, overapproximation helps us avoid
275
+ making any assumptions about their behavior, allowing us to
276
+ make general conclusions about the scheduling algorithm.
277
+ Overapproximating external behavior makes the system
278
+ easier to model for the user. In particular, it allows users to
279
+ focus on modeling the details of their algorithm while the
280
+ solver picks the behavior of components external to the mod-
281
+ eled system. Further, reducing the volume of details makes
282
+ user–created models more concise. In all of the case studies
283
+ presented here, for example, the SMT constraints are gener-
284
+ ated by less than 1,000 lines of Python.
285
+ Event-based modeling. Schedulers determine the order-
286
+ ing and time of execution of tasks. There are multiple ways
287
+ to encode their behavior. As discussed above, we overapprox-
288
+ imate all behavior not integral to the heurstic. However, this
289
+ leaves an important question: how should we model the de-
290
+ tails of complicated heuristics with many steps and states?
291
+ Capturing every step and state in a heuristic creates intractable
292
+ models. A more tractable approach is to discretize time, al-
293
+ lowing the automatic solver to capture all possible state tran-
294
+ sitions that can happen between two timesteps. This approach
295
+ is intuitive and has been used before to model, for example,
296
+ the performance of congestion control algorithms [2]. How-
297
+ ever, this modeling approach suffers from several downsides.
298
+ First, it limits the time span captured by the model, reducing
299
+ the scalability of the model. Second, it requires introducing
300
+ complex constraints to ensure that only valid state transitions,
301
+ including scheduling decisions, are made between two time
302
+ steps.
303
+ By contrast, our approach relies on identifying key events in
304
+ schedules produced by a heuristic (e.g., the point in time when
305
+ a thread blocks). Where necessary, we represent a state with
306
+ two events: its starting and finishing times. The automated
307
+ solver can assign arbitrary time values to events, enabling it to
308
+ capture arbitrarily large time spans. In addition, an algorithm
309
+ makes scheduling decisions at a particular event, meaning
310
+ that scheduling behaviors need only be encoded relative to
311
+ events, potentially simplifying the model.
312
+ Unconstrained initial conditions. Event-based modeling
313
+ 3
314
+
315
+ Heuristic
316
+ Queries
317
+ Events
318
+ Overapproximation
319
+ Single-core SRPT Scheduling (§4)
320
+ Comparison to an oracle scheduler
321
+ for various objectives
322
+ Tasks changing their states from
323
+ ready to running to blocking
324
+ Causes of blocking and
325
+ task profiles
326
+ Work Stealing (§5)
327
+ Characterizing the impact of
328
+ context switching cost on performance
329
+ Start and end of individual
330
+ tasks in a DAG
331
+ Context switching cost
332
+ Linux CFS Load Balancer (§6)
333
+ Work conservation
334
+ Periodic load balancing ticks
335
+ Changes in the states of
336
+ tasks between ticks
337
+ Ring Reduce Scheduling (§7)
338
+ Achieving high network utilization
339
+ Start and end of flows
340
+ Behavior of congestion
341
+ control algorithms
342
+ Table 1: A summary of the application of the modeling methodology to four scheduling heurisitics.
343
+ limits the number of tasks and events that a model captures,
344
+ leaving a critical question open: How can a small number of
345
+ events provide useful insights about complicated heuristics?
346
+ Testing a scheduling heuristic for violations of a given per-
347
+ formance property using benchmarks or fuzzing requires the
348
+ identification of a concrete task workload under which the
349
+ heuristic fails. Such a workload is likely to be complex and dif-
350
+ ficult to analyze, even if only a few of its events cause the prop-
351
+ erty violation. Our approach solves this problem by letting the
352
+ solver pick arbitrary initial conditions for the system. Com-
353
+ pared to a concrete workload under the testing approach, the
354
+ solver may choose initial conditions corresponding the state
355
+ of the system just before the events which caused the property
356
+ violation. In this sense, unconstraining initial conditions al-
357
+ lows us to jump past irrelevant events which a benchmarking
358
+ or fuzzing approach would have to exhaustively consider.
359
+ 4
360
+ Case study: Single Core SRPT Scheduling
361
+ We begin with the simplest case study: shortest remaining
362
+ processing time first (SRPT) on a single processor. It is well
363
+ known that SRPT is optimal with respect to average time to
364
+ completion [40]. However, proofs of optimality assume both
365
+ preemption and no task blocking, for example, to perform
366
+ IO operations. Later work on queueing theory has provided
367
+ bounds on the average running time of tasks in a system with
368
+ general load [3], and investigated the fairness implications
369
+ of SRPT [4]. The performance of non-preemptive schedulers
370
+ has also been investigated [1], but not with blocking.
371
+ We fill this gap for a simple non-preemptive SRPT sched-
372
+ uler by providing a model which can be used to verify per-
373
+ formance properties without prior expertise. For a simple
374
+ system with a single core and plain SRPT scheduling, we try
375
+ to understand SRPT’s performance when tasks can block.
376
+ 4.1
377
+ Model
378
+ We focus on the context of a single core where tasks can
379
+ alternate between the states of running and blocking and no
380
+ preemption is allowed. This context is reflective of many RPC
381
+ execution settings where threads run to completion and only
382
+ yield when they make blocking calls [18,34,44]. Within this
383
+ context, a task can be in one of three states: ready, running, or
384
+ blocking, as shown in Figure 1a. Instead of modeling states
385
+ explicitly, we identify two events corresponding to the passage
386
+ of a task through each state: the time when it enters the state
387
+ and the time it leaves. We call one passage through the state
388
+ D
389
+ R
390
+ B
391
+ Start
392
+ Finish
393
+ Blocking
394
+ Running
395
+ Ready
396
+ (a) Modeled state machine of a task
397
+ ...
398
+ Time
399
+ Ds1
400
+ Df1=Rs1
401
+ Rf1=Bs1
402
+ Bf1=Ds2
403
+ Bfs
404
+ Waiting in
405
+ the queue
406
+ Running
407
+ Finished
408
+ Blocking
409
+ (b) Mapping the state machine to an event-based model in time
410
+ Figure 1: Task model, showing the model of a single task
411
+ cycle a step; a step has 6 events, two each for ready, running,
412
+ and blocking as shown in Figure 1b. We overapproximate
413
+ real systems by allowing events to happen at arbitrary points
414
+ in time subject only to constraints enforced by our context
415
+ (i.e., having a single core). We constrain the system such that
416
+ only one task can be running at any point in time. Under this
417
+ model, a schedule is simply an ordering on the time each task
418
+ enters each state.
419
+ Tasks. We consider a model with n tasks T1,...,Tn. We limit
420
+ the number of events in the model by fixing the number of
421
+ steps s per task. A step is one ready–running–blocking cycle
422
+ in Figure 1; each task has six events per step. We define a task
423
+ Ti as a tuple of (Li,Di,Ri,Bi), where Li > 0 is the total length
424
+ of the task (i.e., the sum of all the time it spends running).
425
+ D,R,B ∈ (R ×R)s are sets of pairs that define the start and
426
+ end of ready, running, and blocking periods, respectively. We
427
+ denote these with D =
428
+ ��
429
+ Dsj
430
+ i ,Df j
431
+ i
432
+
433
+ | 1 ≤ j ≤ s
434
+
435
+ , where
436
+ Dsj
437
+ i is the start of the ready event of task i in step j and Df j
438
+ i is
439
+ the end of the same ready event. We define R and B similarly.
440
+ The model includes constraints that ensure the proper tim-
441
+ ing of all events. At the task level, we ensure that the sum of
442
+ the running times per task is equal to its length. Further, tasks
443
+ must always be in one of the three states, and must proceed
444
+ from waiting to running to blocking in that order. Formally:
445
+ ∀i
446
+ (Df j
447
+ i = Rsj
448
+ i < Rf j
449
+ i = Bs j
450
+ i ≤ Bf j
451
+ i ),
452
+ 1 ≤ j ≤ s
453
+ ∀i
454
+ (Bf j
455
+ i = Dsj+1
456
+ i
457
+ ),
458
+ 1 ≤ j < s
459
+ Scheduler. Since a schedule is simply a valid ordering on
460
+ events, we can represent a scheduling algorithm as a con-
461
+ straint on task variables. SRPT allows a task to run iff that task
462
+ has the least remaining processing time or all tasks with less
463
+ 4
464
+
465
+ remaining time are blocking. Formally, let the remaining pro-
466
+ cessing time of task i’s step j be ej
467
+ i = Li −∑j
468
+ k=0(Rf k
469
+ i −Rsk
470
+ i ).
471
+ SRPT can be defined as on ordering on running times, subject
472
+ to the remaining time of each task
473
+ ∀i,l ≤ n ∀ j,k ≤ s
474
+ (Rs j
475
+ i < Rsk
476
+ l ) ⇐⇒
477
+
478
+ ej−1
479
+ i
480
+ ≤ ek−1
481
+ l
482
+ ∧Rf k−1
483
+ l
484
+ ≤ Rsj
485
+ i
486
+
487
+
488
+
489
+ ek−1
490
+ l
491
+ ≤ ej−1
492
+ i
493
+ ∧Rf k−1
494
+ l
495
+ ≤ Rsj
496
+ i
497
+ ∧Bsk−1
498
+ l
499
+ ≤ Rsj
500
+ i ∧Rs j
501
+ i < Bf k−1
502
+ l
503
+
504
+
505
+
506
+ R f k−1
507
+ l
508
+ > Rsj
509
+ i
510
+
511
+ The first condition checks that by step j −1, task i has less
512
+ remaining time than task l by step k −1. The second condi-
513
+ tion checks that if the first predicate is false that task l will
514
+ be blocking when step j of task i runs. The last condition
515
+ checks that l’s step k comes after i’s step j. We note that the
516
+ SRPT scheduling constraint is an overapproximation because
517
+ it assumes perfect knowledge of tasks’ remaining time. In real
518
+ workloads where remaining time is uncertain, SRPT could
519
+ perform even worse than the results presented in this section.
520
+ Objective. We create two schedules for the same set of tasks,
521
+ one generated by SRPT and the other representing the best
522
+ possible schedule subject to the query. Formally, we define a
523
+ schedule as a set of tasks {T1,...,Tn}. Our queries are repre-
524
+ sented by two schedules SchedSRPT = T and Schedquery = T ′,
525
+ where SchedSRPT follows an SRPT schedule. Both schedules
526
+ have the same set of tasks, or Rf j
527
+ i − Rsj
528
+ i = R′ f j
529
+ i − R′sj
530
+ i and
531
+ Bf j
532
+ i −Bs j
533
+ i = B′ f j
534
+ i −B′sj
535
+ i for all i ≤ n and j ≤ s.
536
+ We specify two queries: 1) comparing the average comple-
537
+ tion time of tasks under the two schedules, and 2) comparing
538
+ the number of tasks that finish within a specific deadline. For
539
+ average completion time, the query fixes a ratio q > 0, and
540
+ asks whether average completion time in SchedSRPT can be
541
+ q times more than in Schedquery: (∑n
542
+ i=0 Bf s
543
+ i ) = q(∑n
544
+ i=0 B′ f s
545
+ i )
546
+ For a deadline, the query specifies a time G and compares the
547
+ number of tasks a and a′ finished by time G in each schedule:
548
+ ���
549
+ Ti | B f i
550
+ s ≤ G
551
+ ��� = a ∧
552
+ ���
553
+ T ′
554
+ i | B′ f i
555
+ s ≤ G
556
+ ��� = a′
557
+ Expressiveness and performance. The model is general and
558
+ can be used to evaluate the performance of any valid schedul-
559
+ ing algorithm (i.e., an ordering on the running steps of individ-
560
+ ual tasks) under any objective. For our model, the performance
561
+ of Z3 scales approximately exponentially with the number of
562
+ variables; it is therefore infeasible to make queries with very
563
+ large number of tasks or steps. To maximize the number of
564
+ queries, deadline objective results were generated with a = 1,
565
+ a′ = n, and s = 2; for smaller numbers of tasks n ≤ 4, we
566
+ confirmed that the results hold for s = 3 and s = 4 as well.
567
+ Despite these limitations, our modeling approach remains
568
+ highly expressive; with each query, it checks an infinite class
569
+ of inputs, a task that would be infeasible with a simulator.
570
+ In our evaluation, our model was able to check more than
571
+ 500,000 infinite classes of tasks in about a day on a standard
572
+ desktop by scanning different values of a, a′, s, n, and bounds
573
+ on running and blocking times.
574
+ 4.2
575
+ Results
576
+ Due to the simplicity of SRPT scheduling, we focus on the Q2
577
+ and Q3 classes of questions. Questions in Q1 are very simple
578
+ to reason about (e.g., SRPT is by definition work conserving).
579
+ Comparison to an oracular scheduler. Our model allows
580
+ us to characterize SRPT’s performance by creating queries
581
+ about the existence of schedules that significantly outperform
582
+ SRPT, for both objectives. We start with a simple question
583
+ to verify existing results. In particular, when no blocking is
584
+ allowed, all queries for both average completion time (i.e., k >
585
+ 1) and number of tasks finished (i.e., a′ > a) are unsatisfiable,
586
+ meaning that there is no schedule which performs better than
587
+ SRPT. This matches the known result of SRPT optimality.
588
+ Then, we unconstrain blocking times, allowing them to
589
+ take any value. In that setting, the solver can generate valid
590
+ task sets for which SRPT performs much worse than an ideal
591
+ schedule. In particular, for average completion time, the solver
592
+ generates a task pattern like that shown in Figure 2a with one
593
+ long task, and n − 1 short tasks all of which are forced to
594
+ finish after the long task. Our model can produce schedules
595
+ for which average completion time under SRPT is up to n−ε
596
+ times worse than the query schedule, for any small ε, where
597
+ n is the number of tasks. For example, for queries with n =
598
+ 3, q = 3 is infeasible because the average running time in
599
+ SchedSRPT cannot be more than the total running times of all
600
+ tasks, and at least one of the tasks in Schedquery must also take
601
+ this long. Obviously, preemption can mitigate such scenarios
602
+ of poor performance.
603
+ For the deadline query, the solver can generate valid sched-
604
+ ules for a′ >> a, showing that SRPT can finish a very small
605
+ number of tasks when it’s feasible to finish a much larger
606
+ number. For example, the solver was able to generate a sched-
607
+ ule for the values a = 1 and a′ = 10. The relationship does
608
+ not depend on the input choice of deadline; the solver can
609
+ generate similar examples for any reasonable choice of dead-
610
+ line. Figure 2b shows a concrete set of tasks for which SRPT
611
+ finishes only one task, while an optimal scheduler finishes
612
+ five. A limitation of our tool is the need to set concrete values
613
+ of a and a′. Thus, our results are limited by the values we
614
+ scanned. The key insight for the poor performance of SRPT
615
+ under the deadline query is that SRPT can force all tasks to
616
+ block simultaneously, thus wasting processor time. Without
617
+ the SRPT constraint, it is possible to strategically create a task
618
+ ordering which minimizes this wasted time. For example, in
619
+ Figure 2b, delaying the start of T3 allows its running period
620
+ to overlap with the blocking periods of other tasks, thereby
621
+ reducing wasted time.
622
+ Workload characterization. Our next set of questions is
623
+ aimed at understanding the relationship between blocking
624
+ time (i.e., the maximum time that can be spent in a single
625
+ 5
626
+
627
+ 5
628
+ 10
629
+ 15
630
+ 20
631
+ 25
632
+ T1
633
+ 21.75
634
+ T2
635
+ 19.25
636
+ T3
637
+ 20.5
638
+ T1
639
+ 4.5
640
+ T2
641
+ 3.25
642
+ T3
643
+ 23
644
+ x
645
+ x
646
+ SRPT
647
+ Ideal
648
+ Time
649
+ (a) A set of tasks for which average completion time under SRPT is 2× higher
650
+ than an ideal schedule (10.25 vs 20.5, with units arbitrary). By extending
651
+ the running period marked x, it is possible to achieve any performance gap
652
+ smaller than 3×. The first blocking period of T2 must be longer than T1’s
653
+ running period to force T3 to start running.
654
+ T1
655
+ 
656
+ T2
657
+ 
658
+ T3
659
+ 
660
+ T4
661
+ 
662
+ T5
663
+ 
664
+ T1
665
+ 
666
+ T2
667
+ 
668
+ T3
669
+ 
670
+ T4
671
+ 
672
+ T5
673
+ 
674
+ SRPT
675
+ Ideal
676
+ Time
677
+ (b) A concrete set of tasks for which it is feasible to finish 5 times more tasks
678
+ than SRPT. SRPT finishes only T1, while the oracle produces a schedule
679
+ which finishes all 5 tasks. The time scale is arbitrary.
680
+ Figure 2: Solver–generated schedules which for which SRPT
681
+ performs badly.
682
+ blocking call) and the performance of SRPT. We formulate
683
+ our question such that all time values are relative to the mini-
684
+ mum time a task can spend running, making it our unit time
685
+ (i.e., R f j
686
+ i −Rsj
687
+ i ≥ 1 for all i, j). Further, we bound the max-
688
+ imum blocking time to α. Since the time scale is arbitrary,
689
+ α acts as a bound on the ratio of minimum running times to
690
+ maximum blocking times.
691
+ For the average completion time objective, the results fol-
692
+ low the pattern shown in Figure 2a. Whenever α > n−2, it
693
+ is possible for the SRPT schedule to have arbitrarily higher
694
+ average completion time than the query schedule (subject
695
+ to the feasibility bound of q = n). This is the case because
696
+ α > n − 2 allows one short task to block during the entire
697
+ running periods of n−2 other short tasks, thereby forcing a
698
+ single long task to run before any of the short tasks can finish.
699
+ We have confirmed this result with our model up to n = 6. If
700
+ we have α ≤ n−2, then the possible values of r grow with
701
+ the number of tasks, but do not depend on α.
702
+ For the deadline objective, we create queries with fixed
703
+ a = 1 and scan all integer values 1 ≤ a′ ≤ 7 for each integer
704
+ value 1 ≤ α ≤ 7, creating 49 queries. We find a linear rela-
705
+ tionship between α and the worst case performance of SRPT.
706
+ In particular, for every α, an optimal schedule could finish α
707
+ times more tasks than SRPT, but not more. This relationship
708
+ with α is also related to the ability of an ideal scheduler to
709
+ place multiple running periods of one task within the blocking
710
+ time of another, thus reducing wasted time. For example, if
711
+ only two tasks are present and all blocking periods are smaller
712
+ than the smallest running period, wasted time is guaranteed.
713
+ As the maximum blocking time increases, more and more run-
714
+ ning time periods of other tasks can overlap with the blocking
715
+ of another, allowing an ideal scheduler to perform better than
716
+ SRPT. As in Figure 2b, the key is to minimize the time wasted
717
+ when all tasks are blocking.
718
+ Impact of work conservation. The results presented in this
719
+ section, so far, are based on queries that attempt to find a better-
720
+ performing schedule subject to a single constraint on the
721
+ gap in performance between that schedule and SRPT sched-
722
+ ules. Our goal is to better understand the better-performing
723
+ schedules. Our initial queries have no constraints except for
724
+ the performance they achieve. We initially thought that such
725
+ schedules can strategically order tasks by being non-work
726
+ conserving. Thus, we added a constraint on Schedquery to be
727
+ work conserving. However, to our surprise, this constraint
728
+ didn’t change any of our results.
729
+ The key takeaway from this case study is that our methodol-
730
+ ogy allows for exploring different aspects of the performance
731
+ of a heuristic by simply formulating a reasonable query, with-
732
+ out the need for any theoretical background or laborious ex-
733
+ ample checking.
734
+ 5
735
+ Case study: Work Stealing
736
+ Work stealing schedulers assign tasks to multiple proces-
737
+ sors. Each processor executes tasks in its local queue in
738
+ order. When a processor becomes idle, it steals the oldest
739
+ task from another processor’s queue. Work stealing has been
740
+ well-studied and has had a significant impact on real-world re-
741
+ source schedulers [18,34]. A well-known theorem guarantees
742
+ that work stealing will find a schedule that finishes all tasks in
743
+ at most twice the time of an offline optimal scheduler [6], but
744
+ does not account for context switching cost between threads.
745
+ We show how our methodology can address this gap.
746
+ A known model exists that over-approximates the sys-
747
+ tem [6]. The algorithm is invariant to the specific operations
748
+ of tasks, so task lengths are represented by a single real num-
749
+ ber. Dependencies between tasks, such as locks and multi-
750
+ threaded channels, are represented by edges in a Directed
751
+ Acyclic Graph (DAG). We encode the DAG as a boolean ad-
752
+ jacency matrix. To add context-switching costs to the model,
753
+ we group tasks into “threads”. Tasks in a thread must be con-
754
+ nected in a straight sequence in the DAG. When a processor
755
+ switches between tasks in the same thread, it incurs no con-
756
+ text switching cost. Otherwise, it incurs a cost that may be
757
+ different for each task. In general, context switching cost can
758
+ depend on the tasks and the processor involved because of ar-
759
+ chitectural features like NUMA, and the status of any caches.
760
+ We adopt a simpler model, and let the solver arbitrarily de-
761
+ cide switching costs for every task. Fig. 3a shows an example
762
+ 6
763
+
764
+ sc = 1
765
+ t1
766
+ len = 1.75
767
+ sc = 0.5
768
+ sc = 0.75
769
+ t4
770
+ len = 1
771
+ t3
772
+ len = 1.75
773
+ t2
774
+ len = 1
775
+ t5
776
+ len = 2
777
+ t6
778
+ len = 1
779
+ (a) An example DAG. The color of each task represents the owning
780
+ thread. Red edges denote inter-task dependencies. The blue dashed lines
781
+ show CPU jumps between tasks belonging to different threads, with the
782
+ switching cost (sc).
783
+ CPU1
784
+ t1
785
+ sc
786
+ t3
787
+ t4
788
+ sc
789
+ t6
790
+ CPU2
791
+ t5
792
+ idle
793
+ sc
794
+ t2
795
+ CPU1
796
+ t1
797
+ t3
798
+ t4
799
+ t6
800
+ CPU2
801
+ t2
802
+ sc
803
+ t5
804
+ OPT:
805
+ WS:
806
+ Time
807
+ (b) Work Stealing and Optimal schedules
808
+ Figure 3: Work Stealing vs Optimal
809
+ DAG with tasks grouped into threads.
810
+ The solver searches every choice of DAG, assignment of
811
+ tasks to threads, task lengths and context switching costs.
812
+ Together these choices specify a “job”. Constraints ensure
813
+ the choices make sense, i.e. costs are positive, the adjacency
814
+ matrix forms a DAG etc. To keep the problem tractable, we
815
+ restrict to a finite number of tasks and processors, though
816
+ there are no bounds on task lengths and costs.
817
+ The solver chooses variables representing two schedules for
818
+ this job. One schedule is constrained to mimic work stealing
819
+ and the other can be arbitrary. The solver is instructed to
820
+ maximize the ratio of the time taken by the work stealing
821
+ schedule to the time taken to finish the arbitrary one. This
822
+ forces it to minimize the completion time for the arbitrary
823
+ schedule, making it optimal. Our queries explore how this
824
+ ratio varies as we impose different constraints on the context
825
+ switching costs.
826
+ A schedule is represented by the start and finish times for
827
+ each task, as well as a boolean matrix that maps tasks to
828
+ CPUs. While the two schedules can be different, constraints
829
+ ensure that both respect the tasks’ properties, such as their
830
+ lengths, dependencies, and switching costs. For example, the
831
+ following constraints ensure that a task is ready to execute
832
+ when all of its dependencies are finished1:
833
+ 1tasks is a small, finite set. Hence ∀ and ∃ can be written in quantifier
834
+ free logic. This is easier to solve.
835
+ ∀t1,t2 ∈ tasks
836
+ edge(t1,t2) =⇒
837
+ t1.sched.end_time≤ t2.sched.ready_time
838
+ (1)
839
+ ∀t2 ∈ tasks
840
+ (∃t1 ∈ tasks,
841
+ edge(t1,t2)
842
+
843
+ t1.sched.end_time= t2.sched.ready_time)
844
+
845
+ t2.sched.ready_time= 0
846
+ (2)
847
+ Fig. 3b shows an example work stealing schedule and com-
848
+ pares it with the optimal schedule.
849
+ 5.1
850
+ Queries and Results
851
+ We ask the solver to maximize the ratio between completion
852
+ times in the work stealing and unconstrained schedules:
853
+ maximize(timews/timeopt)
854
+ (3)
855
+ First, we set the costs of context switching to zero and
856
+ queried for the maximal ratio between work stealing and op-
857
+ timal. We found that the bound was not 2, but 2− 1
858
+ P, where
859
+ P is the number of processors. Since we are using an SMT
860
+ solver, the bound it found is exact and matches the known
861
+ theoretical result. Thus, even though we only queried for up
862
+ to 4 processors, 4 threads, and 9 tasks, we believe that the pre-
863
+ cision and consistency of our results allow for extrapolation
864
+ to larger values.
865
+ Next, we introduce switching costs. We parametrize the
866
+ switching cost in two ways. Parameter k caps the maximum
867
+ switching cost and c caps how different the switching costs
868
+ can be from each other. Formally:
869
+ ∀t ∈ tasks,
870
+ switch_cost(t) ≤ k ×lenmin
871
+ (4)
872
+ max
873
+ t∈tasks(switch_cost(t))
874
+ ≤ c× min
875
+ t∈tasks(switch_cost(t))
876
+ (5)
877
+ Since the unit of time is arbitrary, we pick one so that the
878
+ maximum task length is 1. We fix c = 1 and k = 10. This
879
+ means that the solver can select any context switching cost
880
+ up to 10× the task length, as long as the costs are equal to
881
+ each other. Figure 4 shows the optimality ratio as we vary
882
+ the maximum size of the DAG tasks. For small DAGs, work
883
+ stealing performs close to optimal. The performance worsens
884
+ linearly as the size increases. It is interesting to note that the
885
+ bound grows at a slower rate with increasing number of CPUs
886
+ and the ratio remains bounded even though context switch
887
+ cost can be up to 10× the task length.
888
+ Next, we vary the bound on the maximum allowed switch-
889
+ ing cost while constraining that all costs be equal to each
890
+ other. Figure 5 plots the optimality ratio for different num-
891
+ bers of CPUs and tasks.2 Interestingly, it plateaus to a value
892
+ only slightly larger than in the case without context switching
893
+ cost. For instance, with 2 processors and up to 7 tasks, the
894
+ optimality ratio is smaller than 4.
895
+ 2In realistic systems, k should not be much larger than 1.
896
+ 7
897
+
898
+ 1
899
+ 2
900
+ 3
901
+ 4
902
+ 5
903
+ 6
904
+ 7
905
+ c
906
+ 1.0
907
+ 1.5
908
+ 2.0
909
+ 2.5
910
+ 3.0
911
+ 3.5
912
+ 4.0
913
+ 4.5
914
+ Work Stealing vs Optimal
915
+ #CPU=2
916
+ #CPU=3
917
+ Figure 4: Work Stealing performance for different DAG sizes
918
+ (k = 10,c = 1). When the number of tasks is less than the
919
+ number of CPUs, work stealing is trivially optimal.
920
+ 0
921
+ 2
922
+ 4
923
+ 6
924
+ 8
925
+ 10
926
+ k
927
+ 1.5
928
+ 2.0
929
+ 2.5
930
+ 3.0
931
+ 3.5
932
+ 4.0
933
+ Work Stealing vs Optimal
934
+ #CPU=2, #Tasks=7
935
+ #CPU=2, #Tasks=6
936
+ #CPU=3, #Tasks=7
937
+ #CPU=3, #Tasks=6
938
+ Figure 5: Work Stealing performance as switching costs scale
939
+ up. Every pair of task has the same cost (c = 1).
940
+ 1
941
+ 2
942
+ 3
943
+ 4
944
+ 5
945
+ 6
946
+ 7
947
+ 8
948
+ c
949
+ 2.5
950
+ 5.0
951
+ 7.5
952
+ 10.0
953
+ 12.5
954
+ 15.0
955
+ 17.5
956
+ 20.0
957
+ Work Stealing vs Optimal
958
+ #CPU=2, #Task=6
959
+ #CPU=2, #Task=5
960
+ #CPU=3, #Task=7
961
+ #CPU=3, #Task=6
962
+ #CPU=2, #Task=4
963
+ #CPU=3, #Task=5
964
+ Figure 6: Work Stealing performance when costs between dif-
965
+ ferent pairs of tasks can vary. (k = 10)
966
+ This sets the stage up for the final result, wherein we we
967
+ allow switching costs to vary between 0 and 10 as long as
968
+ they are within c× of each other, as shown in figure 6. Here
969
+ the ratio grows linearly with c, showing that the variation in
970
+ switching cost matters more than its absolute value.
971
+ What can practitioners take away from this analysis? If
972
+ context switching cost is small, work stealing is near optimal.
973
+ If not, it is still near optimal if the number of CPUs is large
974
+ or if cost variation is small and the job is representable by a
975
+ small DAG. A caveat is that our results are only rigorously
976
+ proven for small numbers of CPUs and tasks. Nevertheless
977
+ the numbers fit so perfectly on a line that we are tempted to
978
+ conjecture and extrapolate.
979
+ 6
980
+ Case study: The Linux CFS Load Balancer
981
+ The Completely Fair Scheduler (CFS) scheduler is the de-
982
+ fault process scheduler in Linux for systems with multiple
983
+ processing units. It aims to ensure fairness between running
984
+ threads without sacrificing performance. We studied the load
985
+ balancing logic in the CFS scheduler for Linux v5.5 [14].
986
+ Overview. The load balancer supports multi-core architec-
987
+ tures, including SMT, SMP, and NUMA. To minimize cost,
988
+ Level 0
989
+ Level 1
990
+ Level 2
991
+ CPU1
992
+ CPU2
993
+ CPU3
994
+ CPU4
995
+ Domain11
996
+ Domain12
997
+ Domain21
998
+ Figure 7: Domain hierarchy with 4 CPUs. The smaller sub-
999
+ domains also act as the groups of the larger domain.
1000
+ it tries to move tasks between nearby CPUs. To capture how
1001
+ close CPUs are to each other, the load balancer divides CPUs
1002
+ into scheduling domains. Scheduling domains form a hier-
1003
+ archy starting from individual CPUs at bottom level to the
1004
+ top-level domain that includes all CPUs. For example, a do-
1005
+ main could be the set of CPUs in the same NUMA node.
1006
+ Ideally, the algorithm should balance load between all CPUs
1007
+ at all domains. However, this task becomes expensive as the
1008
+ number of CPUs grows. To reduce the cost of load balancing,
1009
+ the algorithm balances the aggregate load between groups
1010
+ of CPUs. In particular, each domain is divided into multiple
1011
+ groups, each containing one or more CPUs. The algorithm
1012
+ attempts to balance load between the groups in each domain.
1013
+ Figure 7 shows an example a domain hierarchy.
1014
+ Algorithm 1 describes LoadBalance(), a function run by
1015
+ CPU c to balance domain sd. Each CPU runs its own in-
1016
+ stance of the load balancer at regular intervals. At each tick,
1017
+ the balancer traverses the domain hierarchy upwards and calls
1018
+ LoadBalance() to balance work between groups within pro-
1019
+ gressively larger domains. 3 At any point in time, exactly one
1020
+ CPU is responsible to balance the load in a particular domain
1021
+ (Line 36). To avoid moving small tasks between multiple
1022
+ CPUs, the balancer only moves tasks if the imbalance between
1023
+ two groups is larger than a configurable threshold (Line 38).
1024
+ To further reduce the balancing cost, a CPU only moves tasks
1025
+ from the busiest CPU of the busiest group (Line 43). De-
1026
+ pending on workload characteristics, the algorithm selects
1027
+ a ‘migration type’ (Line 44). These types define the rules
1028
+ to determine the busiest CPU, the imbalance between two
1029
+ groups, and the load generated by each task. We describe how
1030
+ each decision is made as needed later in the section.
1031
+ While we attempt to capture the full complexity of the per-
1032
+ tick algorithm, we forego modeling some optional features
1033
+ such as task pinning and priorities. We do not model cache-
1034
+ aware heuristics. Even with these simplifications, we find our
1035
+ model to be expressive enough to discover algorithmic bugs.
1036
+ 6.1
1037
+ Model
1038
+ At 10,000+ lines of code [17], the load balancer includes
1039
+ several complex heuristics that interact with each other at
1040
+ each scheduling tick to quantify a measure of imbalance, and
1041
+ 3Domains can have their own balancing interval that can change dy-
1042
+ namically. Interval calculations are nuanced, and we omit these details for
1043
+ simplicity.
1044
+ 8
1045
+
1046
+ Algorithm 1 Linux CFS Load Balancing Algorithm
1047
+ 1: procedure MIGRATIONTYPE(busiest,local,idle)
1048
+ 2:
1049
+ if local.HasSpare() then
1050
+ 3:
1051
+ if busiest.Overloaded() then
1052
+ 4:
1053
+ has_cap ← local.capacity > local.util
1054
+ 5:
1055
+ if !idle or has_cap then
1056
+ 6:
1057
+ return MIGRATE_UTIL
1058
+ 7:
1059
+ else
1060
+ 8:
1061
+ return MIGRATE_TASK
1062
+ 9:
1063
+ ···
1064
+ 10: end procedure
1065
+ 11: procedure BUSIESTCPU(CPUs,m_type)
1066
+ 12:
1067
+ key1 ← lambda c : c.util_avg
1068
+ 13:
1069
+ key2 ← lambda c : c.nr_running
1070
+ 14:
1071
+ moreThanOne ← Filter(CPUs,key2)
1072
+ ▷ added in
1073
+ v5.7
1074
+ 15:
1075
+ switch m_type do
1076
+ 16:
1077
+ case MIGRATE_UTIL
1078
+ 17:
1079
+ return argmax(moreThanOne, key1)
1080
+ 18:
1081
+ case MIGRATE_TASK
1082
+ 19:
1083
+ return argmax(CPUs, key2)
1084
+ 20:
1085
+ case ···
1086
+ 21: end procedure
1087
+ 22: procedure CALCIMB(busiest,local,m_type)
1088
+ 23:
1089
+ switch m_type do
1090
+ 24:
1091
+ case MIGRATE_UTIL
1092
+ 25:
1093
+ return local.capacity−local.util
1094
+ 26:
1095
+ case MIGRATE_TASK
1096
+ 27:
1097
+ if busiest.Overloaded() then
1098
+ 28:
1099
+ return 1
1100
+ 29:
1101
+ else
1102
+ 30:
1103
+ t1 ← busiest.nr_idle
1104
+ 31:
1105
+ t2 ← local.nr_idle
1106
+ 32:
1107
+ return max(0,(t1 −t2)/2)
1108
+ 33:
1109
+ case ···
1110
+ 34: end procedure
1111
+ 35: procedure LOADBALANCE(c,sd)
1112
+ 36:
1113
+ if !IsResponsible(c,sd) then
1114
+ 37:
1115
+ return
1116
+ 38:
1117
+ if !ConsiderableImb(c.idle,sd) then
1118
+ 39:
1119
+ return
1120
+ 40:
1121
+ dst_g ← sd.FindGroup(c)
1122
+ 41:
1123
+ if !sd.GroupAboveAvg(dst_g) then
1124
+ 42:
1125
+ return
1126
+ 43:
1127
+ src_g ← sd.FindBusiestGroup()
1128
+ 44:
1129
+ m_type ← MigrationType(src_g,dst_g,c.idle)
1130
+ 45:
1131
+ src_c ← BusiestCPU(src_g.CPUs,m_type)
1132
+ 46:
1133
+ imb ← CalcImb(src_g,dst_g,m_type)
1134
+ 47:
1135
+ DetachTasks(src_c,c,m_type,imb)
1136
+ ▷ moves tasks
1137
+ based on migration type
1138
+ 48: end procedure
1139
+ decide on how to balance it if at all. While we attempt to
1140
+ capture the full complexity of the per-tick algorithm, to keep
1141
+ analysis tractable, we make several simplifications. We forego
1142
+ modeling some optional features such as task pinning and
1143
+ priorities. We also do not model cache-aware heuristics. As
1144
+ a result, our model is sound, but not complete. Nevertheless,
1145
+ our model captures a large subset of possible Linux behaviors
1146
+ and detected several bugs.
1147
+ In general, CPUs can load balance asynchronously upon be-
1148
+ coming idle. However, we choose to only model the per-tick
1149
+ behavior to keep our model tractable. Timesteps are repre-
1150
+ sented as an integer vector of fixed size. Each CPU attempts to
1151
+ balance every domain that it belongs to at fixed, regular inter-
1152
+ vals between successive timesteps. The domains are balanced
1153
+ in the order of their level in the domain hierarchy.
1154
+ A task is represented as a collection of real variables to
1155
+ denote properties such as the weighted moving average of the
1156
+ time it was runnable (i.e., runnable average) and its running
1157
+ time (i.e., utilization average). Variables representing the prop-
1158
+ erties of tasks are defined for each timestep. We do not model
1159
+ asynchronous load balancing due to a CPU becoming idle,
1160
+ and assume tasks are runnable through all modeled timesteps.
1161
+ Nevertheless, the solver is free to choose any initial values.
1162
+ For instance, it can pick any initial value of the utilization
1163
+ average and the runnable average of tasks as long as the latter
1164
+ is larger. Since the choice of initial values is unconstrained,
1165
+ this model is akin to simulating a few microseconds start-
1166
+ ing from any reachable state of the system, including those
1167
+ caused by tasks alternating arbitrarily between runnable and
1168
+ non-runnable.
1169
+ The domain hierarchy, the number of tasks, and the num-
1170
+ ber of timesteps are configurable parameters. Real valued
1171
+ variables representing CPUs, groups, and domains are also
1172
+ defined for each timestep. For instance, the utilization average
1173
+ of each CPU tracks the sum of utilization averages of each
1174
+ task in its runqueue (i.e., runnable tasks assigned to that CPU).
1175
+ Similarly, a group’s utilization is just the sum of the utilization
1176
+ averages of CPUs contained in it. A CPU × Task boolean
1177
+ matrix encodes which task is runnin on which CPU at each
1178
+ timestep. Finally, we add constraints that connect metrics and
1179
+ the schedule at successive timesteps, closely following the
1180
+ actual code. As an example, the following constraints rep-
1181
+ resent IsResponsible(c, sd) (line 36), the function that
1182
+ determines if a CPU c is responsible to balance domain sd at
1183
+ timestep t:
1184
+ first_idle(t,c,sd)∨first_cpu(t,c,sd)
1185
+ 9
1186
+
1187
+ where,
1188
+ first_idle(c,sd) := c.nr_running[t −1] = 0
1189
+
1190
+ (∀c′ ∈ sd,c′.index< c.index =⇒
1191
+ c′.nr_running[t −1] > 0)
1192
+ first_cpu(c,sd) := c.index= 0
1193
+
1194
+ (∀c′ ∈ sd,c′.nr_running[t −1] > 0)
1195
+ c.nr_running[t −1] is the number of tasks running in CPU c
1196
+ at timestep t −1 and c.index is a statically assigned index to
1197
+ each CPU. This constraint is applied to every (CPU, domain)
1198
+ pair to find the responsible CPU at each timestep.
1199
+ 6.2
1200
+ Queries and Verification
1201
+ For this model, we focus on the Q1 class of questions. In
1202
+ particular, we formulate a single query to verify whether the
1203
+ algorithm is work conservating property of the algorithm.
1204
+ First we set the number of runnable tasks to be greater than
1205
+ the number of CPUs and ask whether at the end of T timesteps,
1206
+ any CPU is idle: ∃c ∈ CPUs,
1207
+ c.nr_running[T] == 0
1208
+ For a work conserving scheduler, the answer should be
1209
+ “no”, no matter how tasks are distributed initially. For small
1210
+ T, imbalance is acceptable and sometimes intended by the
1211
+ scheduler to avoid excessive task movement due to sudden
1212
+ changes in workload. We want to detect workloads where it
1213
+ never balances, no matter how long load is imbalanced. We
1214
+ run our model with up to 4 CPUs, 7 tasks, and 6 timesteps.
1215
+ All the bugs discovered with this setup were also found with
1216
+ just 3 timesteps.
1217
+ 6.2.1
1218
+ Bug 1: Busiest CPU with a single task
1219
+ As described earlier, there are multiple migration types
1220
+ that are determined based on the state of the system.
1221
+ MIGRATE_UTIL is chosen when the busiest group is over-
1222
+ loaded, and the current group has spare capacity (Line 6).
1223
+ Such a decision can be made in the scenario shown in Fig-
1224
+ ure 8 when the utilization average of all tasks in Group 1
1225
+ is high (e.g. their sum is greater than 90% of the group’s
1226
+ capacity).
1227
+ Choosing MIGRATE_UTIL implies that the algorithm’s goal
1228
+ is to balance the utilization average amongst groups by steal-
1229
+ ing tasks from the busiest CPU. The CPU with the highest
1230
+ utilization average in the busiest group is determined to be
1231
+ the busiest (Line 17). A CPU’s utilization average is just the
1232
+ sum of utilization averages of the tasks in its runqueue. The
1233
+ utilization average of a task is defined as the weighted moving
1234
+ average of its running time in the past.
1235
+ In our example, CPU2 can be the busiest CPU, even though
1236
+ it has a single task, if that task has a higher utilization average
1237
+ than the sum of the utilization averages of tasks on CPU1.
1238
+ However CPU3 steal work from CPU2 since it only has a
1239
+ single runnable task. This constraint helps avoid bouncing
1240
+ tasks between idle CPUs, but causes the scheduler to not be
1241
+ work conserving. One way to fix this ‘bug’ is to define the
1242
+ busiest CPU to be the one with the highest utilization average
1243
+ CPU1
1244
+ CPU2
1245
+ CPU3
1246
+ CPU4
1247
+ Group 1
1248
+ Group 2
1249
+ Bug 2
1250
+ Bug 1
1251
+ Figure 8: Example state to showcase the bugs. Except CPU3,
1252
+ each CPU’s runqueue has tasks represented as rectangles. CPU3
1253
+ is idle. Under different settings, CPU3 may be unable to steal
1254
+ any task from Group 1.
1255
+ that has more than one runnable task (line 14). After finding
1256
+ the bug using this model, we realized that it was also identified
1257
+ by the Linux community and fixed in Linux v5.7 [16] .
1258
+ 6.2.2
1259
+ Bug 2: Imbalance with idle CPUs
1260
+ With the previous bug fixed, we reran the work conservation
1261
+ query, identifying another bug. This bug happens when the
1262
+ migration type ‘MIGRATE_TASK’ is selected. Task migration
1263
+ is chosen when the the current group has spare capacity, but
1264
+ the conditions for the type ‘MIGRATE_UTIL‘ are not satisfied.
1265
+ In particular, it can be selected when no group is overloaded.
1266
+ Here, the algorithm’s goal is to balance the number of tasks
1267
+ between groups. The busiest CPU in the busiest group is
1268
+ decided based on the number of tasks in the runqueue of the
1269
+ CPU. Imbalance is defined as half of the difference between
1270
+ the number of idle CPUs in the busiest and the current group
1271
+ (Line 32). At first glance, this seems reasonable and should
1272
+ even out the number of idle CPUs. However, Linux can only
1273
+ perform integer division, leading to calculating the imbalance
1274
+ to be zero when the difference in idle CPUs is 1. This behavior
1275
+ is captured by the scenario in Figure 8 when group 1 is not
1276
+ overloaded. CPU1 is deemed as the busiest CPU with 3 tasks,
1277
+ but CPU3 is still unable to steal any of them.
1278
+ It is worth noting that both these issues intricately depend
1279
+ on the workload characteristics generated by our solver. Slight
1280
+ deviation in these characteristics can lead to a completely dif-
1281
+ ferent outcome. For instance, a different migration type may
1282
+ apply as the number of tasks or the blocking pattern of ex-
1283
+ isting tasks evolves. A different measure of imbalance may
1284
+ be able to push tasks to idle CPUs and balance load more
1285
+ evenly in general. Finely controlling the workload charac-
1286
+ teristics in synthetic benchmarks that highlight these bugs
1287
+ is difficult. This makes it hard for fuzzers and other tests to
1288
+ detect them. However, it does not preclude them from appear-
1289
+ ing in the real world. Our framework is not limited by this,
1290
+ and can freely search the workload space to generate intricate
1291
+ violating traces.
1292
+ 7
1293
+ Case study: TCP Synchronization in Ring
1294
+ Allreduce Training
1295
+ The above examples look at well-known schedulers. Perhaps
1296
+ the most compelling use-case for our methodology is to un-
1297
+ derstand less well-studied systems. As an example, we consid-
1298
+ ered a recent paper [37] which examines complex emergent
1299
+ 10
1300
+
1301
+ 0
1302
+ 10
1303
+ 20
1304
+ 30
1305
+ 40
1306
+ 50
1307
+ 60
1308
+ 70
1309
+ 0
1310
+ 0.1
1311
+ 0.2
1312
+ 0.3
1313
+ 0.4
1314
+ 0.5
1315
+ 0.6
1316
+ 0.7
1317
+ 0.8
1318
+ 0.9
1319
+ 1
1320
+ 1.1
1321
+ 1.2
1322
+ 1.3
1323
+ 1.4
1324
+ 1.5
1325
+ Link Utilization (Gbps)
1326
+ Time (sec)
1327
+ (b) Unfair bandwidth sharing
1328
+ 0
1329
+ 10
1330
+ 20
1331
+ 30
1332
+ 40
1333
+ 50
1334
+ 60
1335
+ 70
1336
+ 0
1337
+ 0.1
1338
+ 0.2
1339
+ 0.3
1340
+ 0.4
1341
+ 0.5
1342
+ 0.6
1343
+ 0.7
1344
+ 0.8
1345
+ 0.9
1346
+ 1
1347
+ 1.1
1348
+ 1.2
1349
+ 1.3
1350
+ 1.4
1351
+ 1.5
1352
+ Link Utilization (Gbps)
1353
+ Time (sec)
1354
+ (a) Fair bandwidth sharing
1355
+ J1 Comm. phase
1356
+ J2 Comm. phase
1357
+ J2 Comm. phase
1358
+ J2 Comm. phase
1359
+ J1 Comm. phase
1360
+ J1 Comm. phase
1361
+ J1 Comm. phase
1362
+ J2 Comm. phase
1363
+ J1
1364
+ J1
1365
+ J2
1366
+ J2
1367
+ J1 takes more bandwidth
1368
+ because its congestion control
1369
+ algorithm is more aggressive
1370
+ Iteration 1
1371
+ Iteration 2
1372
+ Iteration 3
1373
+ Jobs share bandwidth equally
1374
+ J1 starts its comm. phase
1375
+ for the next iteration before J2
1376
+ Unfairness keeps sliding the
1377
+ comm. phase of J2 to the right
1378
+ Jobs share bandwidth equally
1379
+ Jobs share bandwidth equally
1380
+ The sliding continues until there is minimal overlap between the
1381
+ comm. phases of jobs. Subsequent iterations maintain this pattern.
1382
+ Jobs share bandwidth equally
1383
+ Iteration 4
1384
+ J2 Comm. phase
1385
+ J1 Comm. phase
1386
+ J2 Comm. phase
1387
+ J1 Comm. phase
1388
+ Iteration 5
1389
+ Jobs share bandwidth equally
1390
+ Figure 9: Intuition behind why synchronization occurs. Reproduced from [37] with permission.
1391
+ Link
1392
+ (a)
1393
+ Link
1394
+ Link
1395
+ (b)
1396
+ Link
1397
+ Link
1398
+ Link
1399
+ (c)
1400
+ Link
1401
+ Link
1402
+ Link
1403
+ (d)
1404
+ Figure 10: Configurations of ring reduce jobs sharing links
1405
+ behavior in a cluster running multiple distributed neural net-
1406
+ work training jobs. Neural network training processes data in
1407
+ batches, computing the gradient of the error function for each
1408
+ batch to adjust the weights before processing the next batch.
1409
+ The computation-communication pattern for each batch is
1410
+ similar, making the process predictable and providing oppor-
1411
+ tunities for better scheduling [35,43,46].
1412
+ A training job consists of n servers connected logically in
1413
+ a ring. During training, servers have phases involving intense
1414
+ computation followed by communication along the ring. A
1415
+ datacenter may run multiple jobs sharing the same physical
1416
+ network. As a result, some of the logical links may be shared
1417
+ with other training jobs as shown in figure 10. One can maxi-
1418
+ mize network utilization by scheduling one job’s computation
1419
+ while the other communicates. This way, each job gets the
1420
+ full available bandwidth to itself when it is communicating.
1421
+ The reference paper [37] observes that jobs spontaneously
1422
+ synchronize to this desired schedule due to TCP unfairness.
1423
+ In this section, we verify this claim under complex settings
1424
+ and find it to be true.
1425
+ Preliminaries: We provide a brief overview of data-parallel
1426
+ distributed training using the ring-all-reduce method, omitting
1427
+ details that are not necessary to understand the scheduling
1428
+ problem. Each round of training processes one batch and
1429
+ consists of three phases: backpropagation, sum and broadcast.
1430
+ The first is purely computation and the rest are primarily
1431
+ communication. The model is divided into n pieces. Servers
1432
+ transmit data as soon as it is available. When summing, the
1433
+ % Completion
1434
+ Time
1435
+ Backpropagation
1436
+ Sum
1437
+ Broadcast
1438
+ Start of
1439
+ round i
1440
+ Start of
1441
+ round i+1
1442
+ 100%
1443
+ 0%
1444
+ A
1445
+ B
1446
+ C
1447
+ D
1448
+ E
1449
+ F
1450
+ Figure 11: Example of one server’s view of ring reduce during
1451
+ one round. Point A shows that server can send the first 1/nth
1452
+ chunk of data as soon as it has computed the gradients for
1453
+ that segment of the network. Till point B, it is waiting for the
1454
+ previous server to send data. Between B to C, it is sending at
1455
+ link rate. Between C and D, it is sharing the link with another
1456
+ job, due to which it sends at a lower rate. Once sum finishes,
1457
+ broadcast can start. Again, the first chunk can be sent without
1458
+ waiting for data from the previous server (point E). After this,
1459
+ it sends at less than line rate because it is waiting on data from
1460
+ the previous server, which it forwards as soon as possible.
1461
+ first piece is available as soon as backpropagation is done
1462
+ processing that part of the neural network. After this the
1463
+ server must wait for data from the preceeding server before
1464
+ forwarding it. The same holds for the broadcast step, except
1465
+ that the first piece is immediately available.
1466
+ Figure 9 is an experimental result showing the intuition
1467
+ behind why synchronization occurs. Suppose job 1 has been
1468
+ transmitting on the shared link at full link capacity, and a new
1469
+ job 2 starts transmitting over the same link. Both jobs will
1470
+ detect the congestion and adjust their sending rates so that
1471
+ both transmit at half of the full link capacity. However, it takes
1472
+ time for the transmission rates to reach this new equilibrium
1473
+ and many congestion control algorithms never reach it. In
1474
+ the meantime, job 1 gets more bandwidth than flow 2. As a
1475
+ result, the job corresponding to flow 1 will finish its sum and
1476
+ broadcast steps earlier than it otherwise would have, which
1477
+ will make it start even sooner for the next batch. This will
1478
+ continue until the transmissions on the shared link are fully
1479
+ separated in time – which is what we ideally want.
1480
+ This argument works for when there are just two rings.
1481
+ But what if there are more? One shared link could be forcing
1482
+ the job to slowly its schedule in one direction, while another
1483
+ shared link has an opposite effect. There could even be cycles.
1484
+ Figure 10 illustrates representative examples of the topologies
1485
+ for which we verify synchronization.
1486
+ 11
1487
+
1488
+ Figure 11 shows some model variables for a single server
1489
+ at each timestep. These include the percentage of backprop-
1490
+ agation, sum, and broadcast finished. It also includes an in-
1491
+ teger indicating the round number. A round is defined as the
1492
+ processing of a batch. We keep the state transition function
1493
+ between timesteps simple. For example, the number of flows
1494
+ transmitting on any link is fixed between two timesteps. How-
1495
+ ever, the time gap between timesteps is variable, allowing the
1496
+ solver to add a timestep whenever this changes. This way, we
1497
+ do not discretize time.
1498
+ In several places, we overapproximate. For example, we
1499
+ do not model the congestion control algorithm explicitly, but
1500
+ instead, constrain that (1) the link is fully utilized and (2)
1501
+ whichever job starts transmitting first gets more bandwidth.
1502
+ This models a wide range of congestion control behaviors
1503
+ while keeping the reasoning simple for both computers and
1504
+ humans. Additionally, we allow a server to send the first 1/nth
1505
+ chunk of "sum" and "broadcast" data as soon as it is done
1506
+ computing, without waiting for data from its predecessor. The
1507
+ amount of data it can send depends on the neural network
1508
+ architecture and system structure. Hence we let the solver
1509
+ arbitrarily pick the how much data is available.
1510
+ To verify synchronization, we constrain that the communi-
1511
+ cation of each job can fit inside the computation time of its
1512
+ neighbors, which is the only case in which the results in our
1513
+ reference paper hold. We then ask the solver to find a case
1514
+ where overlap in communication increases (or remains con-
1515
+ stant). If the solver cannot find such a case, we have proved
1516
+ that synchronization will always occur. It is important to note
1517
+ that the solver only proves this for a small and finite num-
1518
+ ber of events, and the initial state is unconstrained. However,
1519
+ this also proves that a larger sequence of events cannot exist
1520
+ where communication overlap increases, as if such a sequence
1521
+ were to exist, there would be a short sub-sequence where it
1522
+ increases, which we have proved is impossible.
1523
+ 8
1524
+ Related Work
1525
+ Automated Verification. A long history of automated ver-
1526
+ ification of schedulers can be traced back to mechanized
1527
+ proofs of real-time scheduling algorithms using proof assis-
1528
+ tants like Coq. These include proofs for specific algorithms
1529
+ such as Priority Inheritance [45], implementations in systems
1530
+ like CertiKOS [21], and frameworks to design them [8,28].
1531
+ These focus on qualitative correctness properties such as en-
1532
+ suring no task misses a deadline. In contrast, we focus on
1533
+ verifying quantitative performance properties and optimality.
1534
+ In a similar vein, model checking has achieved incredible
1535
+ success in formally verifying large scale systems including
1536
+ network stacks [33,36,41], language runtimes [23,25], and
1537
+ databases [20]. Large parts of the Linux subsystems have
1538
+ also been verified [19, 42]. Our approach has been directly
1539
+ inspired from model checking principles of building abstract
1540
+ models with specifications to verify it. However, existing
1541
+ model checking tools are restricted to identifying correctness
1542
+ bugs. We provide a framework to apply these principles to
1543
+ identify subtle performance bugs in schedulers.
1544
+ The verification work closest to our approach is CCAC
1545
+ [2]. CCAC models congestion control algorithms to formally
1546
+ verify their performance or generate violating traces. While
1547
+ CCAC provides a specialized tool for modeling congestion
1548
+ control algorithms, our framework is general and is applicable
1549
+ to a larger class of scheduling algorithms.
1550
+ Tracing and Benchmarking. All major operating systems
1551
+ include specialized tracers to analyze kernel performance.
1552
+ ftrace and perf_events are tracers built into Linux. They
1553
+ provide hooks to instrument various subsystems including
1554
+ the scheduler, and generate performance statistics. More re-
1555
+ cently, eBPF [39] and SystemTap [38] extend this function-
1556
+ ality by allowing custom user code to run inside the kernel
1557
+ to detect specific behavior. These tracers are used in conjunc-
1558
+ tion with standard benchmark suites to detect performance
1559
+ issues. Hackbench [13] benchmarks the average latency for
1560
+ communication between tasks, and has become the defacto
1561
+ standard to test improvements on the Linux load balancer.
1562
+ While tracers and benchmarks are often useful in detecting
1563
+ and explaining unexpected performance behavior, they pro-
1564
+ vide no guarantees on absence of bugs. Our framework, on
1565
+ the other hand, is an attempt to provide formal guarantees on
1566
+ scheduler performance.
1567
+ 9
1568
+ Limitations and Conclusion
1569
+ While we believe our methodology to be generally applicable
1570
+ to all scheduling algorithms, it is not a panacea for detecting
1571
+ all problems in an algorithm. Our methodology is dependent
1572
+ on the user’s ingenuity in creating tractable models and formu-
1573
+ lating relevant queries. For example, our model of the Linux
1574
+ CFS load balancer included several simplifying assumptions
1575
+ (e.g., ignoring asynchronous load balancing steps), yet the
1576
+ model was useful enough to detect practical bugs. Further,
1577
+ our modeling approach can only find performance issues for
1578
+ which a user formulates a metric and query; deeper issues
1579
+ which are not exposed by user–generated queries are not de-
1580
+ tected by our approach. Solver performance limitations mean
1581
+ that we can only perform bounded model checking, so our
1582
+ models won’t detect problems that happen only in the pres-
1583
+ ence of large number of tasks or events. However, we find
1584
+ that in many scenarios, it’s possible to generalize our results
1585
+ by creating models that focus on critical system components
1586
+ and formulating reasonable queries.
1587
+ In conclusion, we have demonstrated that formal meth-
1588
+ ods can provide a deeper and more rigorous understanding of
1589
+ scheduling heuristics used in practice. Further, since we verify
1590
+ the specification of these heuristics, and not the code, veri-
1591
+ fication effort is minimal. The most time consuming part of
1592
+ our method is posing the right queries and interpreting coun-
1593
+ terexamples in context. We invite the community to adopt
1594
+ quantitaive verification as a part of their worklflow when de-
1595
+ veloping scheduling heuristics.
1596
+ 12
1597
+
1598
+ References
1599
+ [1] Stefan Andrei, Albert M. K. Cheng, Vlad Radulescu,
1600
+ Sharfuddin Alam, and Suresh Vadlakonda.
1601
+ A new
1602
+ scheduling algorithm for non-preemptive independent
1603
+ tasks on a multi-processor platform.
1604
+ SIGBED Rev.,
1605
+ 13(2):24–29, apr 2016.
1606
+ [2] Venkat Arun, Mina Tahmasbi Arashloo, Ahmed Saeed,
1607
+ Mohammad Alizadeh, and Hari Balakrishnan. Toward
1608
+ formally verifying congestion control behavior. In Pro-
1609
+ ceedings of the 2021 ACM SIGCOMM 2021 Conference,
1610
+ SIGCOMM ’21, page 1–16, New York, NY, USA, 2021.
1611
+ Association for Computing Machinery.
1612
+ [3] Nikhil Bansal and David Gamarnik. Handling load with
1613
+ less stress. Queueing Systems, 54:45–54, 2006.
1614
+ [4] Nikhil Bansal and Mor Harchol-Balter. Analysis of
1615
+ SRPT scheduling: Investigating unfairness. In Proceed-
1616
+ ings of the 2001 ACM SIGMETRICS International Con-
1617
+ ference on Measurement and Modeling of Computer
1618
+ Systems, SIGMETRICS ’01, page 279–290, New York,
1619
+ NY, USA, 2001. Association for Computing Machinery.
1620
+ [5] Ryan Beckett, Aarti Gupta, Ratul Mahajan, and David
1621
+ Walker. A general approach to network configuration
1622
+ verification. In Proceedings of the Conference of the
1623
+ ACM Special Interest Group on Data Communication,
1624
+ SIGCOMM ’17, page 155–168, New York, NY, USA,
1625
+ 2017. Association for Computing Machinery.
1626
+ [6] Robert D. Blumofe and Charles E. Leiserson. Schedul-
1627
+ ing multithreaded computations by work stealing. J.
1628
+ ACM, 46(5):720–748, sep 1999.
1629
+ [7] James Bornholt, Rajeev Joshi, Vytautas Astrauskas,
1630
+ Brendan Cully, Bernhard Kragl, Seth Markle, Kyle
1631
+ Sauri, Drew Schleit, Grant Slatton, Serdar Tasiran, Jacob
1632
+ Van Geffen, and Andrew Warfield. Using lightweight
1633
+ formal methods to validate a key-value storage node in
1634
+ amazon s3. In Proceedings of the ACM SIGOPS 28th
1635
+ Symposium on Operating Systems Principles, SOSP ’21,
1636
+ page 836–850, New York, NY, USA, 2021. Association
1637
+ for Computing Machinery.
1638
+ [8] Felipe Cerqueira, Felix Stutz, and Björn B. Brandenburg.
1639
+ Prosa: A case for readable mechanized schedulability
1640
+ analysis. In 2016 28th Euromicro Conference on Real-
1641
+ Time Systems (ECRTS), pages 273–284, 2016.
1642
+ [9] Tej Chajed, Joseph Tassarotti, M. Frans Kaashoek, and
1643
+ Nickolai Zeldovich. Verifying concurrent, crash-safe
1644
+ systems with perennial. In Proceedings of the 27th ACM
1645
+ Symposium on Operating Systems Principles, SOSP ’19,
1646
+ page 243–258, New York, NY, USA, 2019. Association
1647
+ for Computing Machinery.
1648
+ [10] Leonardo Mendonça de Moura and Nikolaj S. Bjørner.
1649
+ Z3: an efficient SMT solver. In Tools and Algorithms
1650
+ for the Construction and Analysis of Systems, 14th In-
1651
+ ternational Conference, TACAS, pages 337–340, 2008.
1652
+ [11] Seyed K. Fayaz, Tushar Sharma, Ari Fogel, Ratul Maha-
1653
+ jan, Todd Millstein, Vyas Sekar, and George Varghese.
1654
+ Efficient network reachability analysis using a succinct
1655
+ control plane representation. In Proceedings of the 12th
1656
+ USENIX Conference on Operating Systems Design and
1657
+ Implementation, OSDI’16, page 217–232, USA, 2016.
1658
+ USENIX Association.
1659
+ [12] Ari Fogel, Stanley Fung, Luis Pedrosa, Meg Walraed-
1660
+ Sullivan, Ramesh Govindan, Ratul Mahajan, and Todd
1661
+ Millstein. A general approach to network configuration
1662
+ analysis. In Proceedings of the 12th USENIX Confer-
1663
+ ence on Networked Systems Design and Implementation,
1664
+ NSDI’15, page 469–483, USA, 2015. USENIX Associ-
1665
+ ation.
1666
+ [13] The Linux Foundation.
1667
+ Hackbench.
1668
+ https:
1669
+ //wiki.linuxfoundation.org/realtime/
1670
+ documentation/howto/tools/hackbench.
1671
+ [14] The Linux Foundation. Linux Kernel v5.5. https:
1672
+ //elixir.bootlin.com/linux/v5.5-rc2/source,
1673
+ 2020.
1674
+ [15] The Linux Foundation.
1675
+ Linux Kernel v5.5 Load
1676
+ Balancer lore.
1677
+ https://lore.kernel.org/lkml/
1678
+ 1571405198-27570-1-git-send-email-vincent.
1679
+ guittot@linaro.org/, 2020.
1680
+ [16] The
1681
+ Linux
1682
+ Foundation.
1683
+ Linux
1684
+ Kernel
1685
+ v5.7
1686
+ Load
1687
+ Balancer
1688
+ commit
1689
+ fix.
1690
+ https:
1691
+ //github.com/torvalds/linux/commit/
1692
+ c32b4308295aaaaedd5beae56cb42e205ae63e58,
1693
+ 2020.
1694
+ [17] The Linux Foundation.
1695
+ Linux Load Balancer
1696
+ sched/fair.c.
1697
+ https://elixir.bootlin.com/
1698
+ linux/v5.5.19/source/kernel/sched/fair.c,
1699
+ 2020.
1700
+ [18] Joshua Fried, Zhenyuan Ruan, Amy Ousterhout, and
1701
+ Adam Belay. Caladan: Mitigating interference at mi-
1702
+ crosecond timescales.
1703
+ In Proceedings of the 14th
1704
+ USENIX Conference on Operating Systems Design and
1705
+ Implementation, OSDI’20, USA, 2020. USENIX Asso-
1706
+ ciation.
1707
+ [19] Andy Galloway, Gerald Lüttgen, Jan Tobias Mühlberg,
1708
+ and Radu I. Siminiceanu. Model-checking the linux
1709
+ virtual file system. In Neil D. Jones and Markus Müller-
1710
+ Olm, editors, Verification, Model Checking, and Abstract
1711
+ Interpretation, pages 74–88, Berlin, Heidelberg, 2009.
1712
+ Springer Berlin Heidelberg.
1713
+ 13
1714
+
1715
+ [20] Milos Gligoric and Rupak Majumdar. Model checking
1716
+ database applications. In Proceedings of the 19th In-
1717
+ ternational Conference on Tools and Algorithms for the
1718
+ Construction and Analysis of Systems, TACAS’13, page
1719
+ 549–564, Berlin, Heidelberg, 2013. Springer-Verlag.
1720
+ [21] Xiaojie Guo, Maxime Lesourd, Mengqi Liu, Lionel
1721
+ Rieg, and Zhong Shao. Integrating formal schedula-
1722
+ bility analysis into a verified os kernel. In Isil Dillig
1723
+ and Serdar Tasiran, editors, Computer Aided Verifica-
1724
+ tion, pages 496–514. Springer International Publishing,
1725
+ 2019.
1726
+ [22] Travis Hance, Andrea Lattuada, Chris Hawblitzel, Jon
1727
+ Howell, Rob Johnson, and Bryan Parno. Storage sys-
1728
+ tems are distributed systems (so verify them that way!).
1729
+ In Proceedings of the 14th USENIX Conference on Op-
1730
+ erating Systems Design and Implementation, OSDI’20,
1731
+ USA, 2020. USENIX Association.
1732
+ [23] Thomas A. Henzinger, Ranjit Jhala, Rupak Majumdar,
1733
+ and Grégoire Sutre. Software verification with blast. In
1734
+ Thomas Ball and Sriram K. Rajamani, editors, Model
1735
+ Checking Software, pages 235–239, Berlin, Heidelberg,
1736
+ 2003. Springer Berlin Heidelberg.
1737
+ [24] Jeehoon Kang, Yoonseung Kim, Youngju Song, Juney-
1738
+ oung Lee, Sanghoon Park, Mark Dongyeon Shin,
1739
+ Yonghyun Kim, Sungkeun Cho, Joonwon Choi, Chung-
1740
+ Kil Hur, and Kwangkeun Yi. Crellvm: Verified credible
1741
+ compilation for llvm. In Proceedings of the 39th ACM
1742
+ SIGPLAN Conference on Programming Language De-
1743
+ sign and Implementation, PLDI 2018, page 631–645,
1744
+ New York, NY, USA, 2018. Association for Computing
1745
+ Machinery.
1746
+ [25] Daniel Kroening and Michael Tautschnig.
1747
+ Cbmc –
1748
+ c bounded model checker.
1749
+ In Erika Ábrahám and
1750
+ Klaus Havelund, editors, Tools and Algorithms for the
1751
+ Construction and Analysis of Systems, pages 389–391,
1752
+ Berlin, Heidelberg, 2014. Springer Berlin Heidelberg.
1753
+ [26] Xavier Leroy. Formal verification of a realistic compiler.
1754
+ Commun. ACM, 52(7):107–115, jul 2009.
1755
+ [27] Jed Liu, William Hallahan, Cole Schlesinger, Milad
1756
+ Sharif, Jeongkeun Lee, Robert Soulé, Han Wang, C˘alin
1757
+ Ca¸scaval, Nick McKeown, and Nate Foster. P4v: Practi-
1758
+ cal verification for programmable data planes. In Pro-
1759
+ ceedings of the 2018 Conference of the ACM Special
1760
+ Interest Group on data communication, pages 490–503,
1761
+ 2018.
1762
+ [28] Mengqi Liu, Lionel Rieg, Zhong Shao, Ronghui Gu,
1763
+ David Costanzo, Jung-Eun Kim, and Man-Ki Yoon. Vir-
1764
+ tual timeline: A formal abstraction for verifying pre-
1765
+ emptive schedulers with temporal isolation. Proc. ACM
1766
+ Program. Lang., 4(POPL), dec 2019.
1767
+ [29] Nuno P.
1768
+ Lopes, Juneyoung Lee, Chung-Kil Hur,
1769
+ Zhengyang Liu, and John Regehr. Alive2: Bounded
1770
+ translation validation for llvm. In Proceedings of the
1771
+ 42nd ACM SIGPLAN International Conference on Pro-
1772
+ gramming Language Design and Implementation, PLDI
1773
+ 2021, page 65–79, New York, NY, USA, 2021. Associa-
1774
+ tion for Computing Machinery.
1775
+ [30] Nuno P. Lopes, David Menendez, Santosh Nagarakatte,
1776
+ and John Regehr.
1777
+ Practical verification of peephole
1778
+ optimizations with alive. Commun. ACM, 61(2):84–91,
1779
+ jan 2018.
1780
+ [31] Jean-Pierre Lozi, Baptiste Lepers, Justin Funston, Fa-
1781
+ bien Gaud, Vivien Quéma, and Alexandra Fedorova.
1782
+ The linux scheduler: A decade of wasted cores.
1783
+ In
1784
+ Proceedings of the Eleventh European Conference on
1785
+ Computer Systems, EuroSys ’16, New York, NY, USA,
1786
+ 2016. Association for Computing Machinery.
1787
+ [32] Sarah McClure, Amy Ousterhout, Scott Shenker, and
1788
+ Sylvia Ratnasamy.
1789
+ Efficient scheduling policies for
1790
+ Microsecond-Scale tasks.
1791
+ In 19th USENIX Sympo-
1792
+ sium on Networked Systems Design and Implementa-
1793
+ tion (NSDI 22), pages 1–18, Renton, WA, April 2022.
1794
+ USENIX Association.
1795
+ [33] Madanlal Musuvathi and Dawson R. Engler. Model
1796
+ checking large network protocol implementations. In
1797
+ Proceedings of the 1st Conference on Symposium on Net-
1798
+ worked Systems Design and Implementation - Volume 1,
1799
+ NSDI’04, page 12, USA, 2004. USENIX Association.
1800
+ [34] Amy Ousterhout, Joshua Fried, Jonathan Behrens, Adam
1801
+ Belay, and Hari Balakrishnan.
1802
+ Shenango: Achiev-
1803
+ ing high cpu efficiency for latency-sensitive datacenter
1804
+ workloads. In Proceedings of the 16th USENIX Confer-
1805
+ ence on Networked Systems Design and Implementation,
1806
+ NSDI’19, page 361–377, USA, 2019. USENIX Associ-
1807
+ ation.
1808
+ [35] Yanghua Peng, Yibo Zhu, Yangrui Chen, Yixin Bao,
1809
+ Bairen Yi, Chang Lan, Chuan Wu, and Chuanxiong Guo.
1810
+ A generic communication scheduler for distributed dnn
1811
+ training acceleration. In Proceedings of the 27th ACM
1812
+ Symposium on Operating Systems Principles, SOSP ’19,
1813
+ page 16–29, New York, NY, USA, 2019. Association
1814
+ for Computing Machinery.
1815
+ [36] Santhosh Prabhu, Kuan-Yen Chou, Ali Kheradmand,
1816
+ P. Brighten Godfrey, and Matthew Caesar. Plankton:
1817
+ Scalable network configuration verification through
1818
+ model checking. In Proceedings of the 17th Usenix Con-
1819
+ ference on Networked Systems Design and Implemen-
1820
+ tation, NSDI’20, page 953–968, USA, 2020. USENIX
1821
+ Association.
1822
+ 14
1823
+
1824
+ [37] Sudarsanan Rajasekaran, Manya Ghobadi, Gautam Ku-
1825
+ mar, and Aditya Akella. Congestion control in machine
1826
+ learning clusters. ACM HotNets 2022, 2022.
1827
+ [38] Red Hat.
1828
+ SystemTap.
1829
+ https://sourceware.org/
1830
+ systemtap/, 2005.
1831
+ [39] Alastair Robertson.
1832
+ bpftrace: High-level tracing
1833
+ language for Linux eBPF.
1834
+ https://github.com/
1835
+ iovisor/bpftrace, 2019.
1836
+ [40] Linus Schrage and Louis Miller.
1837
+ The queue M/G/1
1838
+ with the shortest remaining processing time discipline.
1839
+ Operations Research, 14(4):670–684, 1966.
1840
+ [41] Wei Sun, Lisong Xu, Sebastian Elbaum, and Di Zhao.
1841
+ Model-agnostic and efficient exploration of numerical
1842
+ state space of real-world tcp congestion control imple-
1843
+ mentations. In Proceedings of the 16th USENIX Confer-
1844
+ ence on Networked Systems Design and Implementation,
1845
+ NSDI’19, page 719–733, USA, 2019. USENIX Associ-
1846
+ ation.
1847
+ [42] Thomas Witkowski, Nicolas Blanc, Daniel Kroening,
1848
+ and Georg Weissenbacher. Model checking concurrent
1849
+ linux device drivers.
1850
+ In Proceedings of the Twenty-
1851
+ Second IEEE/ACM International Conference on Auto-
1852
+ mated Software Engineering, ASE ’07, page 501–504,
1853
+ New York, NY, USA, 2007. Association for Computing
1854
+ Machinery.
1855
+ [43] Wencong Xiao, Romil Bhardwaj, Ramachandran Ram-
1856
+ jee, Muthian Sivathanu, Nipun Kwatra, Zhenhua Han,
1857
+ Pratyush Patel, Xuan Peng, Hanyu Zhao, Quanlu Zhang,
1858
+ Fan Yang, and Lidong Zhou. Gandiva: Introspective
1859
+ cluster scheduling for deep learning. In 13th USENIX
1860
+ Symposium on Operating Systems Design and Imple-
1861
+ mentation (OSDI 18), pages 595–610, Carlsbad, CA,
1862
+ October 2018. USENIX Association.
1863
+ [44] Irene Zhang, Amanda Raybuck, Pratyush Patel, Kirk
1864
+ Olynyk, Jacob Nelson, Omar S. Navarro Leija, Ash-
1865
+ lie Martinez, Jing Liu, Anna Kornfeld Simpson, Sujay
1866
+ Jayakar, Pedro Henrique Penna, Max Demoulin, Piali
1867
+ Choudhury, and Anirudh Badam. The demikernel dat-
1868
+ apath os architecture for microsecond-scale datacenter
1869
+ systems. In Proceedings of the ACM SIGOPS 28th Sym-
1870
+ posium on Operating Systems Principles, SOSP ’21,
1871
+ page 195–211, New York, NY, USA, 2021. Association
1872
+ for Computing Machinery.
1873
+ [45] Xingyuan Zhang, Christian Urban, and Chunhan Wu.
1874
+ Priority inheritance protocol proved correct. J. Autom.
1875
+ Reason., 64(1):73–95, jan 2020.
1876
+ [46] Yihao Zhao, Yuanqiang Liu, Yanghua Peng, Yibo Zhu,
1877
+ Xuanzhe Liu, and Xin Jin. Multi-resource interleav-
1878
+ ing for deep learning training. In Proceedings of the
1879
+ ACM SIGCOMM 2022 Conference, SIGCOMM ’22,
1880
+ page 428–440, New York, NY, USA, 2022. Association
1881
+ for Computing Machinery.
1882
+ 15
1883
+
59E2T4oBgHgl3EQf7Ahe/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
89AyT4oBgHgl3EQfdPcr/content/tmp_files/2301.00297v1.pdf.txt ADDED
@@ -0,0 +1,475 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+ Deterministic and Nondeterministic Particle Motion with Interaction
4
+ Mechanisms
5
+ Cameron McNamee1,2*, Renee Reijo Pera1
6
+ 1McLaugling Research Institute, Great Falls, MT, USA
7
+ 2Department of Mathematics, California Institute of Technology, Pasadena, CA, USA
8
+ * Correspondence:
9
+ cmcnamee@caltech.edu
10
+ Keywords: particle modeling, migration, interaction mechanisms, deterministic modeling,
11
+ Beauchemin modeling
12
+ Abstract
13
+ Studying systems where many individual bodies in motion interact with one another is a complex and
14
+ interesting area. Simple mechanisms that may be determined for biological, chemical, or physical
15
+ reasons can lead to astonishingly complex results that require a further understanding of the moving
16
+ bodies. With the increasing interaction between computation and various scientific areas, it has
17
+ become more useful, feasible, and important to create models for these systems. Here, we present two
18
+ families of models, deterministic and nondeterministic, along with three distinct and realistic
19
+ interaction mechanisms. These are combined in a unique way to provide the groundwork for particle
20
+ system models across multiple disciplines. This work has applications that range from biology to
21
+ chemistry and physics. In addition to describing the motivations and math behind all the models,
22
+ software is provided that allows researchers to quickly adjust and implement what is described here.
23
+ 1
24
+ Introduction
25
+ This work was inspired by consideration of quantum biology (QB) and cell migration and tissue
26
+ formation as previously described (Vecheck et al., 2022) with the goal of identifying quantitative
27
+ measures of particle clustering in a scaffold. Identifying such measures involves observing the
28
+ locations of the particles using image processing and applying these locations to a novel clustering
29
+ algorithm. The results were some surprising numbers that led to a search for other, more robust
30
+ methods of cluster analysis and an environment in which to test resulting algorithms. This proved not
31
+ to be a simple task prompting development of potentially new models to understand how particles
32
+ may interact under different conditions. From a biological perspective, and in view of the eventual
33
+ application of this work, this paper explores the coalescing of particles in a space towards one
34
+ another. It is also important to understand that in the simulation process, the goal is to replicate what
35
+ is seen in realistic settings. In addition to biology, many systems wherein multiple autonomous
36
+ particles are driven in a migratory pattern can be seen in physics and chemistry, allowing this work to
37
+ be expanded to multiple fields (Hansen et al., 2013; E. Courtier et al., 2019). Nanoparticle and ion
38
+ motion are two distinct areas that this type of computational research can be readily applied and lead
39
+ to interesting features of such a system being uncovered (Hansen et al., 2013; E. Courtier et al.,
40
+ 2019). With the diverse opportunities in mind, this paper seeks to abstract the motion and interaction
41
+ of individual bodies in such systems to create general models. These general models may then be
42
+ taken by a researcher and applied with the appropriate parameters to match a particular system, with
43
+
44
+
45
+ 2
46
+ the addition of the available software laying the foundation of such a model (Figure 1). This paper is
47
+ by no means all-encompassing; however, the range of these general models spans many fields with
48
+ many important implications.
49
+ 2 Methods
50
+ All software was implemented in MATLAB. Methods are described below as follows: First, different
51
+ environments in which particles may be generated are discussed. These environments decide what is
52
+ called the “initial seeding” of the particles. These initial seedings vary in ways that may be more
53
+ suitable for certain applications. Second, two families of models are explored: deterministic models
54
+ and nondeterministic models. Differences between these two families of models are described in more
55
+ detail; however, as the names suggest, the most important difference is that the deterministic model
56
+ will produce the exact same results given the same initial seedings, while the nondeterministic models
57
+ have an element of randomness that allows for changes in outcome. The third and last part discussed
58
+ is the interactions between particles. This is a unique addition to these models and distinguishes this
59
+ work for specific applications in various fields.
60
+ 2.1 Initial Seeding Environments
61
+ Initial seeding environments are different environments that serve different purposes and are each
62
+ suited for their own application. All seedings are centered at the origin.
63
+ 2.1.1 Uniform Seeding
64
+ Non-random uniform seeding places each particle in a lattice point of a Cartesian coordinate system.
65
+ This serves as what can be considered an “in vitro” environment, since in most applications, particles
66
+ are not placed in such an orderly fashion (Peercy and Starz-Gaiano, 2020; Calvillo et al., 2022).
67
+ 2.1.2 Uniform Random Seeding
68
+
69
+ This seeding generates a designated number of particles within specified dimensions. This is carried
70
+ out by generating three random numbers within ± the designated range for each particle. These
71
+ coordinates then determine the placement of each particle, providing uniformly random particles
72
+ throughout the space with an expected center at the origin.
73
+ 2.1.3 Spherical Random Seeding
74
+ Like the seeding above, spherical random seeding is a uniform distribution of particles with an
75
+ expected center at the origin. This differs from uniform random seeding in that the seeding is formed
76
+ with a given number of particles and a radius. The problem of creating a uniform distribution within
77
+ a sphere is not trivial; thus, this was accomplished by using polar coordinates.
78
+ To randomly generate a radius that places particles uniformly, a floating-point number between [0,1]
79
+ is sampled and the cube root is taken. Then, the maximum radius is multiplied by the resulting value.
80
+ This allows, for reasons that can be clearly seen from a geometric perspective, for the radius to
81
+ generate uniform points. Generating the random angles is described in more detail here, along with
82
+ more extensive reasoning throughout the entire process: (Simon, no date). This random process is
83
+ done for as many particles as desired, and this determines the initial seeding.
84
+ 2.2 Random Migration Models
85
+
86
+
87
+ 3
88
+ This class of models stems from the Beauchemin model for migrating bodies, which has been used
89
+ previously for biological modeling (Textor, Sinn and de Boer, 2013). All random migration models
90
+ were taken from Textor et al. (Textor, Sinn and de Boer, 2013).
91
+ 2.2.1 Beauchemin Random Walk
92
+ To develop a biased migration model for a group of particles, it is helpful to first create a model that
93
+ has no bias. The Beauchemin random walk model functions under two phases: a pause phase and a
94
+ free phase. During the pause phase, a particle generates two random angles, 𝜙 and 𝜃, and uses these
95
+ angles to generate a uniform random direction. The generation of these angles follows the same
96
+ method mentioned above for uniform sphere generation. After the pause phase, the particle enters the
97
+ free phase. In this random walk model, every particle has the same, given velocity 𝑣. Using its given
98
+ direction, a particle travels at the velocity 𝑣 for the entire free phase. The free phase length may be
99
+ changed, but for most modeling applications, it is 4 times longer than the pause phase. This is
100
+ arbitrary and may be changed easily.
101
+ As the name suggests, the Beauchemin random walk method is not biased in any way, so the
102
+ particles walk uniformly in any direction. This unbiased nature leads to no pattern of particle
103
+ migration but provides a basis wherein specific factors may be changed. With the unbiased model
104
+ established, an introduction to different biases is now needed.
105
+ 2.2.2 Simple Phenomenological
106
+ The Simple Phenomenological model (SP) introduces bias by adding a “drift element” during the
107
+ pause phase (Textor, Sinn and de Boer, 2013). Instead of the particles remaining stationary during the
108
+ pause phase, they drift towards some defined point. This causes uniform clumping amongst the
109
+ particles. In this implementation, the choice was made to make this point the origin. This is of course
110
+ arbitrary and may be changed to suit whatever environment is most sensible.
111
+ The drift velocity is defined as the given velocity 𝑣 times the bias variable 𝑝. For 𝑝 = 1, equivalent
112
+ bias is achieved between random walking and drifting towards the origin as the particles approach the
113
+ center at the same rate that they walk randomly. Of course, for 𝑝 = 0 there is no bias, so this model
114
+ would operate the same as the random walk above.
115
+ The drift vector is defined by using the current 𝑥, 𝑦, and 𝑧 coordinates to calculate the new
116
+ coordinates. First, 𝑑, the distance from the drift particle, is calculated using the 𝑙� norm
117
+ 𝑑 = �𝑥�
118
+ � + 𝑦�
119
+ � + 𝑧�
120
+
121
+ Then, the drift velocity is used to “shrink” the coordinates towards the origin
122
+ 𝑥����� = 𝑥� ⋅ 𝑑 − 𝑣𝑝
123
+ 𝑑
124
+
125
+ 𝑦����� = 𝑦� ⋅ 𝑑 − 𝑣𝑝
126
+ 𝑑
127
+
128
+ 𝑧����� = 𝑧� ⋅ 𝑑 − 𝑣𝑝
129
+ 𝑑
130
+
131
+
132
+
133
+ 4
134
+ In this implementation, there is an added condition that a particle only moves towards the origin if it
135
+ is farther away than the drift velocity will take it. This prevents the particle from “overshooting” its
136
+ target.
137
+ 2.2.3 Topotaxis Model
138
+ The Topotaxis Model (TM) differs from SP by introducing a different form of bias. Instead of
139
+ moving the particle in the target direction, TM changes the distribution from which the angles are
140
+ derived. Instead of 𝜙 and 𝜃 being taken from a uniform distribution, they are drawn from a beta
141
+ distribution. A beta distribution is essentially a truncated normal distribution. The normal distribution
142
+ cannot be used in this situation because of its infinite tails and the angles are within a strict range,
143
+ [0, 2𝜋] and [0, 𝜋] respectively. The beta distribution’s shape is determined by two parameters. Here,
144
+ the two parameters, 𝛼 and 𝛽, and are set equal. Since they are equal, we only refer to 𝛼 from here on.
145
+ To calculate the shape of the distribution given a single bias variable 𝑝, we need to consider that
146
+ when 𝑝 = 0, the distribution should be uniform and as 𝑝 approaches 1, the distribution should
147
+ become more skewed. This is accomplished by decreasing the variance in the distribution of angles
148
+ while keeping the mean towards the origin with the result that the particles have an increased
149
+ likelihood of orienting themselves towards the target location. In MATLAB, if 𝑎 = 1, the
150
+ distribution is defined as uniform and as 𝑎 → ∞, the variance decreases, and a peak centered at the
151
+ mean value is formed. To calculate 𝑎 given 𝑝, the following is done
152
+ 𝑎 =
153
+ 1
154
+ 1 − 𝑝
155
+ From this definition we see that for this model 𝑝 ≠ 1, but we also see lim
156
+ �→� 𝑎 = ∞, as desired. Thus,
157
+ for 𝑝 = 0, there is a uniform, or unbiased, distribution, and as 𝑝 → 1, there is an increased bias
158
+ towards the targeted location.
159
+ 2.3 Deterministic Migration Models
160
+ The deterministic migration models are a family of migration models that feature no random
161
+ elements. Within this class of models, there are two different groups: uniform clustering and local
162
+ clustering. The uniform clustering algorithms behave on a global scale, causing all particles to
163
+ migrate in a wholistic sense. The local clustering algorithms organize themselves at a much smaller
164
+ scale.
165
+ 2.3.1 Uniform Clustering, the Naïve Approach
166
+ The first and simplest way of clustering particles within space is to “shrink” the space they are in. This
167
+ first requires a clustering rate, 𝑟 ∈ [0,1]. This method is as follows
168
+ 𝑈�𝑐�, 𝑐�, 𝑐�� = �𝑐� ⋅ (1 − 𝑟), 𝑐� ⋅ (1 − 𝑟), 𝑐� ⋅ (1 − 𝑟)�; ∀𝑐 ∈ 𝐶 (Eq. 1)
169
+ Where C is the set of all particles, c is an individual particle, and 𝑐�, 𝑐� and 𝑐� are the x-, y-, and z-
170
+ coordinates of c, respectively. This function causes all particles to “march” inward, maintaining their
171
+ relative distances to one another. This does produce a set where each particle is relatively closer to all
172
+ other particles but does so in a way that is not representative of typical physical settings.
173
+ 2.3.2 Nearest Neighbor Clustering
174
+
175
+
176
+ 5
177
+ In nearest neighbor clustering, particles are not clustered into a single lump, but rather orient to their
178
+ neighbors and move toward them. This approach is much more nuanced compared to Uniform
179
+ Clustering and reveals samples that are much more reminiscent of certain applications. The method
180
+ behind this approach assumes that particles can only sense their n nearest neighbors (NN’s), and so
181
+ they only approach those particles. This method is conducted by first finding the geometric center
182
+ (GC) described by the n NN’s and then moving the particle towards that at a clustering rate, 𝑟 ∈ [0,1].
183
+ Using the same notation as above, we have the set of distances from all particles to a given particle c.
184
+ 𝑁� = min
185
+
186
+ { 𝑑�: 𝑑� = ||𝑠� − 𝑐||, ∀s� ∈ 𝐶} (Eq. 2)
187
+ Here, �|𝑠� − 𝑐|� is the 𝑙� normed distance between the particle 𝑠� and the particle c and min
188
+ � (𝑆)
189
+ represents taking the n smallest elements from the set S. With the NNs now identified, the GC (𝑔�) is
190
+ calculated for a given 𝑐. This center is calculated without including c itself.
191
+ 𝑔�� =
192
+
193
+ 𝑠�
194
+ �∈��
195
+ 𝑛
196
+
197
+ 𝑔�� =
198
+
199
+ 𝑠�
200
+ �∈��
201
+ 𝑛
202
+
203
+ 𝑔�� =
204
+
205
+ 𝑠�
206
+ �∈��
207
+ 𝑛
208
+
209
+ 𝑔� = �𝑔��, 𝑔��, 𝑔��� (Eq. 3)
210
+
211
+ In other words, the unweighted mean of x-, y-, and z-coordinates of each NN. This gives us the GC
212
+ described by the n NN’s. With this, we now have a location to move c towards.
213
+ �𝑐� , 𝑐�, 𝑐�� ↦ (𝑐�(1 − 𝑟) + 𝑔�𝑟, 𝑐�(1 − 𝑟) + 𝑔�𝑟, 𝑐�(1 − 𝑟) + 𝑔�𝑟) (Eq. 4)
214
+ For a given clustering, this calculation must be carried out for each particle, and it is done in its
215
+ entirety before any particle moves (i.e., no particle’s location changes until after every particle’s new
216
+ location has been calculated. Then, all particles move to their new locations at once). The result is
217
+ clustering in small groups that after enough time steps, form individual groups.
218
+ 2.3.3 Threshold Clustering
219
+ Threshold (TH) clustering differs from nearest neighbor clustering in that, instead of searching for a
220
+ certain number of neighbors, the particles have an assumed maximum influence distance. This
221
+ maximum influence distances, a threshold, is analogous to a particle only being able to sense other
222
+ particles for a small radius around itself. This method is implemented by collecting the set of particles
223
+ within the maximum sensing distance, t, calculating the GC described by these particles, and then
224
+ moving towards this center at the clustering rate r.
225
+ 𝑇� = {𝑠 ∈ 𝐶: �|𝑠 − 𝑐|� < 𝑡} (Eq. 5)
226
+
227
+
228
+ 6
229
+ Then, as above, the GC of this set is calculated using the unweighted mean (Eq. 3). This gives 𝑔�,
230
+ which we then use to calculate the new particle’s position using Eq. 4. This, as with NN, is done for
231
+ every particle before any particle changes its location. This mode produces several small groups
232
+ throughout the space whose group size changes with 𝑡.
233
+ 2.3.4 Weighted Clustering
234
+ This method is the next logical step for simulating clustering. Instead of taking the unweighted mean
235
+ to calculate the GC, why not weigh the locations based on distance? Weighted clustering is done by
236
+ first calculating 𝐷�, the set of all distances from each particle to c. Next, the weighted geometric
237
+ center (WGC) for c, 𝑤�, is calculated as follows:
238
+ 𝑤�� =
239
+
240
+ 𝑠��
241
+ 𝑑�
242
+ �∈��
243
+
244
+ � 1
245
+ 𝑑��
246
+ �∈��
247
+
248
+ 𝑤�� =
249
+
250
+ 𝑠��
251
+ 𝑑�
252
+ �∈��
253
+
254
+ � 1
255
+ 𝑑��
256
+ �∈��
257
+
258
+ 𝑤�� =
259
+
260
+ 𝑠��
261
+ 𝑑�
262
+ �∈��
263
+
264
+ � 1
265
+ 𝑑��
266
+ �∈��
267
+
268
+ 𝑤� = (𝑤��, 𝑤��, 𝑤��) (Eq. 6)
269
+ Where 𝐼� is the indexing set corresponding to 𝐷�. The reason for using the reciprocals of the weights
270
+ follows from the idea that as a particle get further, it should affect c less. The new coordinates for c
271
+ are calculated according to Eq. 4. This is a global clustering on particles, like uniform clustering. This
272
+ causes no small groups to small, but rather the entire population of particles congregate. This
273
+ congregation, however, does not appear the same nor does it occur at the same rate at uniform
274
+ clustering.
275
+ 2.3.5 Weighted Nearest Neighbor Clustering
276
+ In accordance with the previous modes, this weighted nearest neighbor (WNN) clustering is the next
277
+ intuitive step. This method is, as the name suggests, a combination of NN clustering and weighted
278
+ clustering. This combines the logic of only looking towards the closest particles, while also weighing
279
+ the particles according to their distance. The set of NN’s is calculated according to Eq. 2.
280
+ 𝐼�� = {𝑛 ∈ 𝐼: 𝑠� ∈ 𝑁�}
281
+ The WGC is calculated according to Eq. 6 with 𝐼�� used in place of 𝐼�. With this WGC, particle c is
282
+ mapped using Eq. 4. This mode appears very similar to the original NN clustering but tends to form
283
+ clusters that are skewed in slightly different locations.
284
+ 2.3.6 Weighted Threshold Clustering
285
+
286
+
287
+ 7
288
+ Weighted threshold (WT) clustering follows the same intuition. 𝑇� is calculated according to Eq. 5.
289
+ This defines the limited distance indexing set:
290
+ 𝐼�� = {𝑛 ∈ 𝐼: 𝑠� ∈ 𝑇�}
291
+ Using Eq. 6 with 𝐼�� in place of 𝐼� gives WGC about the particles within the threshold. Eq. 4 is used
292
+ to map c. As with WNN clustering, WT clustering appears quite similar to the TH clustering on small
293
+ scales.
294
+ 2.4 Interaction Mechanics
295
+ With the addition of interaction, defined here as how two particles behave when they physically hit
296
+ each other in the migration process, there are many different fields that may provide insights. Some
297
+ outcomes of physical interaction are cessation of movement, repolarization, or adhesion, to name just
298
+ a few examples of how particles may interact with one another.
299
+ 2.4.1 Cessation of Movement
300
+ The simplest self-interaction is cessation of movement, meaning simply that if two particles are
301
+ within a designated distance of one another, they both stop moving. In simulation, implementation of
302
+ this outcome simply checks if a particle is close to any other particle. If it is, it stays in the same spot.
303
+ Otherwise, it keeps moving as it was intended.
304
+ 2.4.2 Repolarization
305
+ Repolarization means that if two particles hit each other, they both turn around in the opposite
306
+ direction, much like an elastic collision. Implementation is carried out by taking the two angles that
307
+ defined the direction that a given particle was traveling, and negating both.
308
+ 2.4.3 Adhesion
309
+ Adhesion is the clustering or clumping of particles to form a single body that moves as one. This is
310
+ accomplished by assigning a clump to every particle. If a particle is within a given radius of other
311
+ particles, it gets put into that clump. Additionally, a clump is a collection of not just the particles that
312
+ are touching a given particle, but any that are touching its neighbors, the neighbor’s neighbors, and
313
+ so on. The clumps partition the set of particles and create moving bodies that can obtain more
314
+ particles. No limit to the size of clumps was designated, however this addition is trivial.
315
+ 3 Discussion
316
+ The purpose of this discussion is to explore the potential applications of the models described above
317
+ and how the simulations generated in the linked code can prove useful to other researchers. Many
318
+ particle or particle-like systems are seen across all scientific fields including physics, chemistry, and
319
+ biology. Proper implementation of these models requires a good understanding of the system at hand
320
+ and how the particles behave with one another. An understanding of the system is necessary to select
321
+ the proper migration model.
322
+ In highly chaotic systems, nondeterministic models are more suited as randomness (pseudo
323
+ randomness rather) is a key feature of such a system. In systems where the environment itself does
324
+ not drive motion of particles themselves, the deterministic models are better approximations.
325
+
326
+
327
+ 8
328
+ Implementing different interaction dynamics in the same setup leads to very different results. Thus, it
329
+ is also very important to understand how individual particles behave upon collision. With this
330
+ necessity understood and considered, let us explore specific use-cases.
331
+ The above models offer insight in biological settings. Cell migration is an important component of
332
+ many cell types, and thus estimating that motion can provide information about cellular mechanisms
333
+ (Davis et al., 2015; Gopinathan and Gov, 2019; Metzcar et al., 2019; Norden and Lecaudey, 2019;
334
+ Alert and Trepat, 2020; Guberman, Sherief and Regan, 2020; Peercy and Starz-Gaiano, 2020;
335
+ SenGupta, Parent and Bear, 2021). In different settings, cellular motion may be estimated by
336
+ different models. Chemotaxis, which is motion based on the sensing of chemical gradients, is the
337
+ driving force of many migration patterns at the cellular level (Hu et al., 2010; Gopinathan and Gov,
338
+ 2019; Norden and Lecaudey, 2019; Alert and Trepat, 2020; SenGupta, Parent and Bear, 2021). This
339
+ mode of migration is akin to the TM as the chemical gradient introduces bias as to what direction the
340
+ cells will migrate. The stronger the cells’ reaction to the chemical gradient, or the stronger the
341
+ chemical gradient itself, the greater this bias. Migratory cells have been noted to have a degree of
342
+ continuous bias, meaning they can have varying levels of directional influence due to chemotaxis, not
343
+ simply a binary model of directed motion (Hu et al., 2010; Alert and Trepat, 2020). This, along with
344
+ the mechanistic understanding of chemotaxis lends this type of cellular motion to the Topotaxis
345
+ model.
346
+ In addition to chemical sensitivity, further support of the nondeterministic models comes from the
347
+ observations that cells perform motion that follow the pause phase-free phase mechanism that
348
+ dictates these models (Gopinathan and Gov, 2019; Alert and Trepat, 2020; Peercy and Starz-Gaiano,
349
+ 2020). This comes from the discontinuation of motion in favor of direction recalibration noted in cell
350
+ motion, which strongly motivates the use of Beauchemin random walks in favor of other forms of
351
+ random walks (Textor, Sinn and de Boer, 2013; Gopinathan and Gov, 2019; Alert and Trepat, 2020;
352
+ Peercy and Starz-Gaiano, 2020). Other than chemical gradients, the mechanical force induced by the
353
+ microenvironment about the cell can influence motion (Peercy and Starz-Gaiano, 2020; SenGupta,
354
+ Parent and Bear, 2021). This is the defining feature of the SP model as there is an external force on
355
+ the cells driving them in a biased direction, much like fluid flowing. SP can be used to describe
356
+ mechanistic motion of cells during embryonic development and within the neural crest (Giniūnaitė et
357
+ al., 2020; Peercy and Starz-Gaiano, 2020). Contexts such as these are strong candidates for applying
358
+ nondeterministic models outlined in this paper in the field on biology.
359
+ Deterministic models also have niches in biological settings. Tumor growth has many similarities to
360
+ TH, as does cancer cell migration (Metzcar et al., 2019). This is an especially important area for
361
+ computational research, as hypotheses must be tested efficiently and accurately. Modeling can also
362
+ be used to determine metabolism of cancer and stem cells, giving useful information that can be
363
+ verified experimentally (Metzcar et al., 2019). This is a great example of how computational
364
+ modeling can uncover additional information and provide meaningful insight in a timely fashion.
365
+ Cellular mechanics also offer interesting interaction dynamics. All three modes of interaction take
366
+ place across the field of biology. Adhesion, as described above, occurs in migrating cell groups,
367
+ cancer metastasis, and embryogenesis (Gopinathan and Gov, 2019; Norden and Lecaudey, 2019;
368
+ Alert and Trepat, 2020; Guberman, Sherief and Regan, 2020; Peercy and Starz-Gaiano, 2020). This
369
+ typically occurs in homogenous cell groupings and is seen throughout the immune system (Davis et
370
+ al., 2015; Norden and Lecaudey, 2019; Alert and Trepat, 2020). Cessation of movement can be
371
+ observed in tissue growth for both epithelial cells and fibroblast cells (Abercrombie, 1961;
372
+ Guberman, Sherief and Regan, 2020). In fact, this behavior is the original mechanism that led to the
373
+
374
+
375
+ 9
376
+ term “contact inhibition of locomotion,” better known as CIL (Abercrombie, 1961). Notably, CIL
377
+ also can be described in different contexts as a repolarization effect, meaning within biology any of
378
+ the above interaction mechanisms may be observed (Davis et al., 2015; Roycroft et al., 2018; Alert
379
+ and Trepat, 2020; Guberman, Sherief and Regan, 2020).
380
+ Moving to a different field, in chemistry and physics nanoparticles display many of the features of
381
+ both clustering and motion. In certain settings, nanoparticles can behave in random walk migration
382
+ which can be biased due to outside forces to appear as any of the nondeterministic models outlined
383
+ above (Hansen et al., 2013). In addition, a mode of random potential driven motion along with
384
+ particle adhesion described as Oswald Ripening can be appropriately described by the TM with
385
+ particle adhesion (Hansen et al., 2013). Ion motion in solar cells holds many of the key mechanistic
386
+ contexts outlined above (E. Courtier et al., 2019). Additionally, the dynamic change of chemical
387
+ gradients, useful for chemistry and biological contexts, can by approximated by SP (Hu et al., 2010).
388
+ The novelty of the work contained within this paper comes from the diverse applications readily
389
+ available. It is the addition of particle interactions that leads to results with astounding complexity
390
+ that are reminiscent of what is captured in scientific settings, while keeping the simulations in a
391
+ tractable form. While this work was inspired from a biological setting, it is of upmost importance to
392
+ recognize the applications of such work to other fields, for it is the interaction between the sciences
393
+ that leads to strides in innovation. The union between computer science and physical science is
394
+ something that has never been seen before, and we as a scientific community have but scratched the
395
+ surface.
396
+ With these diverse and important applications available from the relatively simple models outlined
397
+ above, it is important to note the limitations of not just these models themselves, but also the current
398
+ state of computational physical science. First, we can only model what we understand, and this
399
+ knowledge is limited not by our ability to create models but by our methods in the physical sciences.
400
+ To aptly describe a complex system, a complex understanding of such system must be had first.
401
+ However, as far as biology, much of chemistry, and many places of physics goes, we do not have
402
+ such a deep understanding (SenGupta, Parent and Bear, 2021). The power of predictive models is the
403
+ ability to extrapolate our current understanding to generate nuanced hypotheses that may be tested in
404
+ the future. A major pitfall of this ability is, however, that false initial assumptions may lead to a
405
+ fruitless path.
406
+ The goal and purpose of this newfound computational power, a power that is only growing as time
407
+ passes, is to approximate to the best of our ability the real world. This allows us to uncover, with
408
+ unparalleled speed, extremely important new aspects of physical systems that were previously
409
+ obscure and serve to inspire experimentalists. Continuing to cautiously push forward with this
410
+ technology will lead to profound discoveries across academia.
411
+ The authors declare that the research was conducted in the absence of any commercial or financial
412
+ relationships that could be construed as a potential conflict of interest.
413
+ CM contributed to the experimental design, analysis of data and interpretation of the results. CM and
414
+ RRP prepared the manuscript and edited the final manuscript for publication.
415
+ This work was supported by funding from the National Institute of Health (1R01HD096026).
416
+ Abercrombie, M. (1961) ‘The bases of the locomotory behaviour of fibroblasts’, Experimental Cell
417
+ Research, 8, pp. 188–198. Available at: https://doi.org/10.1016/0014-4827(61)90348-2.
418
+
419
+
420
+ 10
421
+ Alert, R. and Trepat, X. (2020) ‘Physical Models of Collective Cell Migration’, Annual Review of
422
+ Condensed Matter Physics, 11(1), pp. 77–101. Available at: https://doi.org/10.1146/annurev-
423
+ conmatphys-031218-013516.
424
+ Calvillo, L. et al. (2022) ‘Quantum Biology Research Meets Pathophysiology and Therapeutic
425
+ Mechanisms: A Biomedical Perspective’, Quantum Reports, 4(2), pp. 148–172. Available at:
426
+ https://doi.org/10.3390/quantum4020011.
427
+ Davis, J.R. et al. (2015) ‘Inter-Cellular Forces Orchestrate Contact Inhibition of Locomotion’, Cell,
428
+ 161(2), pp. 361–373. Available at: https://doi.org/10.1016/j.cell.2015.02.015.
429
+ E. Courtier, N. et al. (2019) ‘How transport layer properties affect perovskite solar cell performance:
430
+ insights from a coupled charge transport/ion migration model’, Energy & Environmental Science,
431
+ 12(1), pp. 396–409. Available at: https://doi.org/10.1039/C8EE01576G.
432
+ Giniūnaitė, R. et al. (2020) ‘Modelling collective cell migration: neural crest as a model paradigm’,
433
+ Journal of Mathematical Biology, 80(1), pp. 481–504. Available at: https://doi.org/10.1007/s00285-
434
+ 019-01436-2.
435
+ Gopinathan, A. and Gov, N.S. (2019) ‘Cell cluster migration: Connecting experiments with physical
436
+ models’, Seminars in Cell & Developmental Biology, 93, pp. 77–86. Available at:
437
+ https://doi.org/10.1016/j.semcdb.2018.09.009.
438
+ Guberman, E., Sherief, H. and Regan, E.R. (2020) ‘Boolean model of anchorage dependence and
439
+ contact inhibition points to coordinated inhibition but semi-independent induction of proliferation
440
+ and migration’, Computational and Structural Biotechnology Journal, 18, pp. 2145–2165. Available
441
+ at: https://doi.org/10.1016/j.csbj.2020.07.016.
442
+ Hansen, T.W. et al. (2013) ‘Sintering of Catalytic Nanoparticles: Particle Migration or Ostwald
443
+ Ripening?’, Accounts of Chemical Research, 46(8), pp. 1720–1730. Available at:
444
+ https://doi.org/10.1021/ar3002427.
445
+ Hu, B. et al. (2010) ‘Physical Limits on Cellular Sensing of Spatial Gradients’, Physical Review
446
+ Letters, 105(4), p. 048104. Available at: https://doi.org/10.1103/PhysRevLett.105.048104.
447
+ Metzcar, J. et al. (2019) ‘A Review of Cell-Based Computational Modeling in Cancer Biology’, JCO
448
+ Clinical Cancer Informatics, (3), pp. 1–13. Available at: https://doi.org/10.1200/CCI.18.00069.
449
+ Norden, C. and Lecaudey, V. (2019) ‘Collective cell migration: general themes and new paradigms’,
450
+ Current Opinion in Genetics & Development, 57, pp. 54–60. Available at:
451
+ https://doi.org/10.1016/j.gde.2019.06.013.
452
+ Peercy, B.E. and Starz-Gaiano, M. (2020) ‘Clustered cell migration: Modeling the model system of
453
+ Drosophila border cells’, Seminars in Cell & Developmental Biology, 100, pp. 167–176. Available at:
454
+ https://doi.org/10.1016/j.semcdb.2019.11.010.
455
+ Roycroft, A. et al. (2018) ‘Redistribution of Adhesive Forces through Src/FAK Drives Contact
456
+ Inhibition of Locomotion in Neural Crest’, Developmental Cell, 45(5), pp. 565-579.e3. Available at:
457
+ https://doi.org/10.1016/j.devcel.2018.05.003.
458
+
459
+
460
+ 11
461
+ SenGupta, S., Parent, C.A. and Bear, J.E. (2021) ‘The principles of directed cell migration’, Nature
462
+ Reviews Molecular Cell Biology, 22(8), pp. 529–547. Available at: https://doi.org/10.1038/s41580-
463
+ 021-00366-6.
464
+ Simon, C. (no date) Generating uniformly distributed numbers on a sphere, Mathemathinking.
465
+ Available at: http://CorySimon.github.io/articles/uniformdistn-on-sphere/ (Accessed: 29 September
466
+ 2022).
467
+ Textor, J., Sinn, M. and de Boer, R.J. (2013) ‘Analytical results on the Beauchemin model of
468
+ lymphocyte migration’, BMC Bioinformatics, 14(6), p. S10. Available at:
469
+ https://doi.org/10.1186/1471-2105-14-S6-S10.
470
+ Vecheck, A.M. et al. (2022) ‘Quantum Biology in Cellular Migration’. bioRxiv, p.
471
+ 2022.09.09.507322. Available at: https://doi.org/10.1101/2022.09.09.507322.
472
+ 9 Data Availability Statement
473
+ The software for this study can be found at “Cameron-McNamee/Particle_Modeling”
474
+ [https://github.com/Cameron-McNamee/Particle_Modeling].
475
+
89AyT4oBgHgl3EQfdPcr/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf,len=488
2
+ page_content='Deterministic and Nondeterministic Particle Motion with Interaction Mechanisms Cameron McNamee1,2*, Renee Reijo Pera1 1McLaugling Research Institute, Great Falls, MT, USA 2Department of Mathematics, California Institute of Technology, Pasadena, CA, USA Correspondence: cmcnamee@caltech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
3
+ page_content='edu Keywords: particle modeling, migration, interaction mechanisms, deterministic modeling, Beauchemin modeling Abstract Studying systems where many individual bodies in motion interact with one another is a complex and interesting area.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
4
+ page_content=' Simple mechanisms that may be determined for biological, chemical, or physical reasons can lead to astonishingly complex results that require a further understanding of the moving bodies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
5
+ page_content=' With the increasing interaction between computation and various scientific areas, it has become more useful, feasible, and important to create models for these systems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
6
+ page_content=' Here, we present two families of models, deterministic and nondeterministic, along with three distinct and realistic interaction mechanisms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
7
+ page_content=' These are combined in a unique way to provide the groundwork for particle system models across multiple disciplines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
8
+ page_content=' This work has applications that range from biology to chemistry and physics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
9
+ page_content=' In addition to describing the motivations and math behind all the models, software is provided that allows researchers to quickly adjust and implement what is described here.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
10
+ page_content=' 1 Introduction This work was inspired by consideration of quantum biology (QB) and cell migration and tissue formation as previously described (Vecheck et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
11
+ page_content=', 2022) with the goal of identifying quantitative measures of particle clustering in a scaffold.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
12
+ page_content=' Identifying such measures involves observing the locations of the particles using image processing and applying these locations to a novel clustering algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
13
+ page_content=' The results were some surprising numbers that led to a search for other, more robust methods of cluster analysis and an environment in which to test resulting algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
14
+ page_content=' This proved not to be a simple task prompting development of potentially new models to understand how particles may interact under different conditions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
15
+ page_content=' From a biological perspective, and in view of the eventual application of this work, this paper explores the coalescing of particles in a space towards one another.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
16
+ page_content=' It is also important to understand that in the simulation process, the goal is to replicate what is seen in realistic settings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
17
+ page_content=' In addition to biology, many systems wherein multiple autonomous particles are driven in a migratory pattern can be seen in physics and chemistry, allowing this work to be expanded to multiple fields (Hansen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
18
+ page_content=', 2013;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
19
+ page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
20
+ page_content=' Courtier et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
21
+ page_content=', 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
22
+ page_content=' Nanoparticle and ion motion are two distinct areas that this type of computational research can be readily applied and lead to interesting features of such a system being uncovered (Hansen et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
23
+ page_content=', 2013;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
24
+ page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
25
+ page_content=' Courtier et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
26
+ page_content=', 2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
27
+ page_content=' With the diverse opportunities in mind, this paper seeks to abstract the motion and interaction of individual bodies in such systems to create general models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
28
+ page_content=' These general models may then be taken by a researcher and applied with the appropriate parameters to match a particular system, with 2 the addition of the available software laying the foundation of such a model (Figure 1).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
29
+ page_content=' This paper is by no means all-encompassing;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
30
+ page_content=' however, the range of these general models spans many fields with many important implications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
31
+ page_content=' 2 Methods All software was implemented in MATLAB.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
32
+ page_content=' Methods are described below as follows: First, different environments in which particles may be generated are discussed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
33
+ page_content=' These environments decide what is called the “initial seeding” of the particles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
34
+ page_content=' These initial seedings vary in ways that may be more suitable for certain applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
35
+ page_content=' Second, two families of models are explored: deterministic models and nondeterministic models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
36
+ page_content=' Differences between these two families of models are described in more detail;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
37
+ page_content=' however, as the names suggest, the most important difference is that the deterministic model will produce the exact same results given the same initial seedings, while the nondeterministic models have an element of randomness that allows for changes in outcome.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
38
+ page_content=' The third and last part discussed is the interactions between particles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
39
+ page_content=' This is a unique addition to these models and distinguishes this work for specific applications in various fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
40
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
41
+ page_content='1 Initial Seeding Environments Initial seeding environments are different environments that serve different purposes and are each suited for their own application.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
42
+ page_content=' All seedings are centered at the origin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
43
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
44
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
45
+ page_content='1 Uniform Seeding Non-random uniform seeding places each particle in a lattice point of a Cartesian coordinate system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
46
+ page_content=' This serves as what can be considered an “in vitro” environment, since in most applications, particles are not placed in such an orderly fashion (Peercy and Starz-Gaiano, 2020;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
47
+ page_content=' Calvillo et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
48
+ page_content=', 2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/89AyT4oBgHgl3EQfdPcr/content/2301.00297v1.pdf'}
49
2.1.2 Uniform Random Seeding

This seeding generates a designated number of particles within specified dimensions. This is carried out by generating three random numbers within ± the designated range for each particle. These coordinates then determine the placement of each particle, providing uniformly random particles throughout the space with an expected center at the origin.
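A minimal MATLAB sketch, with the particle count and box half-width as assumed values:

    % Uniform random seeding: three uniform draws in [-L, L] per particle.
    n = 500;  L = 10;              % particle count and half-width (assumed)
    P = -L + 2*L*rand(n, 3);       % N-by-3 coordinates, expected center at the origin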
2.1.3 Spherical Random Seeding

Like the seeding above, spherical random seeding is a uniform distribution of particles with an expected center at the origin. It differs from uniform random seeding in that the seeding is formed with a given number of particles and a radius. The problem of creating a uniform distribution within a sphere is not trivial; thus, this was accomplished by using polar coordinates. To randomly generate a radius that places particles uniformly, a floating-point number in [0, 1] is sampled and its cube root is taken. Then, the maximum radius is multiplied by the resulting value. Geometrically, the cube root is what makes the radial placement uniform: the volume enclosed within radius r grows as r³, so the fraction of particles inside radius r must also grow as r³. The generation of the random angles, along with more extensive reasoning for the entire process, is described in (Simon, no date). This random process is repeated for as many particles as desired, and this determines the initial seeding.
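A minimal MATLAB sketch combining the cube-root radius with the standard area-uniform angle recipe the text refers to:

    % Spherical random seeding: n particles uniformly distributed in a ball of radius R.
    n = 500;  R = 10;                  % assumed parameters
    r     = R*rand(n,1).^(1/3);        % cube root makes the radial density uniform in volume
    theta = 2*pi*rand(n,1);            % azimuthal angle in [0, 2*pi]
    phi   = acos(2*rand(n,1) - 1);     % polar angle, area-uniform on the sphere
    P = [r.*sin(phi).*cos(theta), r.*sin(phi).*sin(theta), r.*cos(phi)];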
2.2 Random Migration Models

This class of models stems from the Beauchemin model for migrating bodies, which has been used previously for biological modeling (Textor, Sinn and de Boer, 2013). All random migration models were taken from Textor et al. (Textor, Sinn and de Boer, 2013).
2.2.1 Beauchemin Random Walk

To develop a biased migration model for a group of particles, it is helpful to first create a model that has no bias. The Beauchemin random walk model functions under two phases: a pause phase and a free phase. During the pause phase, a particle generates two random angles, 𝜙 and 𝜃, and uses these angles to generate a uniform random direction. The generation of these angles follows the same method mentioned above for uniform sphere generation. After the pause phase, the particle enters the free phase. In this random walk model, every particle has the same given velocity 𝑣. Using its given direction, a particle travels at the velocity 𝑣 for the entire free phase. The free phase length may be changed, but for most modeling applications it is 4 times longer than the pause phase. This is arbitrary and may be changed easily. As the name suggests, the Beauchemin random walk method is not biased in any way, so the particles walk uniformly in any direction. This unbiased nature leads to no pattern of particle migration but provides a basis wherein specific factors may be changed. With the unbiased model established, an introduction to different biases is now needed.
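For concreteness, a minimal MATLAB sketch of one pause-plus-free-phase cycle applied to all particles at once; P is an N-by-3 coordinate matrix, and the speed and phase lengths are illustrative assumptions:

    % One Beauchemin random walk cycle: the pause phase picks a fresh uniform
    % direction, then the free phase moves each particle at speed v.
    v = 1;  t_pause = 1;  t_free = 4*t_pause;   % assumed units
    n = size(P, 1);
    theta = 2*pi*rand(n,1);                     % azimuthal angle
    phi   = acos(2*rand(n,1) - 1);              % polar angle, area-uniform
    u = [sin(phi).*cos(theta), sin(phi).*sin(theta), cos(phi)];  % unit directions
    P = P + v*t_free*u;                         % displacement over the free phase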
2.2.2 Simple Phenomenological

The Simple Phenomenological model (SP) introduces bias by adding a “drift element” during the pause phase (Textor, Sinn and de Boer, 2013). Instead of the particles remaining stationary during the pause phase, they drift towards some defined point. This causes uniform clumping amongst the particles. In this implementation, the choice was made to make this point the origin. This is of course arbitrary and may be changed to suit whatever environment is most sensible. The drift velocity is defined as the given velocity 𝑣 times the bias variable 𝑝. For 𝑝 = 1, equivalent bias is achieved between random walking and drifting towards the origin, as the particles approach the center at the same rate that they walk randomly. Of course, for 𝑝 = 0 there is no bias, so this model would operate the same as the random walk above. The drift vector is defined by using the current 𝑥, 𝑦, and 𝑧 coordinates to calculate the new coordinates. First, 𝑑, the distance of the particle from the drift target, is calculated using the l₂ norm:

    d = √(x_c² + y_c² + z_c²)

Then, the drift velocity is used to “shrink” the coordinates towards the origin:

    x_new = x_c · (d − vp)/d,  y_new = y_c · (d − vp)/d,  z_new = z_c · (d − vp)/d

In this implementation, there is an added condition that a particle only moves towards the origin if it is farther away than the drift velocity will take it. This prevents the particle from “overshooting” its target.
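A minimal MATLAB sketch of the drift applied in the pause phase, including the no-overshoot condition; parameter values are illustrative:

    % SP drift step: move each particle toward the origin by v*p, unless it is
    % already closer than that.
    v = 1;  p = 0.5;                          % speed and bias variable (assumed)
    d = sqrt(sum(P.^2, 2));                   % l2 distance of each particle from the origin
    move = d > v*p;                           % drift only where overshooting cannot occur
    P(move,:) = P(move,:) .* ((d(move) - v*p) ./ d(move));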
2.2.3 Topotaxis Model

The Topotaxis Model (TM) differs from SP by introducing a different form of bias. Instead of moving the particle in the target direction, TM changes the distribution from which the angles are derived. Instead of 𝜙 and 𝜃 being taken from a uniform distribution, they are drawn from a beta distribution. The beta distribution serves here in the role of a truncated normal distribution: the normal distribution cannot be used in this situation because of its infinite tails, while the angles are confined to strict ranges, [0, 2𝜋] and [0, 𝜋] respectively. The beta distribution’s shape is determined by two parameters. Here, the two parameters, 𝛼 and 𝛽, are set equal. Since they are equal, we only refer to 𝛼 from here on. To calculate the shape of the distribution given a single bias variable 𝑝, we need to consider that when 𝑝 = 0 the distribution should be uniform, and as 𝑝 approaches 1 the distribution should become more skewed. This is accomplished by decreasing the variance in the distribution of angles while keeping the mean towards the origin, with the result that the particles have an increased likelihood of orienting themselves towards the target location. In MATLAB, if 𝛼 = 1 the distribution is defined as uniform, and as 𝛼 → ∞ the variance decreases and a peak centered at the mean value is formed. To calculate 𝛼 given 𝑝, the following is done:

    𝛼 = 1/(1 − 𝑝)

From this definition we see that for this model 𝑝 ≠ 1, but we also see lim_{𝑝→1} 𝛼 = ∞, as desired. Thus, for 𝑝 = 0 there is a uniform, or unbiased, distribution, and as 𝑝 → 1 there is an increased bias towards the targeted location.
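A minimal MATLAB sketch of the biased angle draw (betarnd is in the Statistics and Machine Learning Toolbox); for brevity this sketch leaves the distribution centered at the middle of each range, whereas the full implementation keeps the mean pointed toward the target:

    % TM angle sampling: phi and theta from a symmetric beta distribution whose
    % concentration grows with the bias p.
    p = 0.8;                        % bias variable, p in [0, 1)
    a = 1/(1 - p);                  % alpha = beta; a = 1 is uniform, a -> inf peaks at the mean
    theta = 2*pi*betarnd(a, a);     % azimuthal angle in [0, 2*pi]
    phi   =   pi*betarnd(a, a);     % polar angle in [0, pi]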
2.3 Deterministic Migration Models

The deterministic migration models are a family of migration models that feature no random elements. Within this class of models, there are two different groups: uniform clustering and local clustering. The uniform clustering algorithms behave on a global scale, causing all particles to migrate in a holistic sense. The local clustering algorithms organize themselves at a much smaller scale.
2.3.1 Uniform Clustering, the Naïve Approach

The first and simplest way of clustering particles within space is to “shrink” the space they are in. This first requires a clustering rate, 𝑟 ∈ [0,1]. The method is as follows:

    U(c_x, c_y, c_z) = (c_x · (1 − r), c_y · (1 − r), c_z · (1 − r)); ∀c ∈ C (Eq. 1)

where C is the set of all particles, c is an individual particle, and c_x, c_y, and c_z are the x-, y-, and z-coordinates of c, respectively. This function causes all particles to “march” inward, maintaining their relative distances to one another. This does produce a set where each particle is relatively closer to all other particles, but does so in a way that is not representative of typical physical settings.
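In MATLAB, with P an N-by-3 coordinate matrix, Eq. 1 is a one-line update (the clustering rate is an assumed value):

    r = 0.1;          % clustering rate, r in [0,1] (assumed)
    P = (1 - r)*P;    % Eq. 1: every particle marches toward the origin in lockstep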
2.3.2 Nearest Neighbor Clustering

In nearest neighbor clustering, particles are not clustered into a single lump, but rather orient to their neighbors and move toward them. This approach is much more nuanced than uniform clustering and produces samples that are much more reminiscent of certain applications. The method assumes that particles can only sense their n nearest neighbors (NNs), and so they only approach those particles. It is conducted by first finding the geometric center (GC) described by the n NNs and then moving the particle towards it at a clustering rate, 𝑟 ∈ [0,1]. Using the same notation as above, we have the set of distances from all particles to a given particle c:

    N_c = min_n { d_i : d_i = ||s_i − c||, ∀s_i ∈ C } (Eq. 2)

Here, ||s_i − c|| is the l₂-normed distance between the particle s_i and the particle c, and min_n(S) represents taking the n smallest elements from the set S. With the NNs now identified, the GC (g_c) is calculated for a given c. This center is calculated without including c itself:

    g_cx = (Σ_{s ∈ N_c} s_x)/n,  g_cy = (Σ_{s ∈ N_c} s_y)/n,  g_cz = (Σ_{s ∈ N_c} s_z)/n;  g_c = (g_cx, g_cy, g_cz) (Eq. 3)

In other words, the unweighted mean of the x-, y-, and z-coordinates of the NNs. This gives us the GC described by the n NNs, and with it a location to move c towards:

    (c_x, c_y, c_z) ↦ (c_x(1 − r) + g_cx·r, c_y(1 − r) + g_cy·r, c_z(1 − r) + g_cz·r) (Eq. 4)

For a given clustering step, this calculation must be carried out for each particle, and it is done in its entirety before any particle moves (i.e., no particle’s location changes until after every particle’s new location has been calculated; then, all particles move to their new locations at once). The result is clustering in small groups that, after enough time steps, form individual groups.
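To make the synchronous update explicit, here is a minimal MATLAB sketch of one nearest neighbor clustering step; P is an N-by-3 coordinate matrix, and the parameter values are illustrative assumptions rather than values from the paper:

    % One NN clustering step (Eqs. 2-4) with a synchronous update.
    r = 0.1;  nNN = 5;                       % clustering rate and neighbor count (assumed)
    N = size(P, 1);
    Pnew = P;
    for i = 1:N
        d = sqrt(sum((P - P(i,:)).^2, 2));   % l2 distances from particle i to all particles
        [~, idx] = sort(d);
        nbrs = idx(2:nNN+1);                 % skip idx(1), the particle itself (Eq. 2)
        g = mean(P(nbrs,:), 1);              % unweighted geometric center (Eq. 3)
        Pnew(i,:) = (1 - r)*P(i,:) + r*g;    % move toward the GC (Eq. 4)
    end
    P = Pnew;                                % all particles move at once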
2.3.3 Threshold Clustering

Threshold (TH) clustering differs from nearest neighbor clustering in that, instead of searching for a certain number of neighbors, the particles have an assumed maximum influence distance. This maximum influence distance, a threshold, is analogous to a particle only being able to sense other particles within a small radius around itself. The method is implemented by collecting the set of particles within the maximum sensing distance t, calculating the GC described by these particles, and then moving towards this center at the clustering rate r:

    T_c = { s ∈ C : ||s − c|| < t } (Eq. 5)

Then, as above, the GC of this set is calculated using the unweighted mean (Eq. 3). This gives g_c, which we then use to calculate the new particle’s position using Eq. 4. This, as with NN, is done for every particle before any particle changes its location. This mode produces several small groups throughout the space whose group size changes with t.
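A minimal MATLAB sketch of one threshold clustering step, under the same assumptions as the nearest neighbor sketch above:

    % One TH clustering step (Eq. 5 with Eqs. 3-4), synchronous update.
    r = 0.1;  t = 2.0;                       % clustering rate and threshold (assumed)
    N = size(P, 1);
    Pnew = P;
    for i = 1:N
        d = sqrt(sum((P - P(i,:)).^2, 2));
        inT = d < t;  inT(i) = false;        % the set T_c, excluding the particle itself
        if any(inT)
            g = mean(P(inT,:), 1);           % unweighted GC of the particles within t
            Pnew(i,:) = (1 - r)*P(i,:) + r*g;
        end
    end
    P = Pnew;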
2.3.4 Weighted Clustering

This method is the next logical step for simulating clustering. Instead of taking the unweighted mean to calculate the GC, why not weigh the locations based on distance? Weighted clustering is done by first calculating D_c, the set of all distances from each particle to c. Next, the weighted geometric center (WGC) for c, w_c, is calculated as follows:

    w_cx = (Σ_{i ∈ I_c} s_ix/d_i) / (Σ_{i ∈ I_c} 1/d_i),  w_cy = (Σ_{i ∈ I_c} s_iy/d_i) / (Σ_{i ∈ I_c} 1/d_i),  w_cz = (Σ_{i ∈ I_c} s_iz/d_i) / (Σ_{i ∈ I_c} 1/d_i);  w_c = (w_cx, w_cy, w_cz) (Eq. 6)

where I_c is the indexing set corresponding to D_c. The reason for weighting by reciprocals follows from the idea that as a particle gets farther away, it should affect c less. The new coordinates for c are calculated according to Eq. 4. This is a global clustering on particles, like uniform clustering. It causes no small groups to form; rather, the entire population of particles congregates. This congregation, however, does not appear the same, nor does it occur at the same rate, as uniform clustering.
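A minimal MATLAB sketch of the weighted step (again with assumed parameter values); setting the particle’s own distance to infinity makes its weight zero:

    % One weighted clustering step (Eq. 6, then Eq. 4).
    r = 0.1;                                 % clustering rate (assumed)
    N = size(P, 1);
    Pnew = P;
    for i = 1:N
        d = sqrt(sum((P - P(i,:)).^2, 2));
        d(i) = inf;                          % exclude c itself: 1/inf = 0
        w = 1 ./ d;                          % closer particles weigh more
        g = (w' * P) / sum(w);               % inverse-distance weighted GC (Eq. 6)
        Pnew(i,:) = (1 - r)*P(i,:) + r*g;    % Eq. 4
    end
    P = Pnew;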
2.3.5 Weighted Nearest Neighbor Clustering

In accordance with the previous modes, weighted nearest neighbor (WNN) clustering is the next intuitive step. This method is, as the name suggests, a combination of NN clustering and weighted clustering: it keeps the logic of only looking towards the closest particles, while also weighing those particles according to their distance. The set of NNs is calculated according to Eq. 2, with the corresponding indexing set

    I_Nc = { n ∈ I : s_n ∈ N_c }

The WGC is calculated according to Eq. 6 with I_Nc used in place of I_c. With this WGC, particle c is mapped using Eq. 4. This mode appears very similar to the original NN clustering but tends to form clusters that are skewed in slightly different locations.
2.3.6 Weighted Threshold Clustering

Weighted threshold (WT) clustering follows the same intuition. T_c is calculated according to Eq. 5. This defines the limited-distance indexing set:

    I_Tc = { n ∈ I : s_n ∈ T_c }

Using Eq. 6 with I_Tc in place of I_c gives the WGC of the particles within the threshold, and Eq. 4 is used to map c. As with WNN clustering, WT clustering appears quite similar to TH clustering on small scales.
2.4 Interaction Mechanics

With the addition of interaction, defined here as how two particles behave when they physically hit each other in the migration process, there are many different fields that may provide insights. Some outcomes of physical interaction are cessation of movement, repolarization, or adhesion, to name just a few examples of how particles may interact with one another.
2.4.1 Cessation of Movement

The simplest self-interaction is cessation of movement, meaning simply that if two particles are within a designated distance of one another, they both stop moving. In simulation, implementation of this outcome simply checks whether a particle is close to any other particle. If it is, it stays in the same spot. Otherwise, it keeps moving as intended.
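As an illustration, a minimal MATLAB sketch of this check; the N-by-3 array step, holding each particle’s intended displacement, is a hypothetical placeholder for whichever migration model is in use, and the contact distance is an assumed value:

    % Cessation of movement: take the intended step only if no other particle
    % is within the designated contact distance.
    contact = 0.5;                           % designated contact distance (assumed)
    N = size(P, 1);
    for i = 1:N
        d = sqrt(sum((P - P(i,:)).^2, 2));
        d(i) = inf;                          % ignore the particle itself
        if all(d > contact)
            P(i,:) = P(i,:) + step(i,:);     % 'step' = intended move (hypothetical)
        end                                  % otherwise the particle stays put
    end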
2.4.2 Repolarization

Repolarization means that if two particles hit each other, they both turn around in the opposite direction, much like an elastic collision. Implementation is carried out by taking the two angles that define the direction a given particle was traveling and negating both.
2.4.3 Adhesion

Adhesion is the clustering or clumping of particles to form a single body that moves as one. This is accomplished by assigning a clump to every particle. If a particle is within a given radius of other particles, it gets put into that clump. Additionally, a clump is a collection of not just the particles that are touching a given particle, but any that are touching its neighbors, the neighbors’ neighbors, and so on. The clumps partition the set of particles and create moving bodies that can obtain more particles. No limit to the size of clumps was designated; however, this addition is trivial.
3 Discussion

The purpose of this discussion is to explore the potential applications of the models described above and how the simulations generated in the linked code can prove useful to other researchers. Many particle or particle-like systems are seen across all scientific fields, including physics, chemistry, and biology. Proper implementation of these models requires a good understanding of the system at hand and of how the particles behave with one another. An understanding of the system is necessary to select the proper migration model. In highly chaotic systems, the nondeterministic models are better suited, as randomness (pseudo-randomness, rather) is a key feature of such systems. In systems where the environment itself does not drive the motion of the particles, the deterministic models are better approximations. Implementing different interaction dynamics in the same setup leads to very different results. Thus, it is also very important to understand how individual particles behave upon collision. With this necessity understood and considered, let us explore specific use-cases.
The above models offer insight in biological settings. Cell migration is an important component of many cell types, and thus estimating that motion can provide information about cellular mechanisms (Davis et al., 2015; Gopinathan and Gov, 2019; Metzcar et al., 2019; Norden and Lecaudey, 2019; Alert and Trepat, 2020; Guberman, Sherief and Regan, 2020; Peercy and Starz-Gaiano, 2020; SenGupta, Parent and Bear, 2021). In different settings, cellular motion may be estimated by different models. Chemotaxis, which is motion based on the sensing of chemical gradients, is the driving force of many migration patterns at the cellular level (Hu et al., 2010; Gopinathan and Gov, 2019; Norden and Lecaudey, 2019; Alert and Trepat, 2020; SenGupta, Parent and Bear, 2021). This mode of migration is akin to the TM, as the chemical gradient introduces bias as to what direction the cells will migrate. The stronger the cells’ reaction to the chemical gradient, or the stronger the chemical gradient itself, the greater this bias. Migratory cells have been noted to have a degree of continuous bias, meaning they can have varying levels of directional influence due to chemotaxis, not simply a binary model of directed motion (Hu et al., 2010; Alert and Trepat, 2020). This, along with the mechanistic understanding of chemotaxis, lends this type of cellular motion to the Topotaxis Model. In addition to chemical sensitivity, further support for the nondeterministic models comes from observations that cells perform motion following the pause-phase and free-phase mechanism that dictates these models (Gopinathan and Gov, 2019; Alert and Trepat, 2020; Peercy and Starz-Gaiano, 2020). This comes from the discontinuation of motion in favor of direction recalibration noted in cell motion, which strongly motivates the use of Beauchemin random walks in favor of other forms of random walks (Textor, Sinn and de Boer, 2013; Gopinathan and Gov, 2019; Alert and Trepat, 2020; Peercy and Starz-Gaiano, 2020). Other than chemical gradients, the mechanical force induced by the microenvironment about the cell can influence motion (Peercy and Starz-Gaiano, 2020; SenGupta, Parent and Bear, 2021). This is the defining feature of the SP model, as there is an external force on the cells driving them in a biased direction, much like fluid flowing. SP can be used to describe mechanistic motion of cells during embryonic development and within the neural crest (Giniūnaitė et al., 2020; Peercy and Starz-Gaiano, 2020). Contexts such as these are strong candidates for applying the nondeterministic models outlined in this paper in the field of biology.
Deterministic models also have niches in biological settings. Tumor growth has many similarities to TH, as does cancer cell migration (Metzcar et al., 2019). This is an especially important area for computational research, as hypotheses must be tested efficiently and accurately. Modeling can also be used to determine the metabolism of cancer and stem cells, giving useful information that can be verified experimentally (Metzcar et al., 2019). This is a great example of how computational modeling can uncover additional information and provide meaningful insight in a timely fashion.
Cellular mechanics also offer interesting interaction dynamics. All three modes of interaction take place across the field of biology. Adhesion, as described above, occurs in migrating cell groups, cancer metastasis, and embryogenesis (Gopinathan and Gov, 2019; Norden and Lecaudey, 2019; Alert and Trepat, 2020; Guberman, Sherief and Regan, 2020; Peercy and Starz-Gaiano, 2020). This typically occurs in homogenous cell groupings and is seen throughout the immune system (Davis et al., 2015; Norden and Lecaudey, 2019; Alert and Trepat, 2020). Cessation of movement can be observed in tissue growth for both epithelial cells and fibroblast cells (Abercrombie, 1961; Guberman, Sherief and Regan, 2020). In fact, this behavior is the original mechanism that led to the term “contact inhibition of locomotion,” better known as CIL (Abercrombie, 1961). Notably, CIL can also be described in different contexts as a repolarization effect, meaning that within biology any of the above interaction mechanisms may be observed (Davis et al., 2015; Roycroft et al., 2018; Alert and Trepat, 2020; Guberman, Sherief and Regan, 2020).
Moving to a different field, in chemistry and physics nanoparticles display many of the features of both clustering and motion. In certain settings, nanoparticles can exhibit random walk migration, which can be biased by outside forces to appear as any of the nondeterministic models outlined above (Hansen et al., 2013). In addition, a mode of random potential-driven motion along with particle adhesion, described as Ostwald ripening, can be appropriately described by the TM with particle adhesion (Hansen et al., 2013). Ion motion in solar cells holds many of the key mechanistic contexts outlined above (E. Courtier et al., 2019). Additionally, the dynamic change of chemical gradients, useful in chemistry and biological contexts, can be approximated by SP (Hu et al., 2010).
The novelty of the work contained within this paper comes from the diverse applications readily available. It is the addition of particle interactions that leads to results with astounding complexity, reminiscent of what is captured in scientific settings, while keeping the simulations in a tractable form. While this work was inspired by a biological setting, it is of utmost importance to recognize the applications of such work to other fields, for it is the interaction between the sciences that leads to strides in innovation. The union between computer science and physical science is unprecedented, and we as a scientific community have but scratched the surface.

With these diverse and important applications available from the relatively simple models outlined above, it is important to note the limitations not just of these models themselves, but also of the current state of computational physical science. First, we can only model what we understand, and this knowledge is limited not by our ability to create models but by our methods in the physical sciences. To aptly describe a complex system, a correspondingly deep understanding of that system must come first. However, in biology, much of chemistry, and many areas of physics, we do not yet have such an understanding (SenGupta, Parent and Bear, 2021). The power of predictive models is the ability to extrapolate our current understanding to generate nuanced hypotheses that may be tested in the future. A major pitfall of this ability, however, is that false initial assumptions may lead down a fruitless path. The goal and purpose of this newfound computational power, a power that is only growing as time passes, is to approximate the real world to the best of our ability. This allows us to uncover, with unparalleled speed, extremely important new aspects of physical systems that were previously obscure, and it serves to inspire experimentalists. Continuing to cautiously push forward with this technology will lead to profound discoveries across academia.
The authors declare that the research was conducted in the absence of any commercial or financial relationships that could be construed as a potential conflict of interest.

CM contributed to the experimental design, analysis of data, and interpretation of the results. CM and RRP prepared the manuscript and edited the final manuscript for publication.

This work was supported by funding from the National Institutes of Health (1R01HD096026).
References

Abercrombie, M. (1961) ‘The bases of the locomotory behaviour of fibroblasts’, Experimental Cell Research, 8, pp. 188–198. Available at: https://doi.org/10.1016/0014-4827(61)90348-2.

Alert, R. and Trepat, X. (2020) ‘Physical Models of Collective Cell Migration’, Annual Review of Condensed Matter Physics, 11(1), pp. 77–101. Available at: https://doi.org/10.1146/annurev-conmatphys-031218-013516.

Calvillo, L. et al. (2022) ‘Quantum Biology Research Meets Pathophysiology and Therapeutic Mechanisms: A Biomedical Perspective’, Quantum Reports, 4(2), pp. 148–172. Available at: https://doi.org/10.3390/quantum4020011.

Davis, J.R. et al. (2015) ‘Inter-Cellular Forces Orchestrate Contact Inhibition of Locomotion’, Cell, 161(2), pp. 361–373. Available at: https://doi.org/10.1016/j.cell.2015.02.015.

Courtier, N.E. et al. (2019) ‘How transport layer properties affect perovskite solar cell performance: insights from a coupled charge transport/ion migration model’, Energy & Environmental Science, 12(1), pp. 396–409. Available at: https://doi.org/10.1039/C8EE01576G.

Giniūnaitė, R. et al. (2020) ‘Modelling collective cell migration: neural crest as a model paradigm’, Journal of Mathematical Biology, 80(1), pp. 481–504. Available at: https://doi.org/10.1007/s00285-019-01436-2.

Gopinathan, A. and Gov, N.S. (2019) ‘Cell cluster migration: Connecting experiments with physical models’, Seminars in Cell & Developmental Biology, 93, pp. 77–86. Available at: https://doi.org/10.1016/j.semcdb.2018.09.009.

Guberman, E., Sherief, H. and Regan, E.R. (2020) ‘Boolean model of anchorage dependence and contact inhibition points to coordinated inhibition but semi-independent induction of proliferation and migration’, Computational and Structural Biotechnology Journal, 18, pp. 2145–2165. Available at: https://doi.org/10.1016/j.csbj.2020.07.016.

Hansen, T.W. et al. (2013) ‘Sintering of Catalytic Nanoparticles: Particle Migration or Ostwald Ripening?’, Accounts of Chemical Research, 46(8), pp. 1720–1730. Available at: https://doi.org/10.1021/ar3002427.

Hu, B. et al. (2010) ‘Physical Limits on Cellular Sensing of Spatial Gradients’, Physical Review Letters, 105(4), p. 048104. Available at: https://doi.org/10.1103/PhysRevLett.105.048104.

Metzcar, J. et al. (2019) ‘A Review of Cell-Based Computational Modeling in Cancer Biology’, JCO Clinical Cancer Informatics, (3), pp. 1–13. Available at: https://doi.org/10.1200/CCI.18.00069.

Norden, C. and Lecaudey, V. (2019) ‘Collective cell migration: general themes and new paradigms’, Current Opinion in Genetics & Development, 57, pp. 54–60. Available at: https://doi.org/10.1016/j.gde.2019.06.013.

Peercy, B.E. and Starz-Gaiano, M. (2020) ‘Clustered cell migration: Modeling the model system of Drosophila border cells’, Seminars in Cell & Developmental Biology, 100, pp. 167–176. Available at: https://doi.org/10.1016/j.semcdb.2019.11.010.

Roycroft, A. et al. (2018) ‘Redistribution of Adhesive Forces through Src/FAK Drives Contact Inhibition of Locomotion in Neural Crest’, Developmental Cell, 45(5), pp. 565–579.e3. Available at: https://doi.org/10.1016/j.devcel.2018.05.003.

SenGupta, S., Parent, C.A. and Bear, J.E. (2021) ‘The principles of directed cell migration’, Nature Reviews Molecular Cell Biology, 22(8), pp. 529–547. Available at: https://doi.org/10.1038/s41580-021-00366-6.

Simon, C. (no date) Generating uniformly distributed numbers on a sphere, Mathemathinking. Available at: http://CorySimon.github.io/articles/uniformdistn-on-sphere/ (Accessed: 29 September 2022).

Textor, J., Sinn, M. and de Boer, R.J. (2013) ‘Analytical results on the Beauchemin model of lymphocyte migration’, BMC Bioinformatics, 14(6), p. S10. Available at: https://doi.org/10.1186/1471-2105-14-S6-S10.

Vecheck, A.M. et al. (2022) ‘Quantum Biology in Cellular Migration’. bioRxiv, p. 2022.09.09.507322. Available at: https://doi.org/10.1101/2022.09.09.507322.
Data Availability Statement

The software for this study can be found at “Cameron-McNamee/Particle_Modeling” [https://github.com/Cameron-McNamee/Particle_Modeling].
A9FKT4oBgHgl3EQfWS5f/content/tmp_files/2301.11791v1.pdf.txt ADDED
@@ -0,0 +1,814 @@
Biometrical Journal
© 2021 Wiley-VCH GmbH, Weinheim

Improving Software Engineering in Biostatistics: Challenges and Opportunities
Daniel Sabanés Bové*,1, Heidi Seibold2, Anne-Laure Boulesteix3, Juliane Manitz4, Alessandro Gasparini5,6, Burak K. Günhan7, Oliver Boix8, Armin Schüler7, Sven Fillinger9, Sven Nahnsen9,12, Anna E. Jacob3, Thomas Jaki10,11

1 Hoffmann-La Roche Ltd., Product Development Data Sciences, Grenzacherstrasse 124, 4070 Basel, Switzerland
2 IGDORE, Elsenheimerstr. 48, 80687 München, Germany
3 Institute for Medical Information Processing, Biometry and Epidemiology, LMU Munich, 81377 Munich, Germany
4 EMD Serono, 45A Middlesex Turnpike, Billerica, MA 01821, USA
5 Department of Medical Epidemiology and Biostatistics, Karolinska Institutet, PO Box 281, 17177 Stockholm, Sweden
6 Red Door Analytics AB, Dianavägen 11A, 11542 Stockholm, Sweden
7 Merck Healthcare KGaA, Frankfurter Strasse 250, 64293 Darmstadt, Germany
8 Bayer AG, Aprather Weg 18a, 42113 Wuppertal, Germany
9 Quantitative Biology Center (QBiC), University of Tübingen, 72076 Tübingen, Germany
10 Faculty of Informatics and Data Science, University of Regensburg, Bajuwarenstraße 4, 93053 Regensburg, Germany
11 MRC Biostatistics Unit, University of Cambridge, East Forvie Building, Forvie Site, Robinson Way, Cambridge CB2 0SR, UK
12 Biomedical Data Science, Department of Computer Science, University of Tübingen, 72076 Tübingen, Germany

Received zzz, revised zzz, accepted zzz
Programming is ubiquitous in applied biostatistics; adopting software engineering skills will help biostatisticians do a better job. To explain this, we start by highlighting key challenges for software development and application in biostatistics. Silos between different statistician roles, projects, departments, and organizations lead to the development of duplicate and suboptimal code. Building on top of open-source software requires critical appraisal and risk-based assessment of the used modules. Code that is written needs to be readable to ensure reliable software. The software needs to be easily understandable for the user, as well as developed within testing frameworks to ensure that long-term maintenance of the software is feasible. Finally, the reproducibility of research results is hindered by manual analysis workflows and uncontrolled code development. We next describe how awareness of the importance and application of good software engineering practices and strategies can help address these challenges. The foundation is a better education in basic software engineering skills in schools, universities, and during the work life. Dedicated software engineering teams within academic institutions and companies can be a key factor for the establishment of good software engineering practices and catalyze improvements across research projects. Providing attractive career paths is important for the retention of talent. Readily available tools can improve the reproducibility of statistical analyses, and their use can be exercised in community events. Similarly, tools exist to assess the risk of R packages, and initiatives are developing shared repositories of trusted R packages. Finally, collaboration between software developers from different organizations is key to harnessing open-source software efficiently and optimally, while building trusted solutions. We illustrate the potential with examples of successful projects.

Key words: Collaboration; Open-source; Programming; Software engineering.

*Corresponding author: e-mail: daniel.sabanes_bove@roche.com
1 Introduction

As technology and methods advance, one of the key goals in the field of biostatistics is for the statisticians of tomorrow to practice software engineering in a sustainable way. They can do this by making methods and software open source, integrating new software in an established ecosystem, organizing long-term maintenance, and adhering to good software engineering practices (e.g., documentation, code readability, code organization, and version control) (Seibold et al., 2021). In this paper, we highlight opportunities and challenges on the path to this long-term vision and how we can achieve this desirable future. The content originates from a panel discussion on “Research Software Engineering for Clinical Biostatistics” which took place at the 43rd Annual Conference of the International Society for Clinical Biostatistics (ISCB) in Newcastle in August 2022.

To provide a better understanding of what software engineering is, we will use a standardized definition from the Institute of Electrical and Electronics Engineers (IEEE) to aid the interpretation of this discipline. IEEE defines software engineering as “[t]he systematic application of scientific and technological knowledge, methods, and experience to the design, implementation, testing, and documentation of software” (“ISO/IEC/IEEE International Standard - Systems and Software Engineering–Vocabulary,” 2017). From this, one can conclude that software engineering implies the conscious application of methods and practices in the field of software craftsmanship to deliver a software product of a certain quality. In addition, in what follows we adhere to the definition of open source as stated by the Open Source Initiative (The Open Source Definition, 2007).

So far, there have been a few related publications within the statistical literature and beyond. Sanchez et al. (2021) describe key steps for implementing a code quality assurance process that researchers can follow throughout a project to improve their coding practices regarding the quality of the final data, code, analyses, and results. This includes code style, documentation, version control, data management, testing, and code review. Taschuk and Wilson (2017) present ten simple rules to make research software robust enough to be run reproducibly. The rules include version control, documentation, release versioning, build tools, and tests, among others. Anzt et al. (2021) identify challenges for research software sustainability in Germany and beyond in terms of motivation, selection, research software engineering personnel, funding, infrastructure, and legal aspects. They recommend strategies and measures to create an environment for sustainable research software, which enables valid, reproducible, and sustainable research.

In this paper, we aim to summarize key challenges regarding programming in the field of biostatistics and provide opportunities from the software engineering perspective to address them. In section 2, we describe the challenges faced by software engineering in biostatistics today. In section 3, we highlight important opportunities to overcome these challenges as a call to action. Examples of successful projects implementing good software engineering practices are described in section 4. Finally, in section 5, we discuss the key points.
2 Challenges

2.1 Silos

Silos are a particular challenge for software engineering in biostatistics (de Waal et al., 2019). In the pharmaceutical industry, there are often purpose silos: statistical analyses prepared for regulatory use are run on a different system, with a different programming language, than statistical analyses prepared for exploratory use. This is inefficient because as soon as an exploratory analysis becomes relevant for regulatory use, it must be recoded and run on the other system.
Another example is different projects within a company or research institution, where project team members may communicate with members of other projects but only copy and paste code between each other. This is time consuming because the code then needs to be adapted to the current project manually, which is naturally error prone. It is problematic when there is not enough time to invest into the development of macros or packages that implement more generic code which could easily be used across multiple projects in a sustainable way. With respect to resource efficiency, the situation can worsen if these related code bases grow into more complex applications. This is especially the case when a lack of strategic design prevented joint development efforts from being considered from the beginning.
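To make the contrast concrete, here is a minimal, hypothetical R sketch of such a refactoring; the function, variable names, and cutoff are invented for illustration and are not taken from any actual project:

    # Instead of copy-pasting and manually editing per-project scripts like
    #   mean(subset(study_a, age >= 18)$response)
    # a small generic function can be shared across projects, e.g. in an
    # internal R package. All names below are illustrative.
    summarize_adult_response <- function(data, age_var = "age",
                                         response_var = "response",
                                         age_cutoff = 18) {
      # Keep only participants at or above the age cutoff
      adults <- data[data[[age_var]] >= age_cutoff, , drop = FALSE]
      # Return the mean response, ignoring missing values
      mean(adults[[response_var]], na.rm = TRUE)
    }

    # Each project then calls the same tested code with its own data:
    # summarize_adult_response(study_a)
    # summarize_adult_response(study_b, age_cutoff = 21)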
Similarly, different departments are often siloed from each other; e.g., the IT department and the statistics department do not communicate sufficiently. Statistics departments need to rely on the IT departments to provide the computing infrastructure (e.g., laptops, servers, cloud-based containers, etc.) for running the statistical software, being able to develop it, and sharing it with users (e.g., for web-based interactive applications). Especially in larger companies or research institutions, the IT landscape is often complex, and requests from statistics departments for access to modern tools need to be properly communicated to, received, and implemented by the IT departments. Otherwise, it is a source of frustration for both sides, and the statistics department will not be able to use the modern tools it requires.
Finally, different organizations in academia or industry often reinvent the wheel for standard analysis pipelines, which is inefficient from the perspective of society. This repeated effort is unnecessary because those standard analyses are not the subject of innovation, research, or the business itself, but only a means to an end.
2.2 Reliability

With the transition of medical practices towards data-driven assessment and the introduction of statistical learning to biomedicine and patient care (for instance in personalized oncology), the notion of software reliability needs to be given exceptional importance.

Most software engineering in biostatistics today is done within ecosystems of existing modular open-source software packages which augment a foundational programming language. The most prominent example is R, but there is also Python and, more recently, Julia. In this situation, it is up to the developer to judge which existing packages are of high enough quality to build upon with the new software, and which are not. While reliability is also a concern for proprietary software, such software is sold with appropriate documentation of quality and accuracy. Therefore, the user can build upon proprietary software with more confidence in its quality compared to open-source alternatives.

In addition, all newly developed software should be developed using professional workflows and tested sufficiently to ensure that the analyses are performed correctly and the respective results are accurate. In the clinical trial context, for instance, the ICH E9 guideline states that “software used should be reliable, and documentation of appropriate software testing procedures should be available” (European Medicines Agency, 1998). Risk assessment frameworks have been developed alongside tools to help with this task and will be described in section 3.5. While observational research outside of clinical trials does not fall under this regulation, it is clear that similar quality standards should be followed in order to guarantee reliable analyses and results.
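As a minimal sketch of what such testing can look like in an R workflow, assuming the widely used testthat package, consider the following unit test; the function under test is hypothetical:

    library(testthat)

    # Hypothetical function under test: a simple two-arm mean difference.
    mean_difference <- function(treatment, control) {
      mean(treatment) - mean(control)
    }

    test_that("mean_difference returns the expected treatment effect", {
      expect_equal(mean_difference(c(2, 4, 6), c(1, 2, 3)), 2)
      # Edge case: identical arms yield a zero difference.
      expect_equal(mean_difference(c(5, 5), c(5, 5)), 0)
    })

Such tests can then be run automatically on every code change, which is one way to produce the testing documentation the guideline asks for.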
Making code open source is a good step to enable contributions from the open-source community. However, this does not guarantee that the code is easy to understand for others (Martin, 2009). The ratio of code being read to code being written is often greater than one, so code that is hard to understand can lead to longer debugging times and be more difficult to build upon. Since readability of code is a concern that is not domain specific, sophisticated literature is already available that suggests helpful guidelines for achieving readable code. The challenge is how to bring this knowledge into domain-specific software engineering areas, e.g., statistics, where there might be little awareness. Additionally, it is important to ensure that such guidelines are actually implemented, especially when doing so brings no direct value to the developer (e.g., public recognition) and is not part of a good-practice statute within the organization (i.e., extrinsic motivation factors are lacking).
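To illustrate what such guidelines mean in practice, here is a small, hypothetical R example; both versions compute the same quantity, but only one communicates its intent:

    # Hard to read: cryptic names, magic numbers, no structure.
    f <- function(d) mean(d[d[, 2] > 0.5, 1])

    # Easier to read: descriptive names and an explicit, documented threshold.
    mean_response_of_responders <- function(data,
                                            response_col = "response",
                                            probability_col = "prob",
                                            threshold = 0.5) {
      # Select subjects whose predicted probability exceeds the threshold
      responders <- data[data[[probability_col]] > threshold, , drop = FALSE]
      mean(responders[[response_col]])
    }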
2.3 Usability

Open-source projects often start from code written by an individual for their own use. Either because of altruistic motives or external incentives, the code is published at one point in time. However, the software is sometimes difficult for others to use due to a lack of documentation, an intuitive interface, or naming conventions, with the result that researchers sometimes prefer to start from scratch instead of using the available software. At the same time, the usability of the software has a profound influence on its subsequent use and success.

Application Program Interface (API) design itself can have a great influence on the usability of software libraries, especially when the intentions of methods are unclear. Even simple elements such as the semantics of method signatures can make a difference. Usability tests can help in designing efficient and effective user interfaces or client APIs but might not have priority or be part of the development process.
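A minimal, hypothetical R sketch of this point follows; neither signature comes from a real library, and the body is a stand-in:

    # Unclear: what do the unnamed arguments mean, and in which order?
    # fit <- run(x, y, 0.05, TRUE)

    # Clearer: the signature itself documents the intent of each argument.
    fit_dose_response <- function(dose, response,
                                  significance_level = 0.05,
                                  adjust_for_baseline = TRUE) {
      # A stand-in body; a real implementation would fit a model here.
      list(dose = dose, response = response,
           significance_level = significance_level,
           adjust_for_baseline = adjust_for_baseline)
    }

    fit <- fit_dose_response(dose = c(0, 10, 50),
                             response = c(0.1, 0.4, 0.9))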
2.4 Maintenance

In academia, principal investigators often have neither sufficient time to work on complex software themselves nor to keep pace with recent developments in a rapidly evolving ecosystem. Similarly, in industry, statisticians working on projects have too many other duties to be able to write any software themselves. It is also not usually part of the job of statistical programmers to engineer software; instead, the expectation focuses on writing scripts that execute predefined macros.

Software engineering thus often relies on the efforts of earlier-career statisticians such as PhD students, post-doctoral researchers, or interns, who typically leave the institution and switch to other projects before the end of the lifetime of the packages they developed. This raises major challenges related to maintenance: in terms of expertise (other group members may not have the required expertise), funding (which is typically not available for maintenance years after the package’s primary development), and incentives (taking over another person’s package as a new maintainer is usually not perceived as rewarding). We claim that funding and incentive structures must be strengthened to ensure the middle- to long-term maintenance of packages developed by academic researchers; see Schönbrodt (2022) for an example proposal. Package longevity should be taken into account from the beginning of a project, for example by distributing competence over several researchers to improve the “bus factor” (Cosentino et al., 2015).
Another aspect of the maintenance challenge is that building upon existing open-source packages comes with risks. For example, a package our software depends on might be retired or abandoned by the previous developers. Then all dependent packages are at risk of also becoming unusable. Naturally, however, the more packages depend on it, the larger the base of developers available to inherit it (i.e., become the maintainer), update it by contributing code changes (i.e., become co-developers), or address problems with the package. For example, recently the R package isoband, which is a dependency of the popular package ggplot2, was at risk of being removed from the Comprehensive R Archive Network (CRAN) repository. This could have impacted thousands of other R packages that themselves depend on ggplot2. Fortunately, the problem could be resolved quickly by the developer community (Szymański, 2022). This is one reason to generally reduce the number of dependencies as much as possible, while avoiding copying code where possible.
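The scale of such dependency risks can be inspected with existing tooling; for instance, base R’s tools package can list the installed packages that would be affected if a given package broke. A minimal sketch, assuming the relevant packages are installed locally:

    # List installed packages that would be affected if "ggplot2" broke,
    # i.e., packages that depend on, import, or link to it (directly or
    # transitively).
    affected <- tools::dependsOnPkgs(
      "ggplot2",
      dependencies = c("Depends", "Imports", "LinkingTo"),
      recursive = TRUE
    )
    head(affected)

    # Similarly, one can inspect one's own package's direct dependency
    # footprint before a release (hypothetical package name):
    # packageDescription("ourPackage")$Imports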
2.5 Reproducibility

The issue of reproducibility in scientific research (or lack thereof) has been heavily discussed in the past years (Begley & Ioannidis, 2015; Cacho & Taghva, 2020; Mullane et al., 2018, p. 1; Niven et al., 2018; Stupple et al., 2019). For a biomedical study to be reproducible in practice, that is, for independent researchers to be able to replicate the output of the study, data and software ought to be shared by the original authors. Research reproducibility is paramount for the accumulation of scientific evidence (Peng et al., 2006) but is increasingly complex to achieve in practice due to the growing complexity of the data collected (and used) and more complicated statistical methods, bioinformatics, and analysis pipelines.
For analyses using R, reproducibility is a challenge because more and more packages with different versions are available on CRAN, and results can differ depending on the exact versions. Theußl et al. (2011) describe general challenges resulting from the increasing number of R packages.

Technical tools to streamline and simplify the creation and maintenance of reproducible workflows, such as the targets package in R, have emerged in recent years (Landau, 2021). However, such tools are not widely used in the community, partly because they are still complicated and involve a steep learning curve. Thus, as alluded to in previous sections, good research software engineering (RSE) practices are fundamentally important as they provide basic building blocks to improve research reproducibility.
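As a minimal sketch of what a targets pipeline looks like, following the package’s convention of a _targets.R file and run with targets::tar_make(), consider the following; the data file and analysis steps are hypothetical:

    # _targets.R: declares the pipeline; run it with targets::tar_make().
    library(targets)
    tar_option_set(packages = "stats")

    list(
      # Re-read the raw data only when the file changes.
      tar_target(raw_file, "data/trial.csv", format = "file"),
      tar_target(trial_data, read.csv(raw_file)),
      # Downstream targets rebuild automatically when their inputs change.
      tar_target(model, lm(response ~ dose, data = trial_data)),
      tar_target(estimates, coef(summary(model)))
    )

Because the pipeline tracks which inputs each result depends on, an independent researcher can rebuild exactly the out-of-date parts and obtain the same outputs.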
Initiatives such as The Turing Way have been advocating for all stakeholders to understand their roles and responsibilities regarding reproducibility in quantitative research (Arnold et al., 2019). Furthermore, they introduce and promote tools that, while common in the RSE community, are still severely underused in biostatistical settings: version control, containerization, code review, and continuous integration, to name a few. Software engineering in biostatistics (and other research disciplines), for both the creation of bespoke analysis pipelines and reusable software packages, could then greatly benefit from the adoption of modern RSE tools. Finally, the issue of research reproducibility is often too abstract or broad a problem to be appreciated in practice.
3 Opportunities

3.1 Education

In both academia and industry, we need skilled statistical software engineers. But how do we get them? Some university courses include software engineering practices and coding classes for researchers, such as “The Carpentries”, which has been operating since 1998 (The Carpentries, n.d.). If we look at training strategically, we see three main pillars: (1) software education in schools, (2) undergraduate and graduate training at universities, and (3) life-long learning opportunities for academics and industry personnel.
We envision a future where fun coding projects at schools become the norm rather than the notable and often extracurricular exception (see for example Girls Who Code or Jugend Hackt) (Girls Who Code, n.d.; “Jugend hackt – Mit Code die Welt verbessern,” 2019). Bachelor’s and master’s programs in statistics and data science are already incorporating more software skills than they used to, yet, to our knowledge, no specific programs with a focus on statistical software engineering exist. We encourage lecturers to incorporate topics such as good engineering practices (e.g., version control, documentation, modular coding) in current statistics curricula. Further, we suggest that universities cater to the increasing demand for skilled RSEs, particularly in the fields of statistics and data science. As most statisticians have an increasing need for software skills, life-long learning is the third important pillar. Online courses (e.g., Coursera), books (e.g., Software Engineering with Python), RSE workshops, and conferences provide good possibilities to continue learning RSE skills after university studies (Coursera, n.d.; Irving et al., 2021).

We note, as an important development, that many university curricula in the life sciences (e.g., biology and biochemistry) introduce mandatory data science education for their students. While this is still a rather new development, we consider the formal introduction of software engineering skills to students of application domains to be of paramount importance.
3.2 RSE teams
Statistical software engineers can support and train statisticians and researchers with their expertise and build infrastructure for efficient research. RSEs who are integrated into research teams are one part of the puzzle, and dedicated RSE teams are another.

In academia, we currently see dedicated RSE teams being established in many institutions in the United Kingdom. While these are not necessarily focused on statistical software, examples include the RSE groups at the University of Sheffield and the Alan Turing Institute. Another example is the Quantitative Biology Center (QBiC) at the University of Tübingen, which operates as a core facility for the life-science campus and has offered services for data management and bioinformatics analysis since 2012. Sustainable software engineering and reproducible analysis became a central pillar over the last years, resulting in a joint community effort for reproducible bioinformatics analysis (Ewels et al., 2020). At QBiC, the need for sustainable development of business-critical software has been addressed with the implementation of dedicated groups and leadership positions that constantly improve software engineering practices. This includes coding katas (CodeKata, n.d.), pair programming sessions, peer review of pull requests, workshops (e.g., in user experience design), and teaching of software architecture principles. Especially for young developers, the resistance towards sharing code with others and the fear of receiving negative feedback can be high. However, this can be addressed when they participate in these regular sessions of sharing and learning from each other. The implementation of a research software engineering team as a central unit of an academic institution has been critical for strategic developments in biomedical research projects and is also used as a blueprint in similar settings internationally.
Similarly, dedicated RSE teams focused on statistics are being formed in industry settings. One example is the Statistical Engineering team in Roche, which works closely together with applied biostatisticians, methods experts, and IT professionals (Sabanés Bové, 2022). The team develops business-critical R packages, R/Shiny modules, and how-to templates that are used across multiple projects to enable efficient data science solutions. An important part of the strategy is the open-source collaboration with other companies and institutions; for example, see the crmPack example described below. Another example is the Digitalization & Computational Science team in the Bayer Oncology Strategic Business Unit, with a similar focus to the Roche Statistical Engineering team. The team was recently formed and has grown significantly due to the increasing need for professional data science solutions within statistics and data management, but also outside of the classical data science functions, e.g., in Medical Writing and Clinical Operations, to complement and accelerate their current practices and processes using advanced analytics.

For statistics there is still plenty of room for dedicated RSE teams, but with better training (see above) and more attractive career paths (see below) we predict that dedicated statistical software engineering teams will become common soon.
332
+ 3.3 Career paths
333
+ Statistical software engineers are highly valuable experts as they are knowledgeable in research,
334
+ statistics, and software engineering. With that skill set one has plenty of opportunities for work in both
335
+ academia and industry (including self-employment). It is of vital importance for the interested
336
+ employers to provide an attractive work environment. In academia, the discussion already starts with
337
+ valuing software as a research work output and incentivizing good research software. Currently, a
338
+ paper publication is still the most valued output of a research group but we notice increasing demands
339
+ to make software a first class citizen in research and to give credit to software contributors and
340
+ maintainers (Kuzak et al., 2018; Taschuk & Wilson, 2017). Of course, not all RSEs want to or can be
341
+ on the classical academic career path that leads to a professorship. Hence, we need to provide dedicated
342
+ career paths for RSEs that go beyond the postdoc level, such as RSE group leadership (e.g.,
343
+ implemented in the QBiC) or career paths mirroring those of librarians and lecturers. The UK and
344
+ France are among the countries who are already addressing the issue of attractive permanent positions
345
+ for RSEs in academia, but in most countries we see large opportunities for improvement (Society of
346
+ Research Software Engineering, n.d.).
347
+
348
+ Biometrical Journal 63 (2021), ZZZZZZ / DOI 10.1002/bimj.201010000
349
+
350
+ © 2021 Wiley-VCH GmbH, Weinheim
351
+
352
+
353
+
354
+
355
+ www.biometrical-journal.com
356
+
357
+ The demand for software engineers fluent in data science languages such as R and Python is also high
358
+ in the industry (Varney, 2018). This is not restricted to statistics, e.g., chemistry departments are also
359
+ employing software engineers (Python Success Stories, n.d.). It needs to be accounted for that RSEs are
360
+ also in demand by tech companies, and hence the competition for such profiles is high. The search and
361
+ recruitment process including interviews can thus take substantial resources and time. Therefore, it is
362
+ even more important to retain RSEs once they are found and hired and offering a competitive career
363
+ path is an important retainment factor. The career paths in the industry are diverse for biostatisticians
364
+ and software engineers within biostatistics; both contract-based temporary roles as well as permanent
365
+ roles are available. It will be important to allow the same seniority levels and compensation packages
366
+ for software engineers compared to, for example, methodology expert roles or managerial roles.
367
+ 3.4 Reproducibility
368
+ Dedicated software utilities can help to address the reproducibility challenge. To address the
369
+ dependency challenge for R software, renv (Ushey et al., 2022) allows saving the state of the R
370
+ library in a single file and later restoring it from there. Another more recent dependency management
371
+ toolkit for R is the small command line tool Rmageddon which helps to use the R session information
372
+ and resolve all dependencies against Conda’s bioconda and R package channels (Fillinger,
373
+ 2018/2020). The resulting environment file can then be used to build immutable and portable Docker
374
+ containers that can be shared with others. A recent review of tools applied to biostatistics is given by
375
+ Hejblum et al. (2020).
376
+ Approaches such as that pioneered by ReproHack, where workshop participants “attempt to reproduce
377
+ published research of their choice from a list of proposed papers with publicly available associated
378
+ code and data”, are fundamentally important to improve the general awareness and solutions for
379
+ reproducible research (ReproHack Core, n.d.). Practicing research reproducibility as a learning
380
+ experience allows critical failure points (such as software dependencies and versioning, remember for
381
+ instance the abovementioned “isoband” incident) to be fully experienced and appreciated,
382
+ highlighting the inherent complexities of building and maintaining reproducible research pipelines.
383
+ 3.5 Reliability
384
+ Reliability is a key requirement for statistical software, and we mention here several reliability
385
+ initiatives for the R programming language.
386
+ The R Validation Hub is a collaboration to support the adoption of R within a biopharmaceutical
387
+ regulatory setting. The group received funding from the R Consortium and has participants from over
388
+ 60 organizations. In early 2020, the R Validation Hub published a white paper introducing "A risk-
389
+ based approach for assessing R package accuracy within a validated infrastructure” (Nicholls et al.,
390
+ 2020). The framework addresses concerns raised by statisticians, statistical programmers, informatics
391
+ teams, executive leadership, quality assurance teams, and others within the pharmaceutical industry
392
+ about the use of R and selected R packages as a primary tool for statistical analysis for regulatory
393
+ submission work (Nicholls et al., 2020). In a nutshell, there is minimal risk in using base R and
394
+ recommended packages as a component in a validated system for regulatory analysis and reporting
395
+ (The R Foundation for Statistical Computing, 2021).
396
+ Contributed R packages may differ in popularity and accuracy. Risk assessment criteria can be broken
397
+ down into four categories: package purpose, good maintenance practices, testing coverage, and
398
+ community usage. The R Validation Hub has also created tools to facilitate gathering information for
399
+ risk assessment, including the riskmetric R Package and the Risk Assessment
400
+ application (R Validation Hub et al., n.d., 2020/2022). The concept of risk-based R package
401
+ assessment has been implemented by various companies into their standard processes. Reflection and
402
+
403
+ Sabanés Bové et al: Improving Software Engineering in Biostatistics
404
+
405
+ © 2021 Wiley-VCH GmbH, Weinheim
406
+
407
+
408
+
409
+
410
+ www.biometrical-journal.com
411
+
412
+ learnings, including which aspects were easy to implement into practice and where difficulties
413
+ occurred, are discussed in Manitz et al. (2022).
414
+ In addition to sharing ideas and tools on how to check the reliability of R packages, it would be of great
415
+ help to have a repository of R packages which are deemed reliable enough to be used for medical data
416
+ analysis. To this end, the Regulatory R Package Repository working group was established (RC
417
+ Working Group on Repositories, 2021/2022). The idea is to help users to differentiate easily between
418
+ high-quality and standard packages and thus make the right decisions when choosing which software to
419
+ use for a given purpose.
420
+ 3.6 Collaboration
421
+ While intra-organization collaboration is important, we focus here on the opportunities for inter-
422
+ organization collaboration. The availability of real time video conferencing, document sharing and
423
+ editing, code reviewing, editing, and execution for data science applications via cloud-based web
424
+ services is providing an ideal technological infrastructure to seamlessly work together across
425
+ geographies and organizations. With open-source software being hosted on code sharing platforms such
426
+ as GitHub, Gitlab, Bitbucket, etc., it is only a matter of creating an account before being
427
+ able to ask questions to the developers and contribute ideas, documentation, or code to the software
428
+ project.
429
+ While this is also possible for sharing code building on top of proprietary software, it is easier to do for
430
+ packages and modules on top of open-source software, as this allows running of integration checks for
431
+ new code contributions directly on the code sharing platform. Moreover, every interested reader can
432
+ install the underlying open-source software together with the extension packages and try it out after
433
+ finding it online.
434
+ For both industry and academia, publication of code as open source is important. External stakeholders
435
+ for pharmaceutical companies, such as the regulatory authorities, health insurance payer institutions,
436
+ legislators, and the general public rightfully want to know how the data from clinical trials was
437
+ analyzed and summarized into the final published results. Here the possibility of sharing clinical trial
438
+ data is a first step (Hopkins et al., 2018), but the availability of the full stack of software used in the
439
+ data analysis will be an important second step. Interestingly, even for software companies it is
440
+ increasingly a better business model to publish the base version of the software as open source (Sahu,
441
+ 2022).
442
+ For academic research, increasingly the software developed for studying new statistical methods is
443
+ required to be available as online appendices of papers. Here it is a competitive advantage to build on
444
+ open-source software. Starting from the publication of open-source software, it is then natural to
445
+ collaborate across organizations on this common public code base. It would not be of high academic
446
+ value to develop the same functionality in a separate software again, and it would not be good use of
447
+ resources for a company to internally build a software which is available open source already. It is
448
+ economical to start with what is readily available and contribute, especially since the software is getting
449
+ more reliable when the burden of developing, testing, and addressing user requests is shared across
450
+ more developers.
451
+ Certain hurdles might need to be overcome, e.g., discussions with legal departments will be needed
452
+ before publishing the first software modules open source that were previously kept internal in a
453
+ company. Parts of the initial software might need to be split in separate, internally kept modules when
454
+ they access internal APIs. In general, smaller software modules can be more easily reused across
455
+ companies, and therefore loosely coupled software packages should be preferred over tightly connected
456
+ software packages.
457
+
458
+ Biometrical Journal 63 (2021), ZZZZZZ / DOI 10.1002/bimj.201010000
459
+
460
+ © 2021 Wiley-VCH GmbH, Weinheim
461
+
462
+
463
+
464
+
465
+ www.biometrical-journal.com
466
+
467
+ Within the pharmaceutical industry, several initiatives drive forward the synergistic collaboration
468
+ between companies on common open-source software. The R consortium works with and provides
469
+ support to the R Foundation and to the key organizations developing, maintaining, distributing, and
470
+ using R software through the identification, development, and implementation of infrastructure
471
+ projects (R: The R Foundation, n.d.; RConsortium, n.d.). It hosts several working groups that work on
472
+ specific topics, e.g., submissions, tabulations, certifications, and repositories. The Software
473
+ Engineering working group (SWE WG) aims to engineer selected R packages to fill gaps in the open-
474
+ source statistical software landscape and to promote good software engineering practices within
475
+ biostatistics (ASA Biopharmaceutical Section Software Engineering Working Group, 2022). The
476
+ PHUSE organization is a global community and platform for the discussion of statistical programming
477
+ topics, and PSI is a community dedicated to leading and promoting the use of statistics within the
478
+ healthcare industry (PSI Web, 2018; Warren, 2022). Both PHUSE and PSI have working groups
479
+ focused on programming with open-source software.
480
+ 4 Examples
481
+ 4.1 Reproducible bioinformatics analysis pipelines
482
+ A comparably recent example for the community-based implementation of RSE principles and the
483
+ development and maintenance of state-of-the-art scientific software is nf-core (Ewels et al., 2020;
484
+ Nf-Core, n.d.). Nf-core is a community effort and framework that is built on the basis of Nextflow
485
+ as a workflow management system. (Data-Driven Computational Pipelines, n.d.; Di Tommaso et al.,
486
+ 2017), The idea of nf-core is to transparently develop and provide bioinformatic analysis pipelines
487
+ with the scientific community and resolve redundant pipeline development due to silos of individual
488
+ bioinformaticians trying to address similar problems all over the world. The aim is to provide pipelines
489
+ that are reliable, reproducible, and well documented. Continuous integration and testing are a central
490
+ part of the development workflow, and containerization of software dependencies promotes numerical
491
+ stability of results when executing pipelines in different computing environments. What started with a
492
+ handful of bioinformaticians and an idea in 2017 matured to a world-wide community of
493
+ bioinformaticians with regular hackathons, trainings, and tutorials.
494
+ Such approaches to create frameworks can be role models for other domains and analysis types as well.
495
+ Typically, if designed well, such initiatives can start bottom-up without any heavy-lifting at the
496
+ beginning. Rmageddon mentioned above is another example. However, these helper tools need
497
+ support and commitment from motivated RSEs to mature to stable tools that continuously apply
498
+ methods from the software engineering discipline.
499
+ 4.2 Dose escalation R package
500
+ Another example of a successful RSE project concerns the design and analysis of dose escalation trials.
501
+ These trials are often the first experimentation of new drugs in humans with the primary objective of
502
+ finding the maximum tolerable dose along with the recommended dose for further clinical
503
+ development. In these trials, cohorts of patients are added sequentially and decisions regarding the
504
+ dose for the next cohort are made based on the available data. Dose escalation trials are exploratory by
505
+ nature and the design depends often on the properties of the investigational drug and the characteristics
506
+ of the target population. Frequent variations between dose escalation studies can be of operational or
507
+ methodological nature, e.g., inclusion of different dose levels, varying cohort sizes, flexible rules for
508
+ stopping the trial, and the underlying statistical model.
509
+ Designing such a phase I dose escalation trial using state of the art methodology usually includes study
510
+ simulations to derive operational characteristics. In addition, analysis software is needed to support
511
+ dose escalation meetings during the conduct of the trial. Often in-house developed software,
512
+ proprietary/commercially available software, or open-source packages without reliable maintenance are
513
+ used. Problems with this include the maintenance of in-house software, the inflexibility of proprietary
514
+
515
+ Sabanés Bové et al: Improving Software Engineering in Biostatistics
516
+
517
+ © 2021 Wiley-VCH GmbH, Weinheim
518
+
519
+
520
+
521
+
522
+ www.biometrical-journal.com
523
+
524
+ software, and the lack of validation of open-source software. Due to the continuous research and
525
+ proposal of new methods to be used in phase I trials, the problem becomes even more pronounced.
526
+ To overcome these problems, a group of industry, CRO, and academia statisticians came together to
527
+ evaluate the possibility to collaborate on the open-source R package crmPack (Sabanés Bové et
528
+ al., 2019). This package was originally developed at Hoffmann-La Roche Ltd. and open sourced in
529
+ 2015. crmPack provides a simple and unified object-oriented framework for model-based dose
530
+ escalation designs. The package has already been used in some phase I trials in the industry and
531
+ academia individually, often tailoring it further to specific needs of the study. The group found that
532
+ crmPack already covers a wide variety of methods and that different companies and institutions
533
+ extended the package by including additional functions, more documentation, and testing for their
534
+ needs separately. To avoid duplication and ensure continued maintenance, the group agreed to
535
+ collaborate on the further development of the package and develop the package using modern software
536
+ development methods and tools.
537
+ The current workflow takes place on GitHub to ensure version control and reliable collaboration. The
538
+ collaborative work is driven by short iteration cycles by working on small tasks and prioritizing review
539
+ of pull requests. This way of working is fundamentally different from past software development in
540
+ clinical biostatistics, which typically involved long requirements documents and inefficient
541
+ collaborative work. Furthermore, the package is extended by unit tests for the functions to prepare for
542
+ subsequent validation of the package. The collaborative aspects of this collaboration were co-presented
543
+ by members of the development team at the ISCB conference (Boix & Günhan, 2022).
544
+ 5 Discussion
545
+ A key element of modern biostatistics is to bring science and data together to generate knowledge.
546
+ Neither science nor data alone are keys to success. Both must be combined efficiently. The link
547
+ between both are computer programs. This link must be optimized to extract actionable insights from
548
+ the data. For example, by making use of data standards (e.g., CDISC) and moving away from one-off
549
+ analysis scripts to reusable analysis software for standard tasks, researchers would be relieved of some
550
+ menial work and could instead focus on complex tasks that add value to their organizations. Analysis
551
+ methods are getting more complex, and the volume of data is increasing. Thus, special expertise is
552
+ needed to link both in a strong way. For this, state-of-the art programming methodology must be used
553
+ and should not merely be conducted alongside methodological research or data management. In our
554
+ view, implementing Research Software Engineering (RSE) as a recognized and dedicated profession,
555
+ jointly with a basic RSE skills education for all statisticians, is the way forward in fulfilling these
556
+ needs. RSE can facilitate cross-functional discussion and would support other functions to implement
557
+ novel ideas. This would not only include multidisciplinary collaboration within an organization, but
558
+ also structured cooperation across both academia and industry.
559
+ Today, we must also acknowledge the needed effort on maintenance and development for open-source
560
+ projects. The open-source community approach would benefit from RSE by dedicated experts who
561
+ focus on maintenance and rigor development of statistical software. For example, the current level of
562
+ interdependencies sometimes seen between R packages is somewhat like the “Spaghetti Crisis”
563
+ identified 40 years ago (Steele, 1977), leading to the implementation of a more structured code
564
+ development including dedicated informatic career path. In other words, companies and academia must
565
+ spend dedicated resources. Having dedicated research engineers would account for this, as the saying
566
+ goes, “nothing comes from nothing”.
567
+ An important element to foster RSE is that it must be regarded as a profession of its own with an
568
+ adequate placement within the job hierarchy of academia and industry. Young researchers must see this
569
+ as a desirable career opportunity. It is a serious, complex, and important task not to be underestimated.
570
+ Software Engineering is a little bit like riding a bicycle, in that most of us possess the basic skillset to
571
+ ride a bike, but that does not necessarily make us experts in designing racing bikes. RSE is more than
572
+
573
+ Biometrical Journal 63 (2021), ZZZZZZ / DOI 10.1002/bimj.201010000
574
+
575
+ © 2021 Wiley-VCH GmbH, Weinheim
576
+
577
+
578
+
579
+
580
+ www.biometrical-journal.com
581
+
582
+ coding. Knowing basic programming language syntax is just a start, not the end. It requires objective
583
+ input from “outside”, collaborating with experts and more experienced peers can significantly boost
584
+ one’s skills.
585
+ In conclusion, the way of software development has drastically changed with the introduction of the
586
+ open-source concept. Similarly, the daily tasks for biostatisticians in academia and industry have
587
+ drastically changed. We must change our ways to match. It is time to improve software engineering in
588
+ biostatistics.
589
+ Acknowledgements:
590
+ We would like to thank Andy Nicholls and Martin Shaw who also participated in the
591
+ panel discussion at ISCB 43.
592
+ Conflict of Interest
593
+ The authors have declared no conflict of interest.
594
+
595
+
596
+
597
+
598
+
599
+
600
+
601
+
602
+
603
+
604
+
605
+
606
+
607
+
608
+
609
+
610
+
611
+
612
+
613
+
614
+
615
+
616
+
617
+
618
+
619
+
620
+
621
+
622
+
623
+
624
+
625
+
626
+ Sabanés Bové et al: Improving Software Engineering in Biostatistics
627
+
628
+ © 2021 Wiley-VCH GmbH, Weinheim
629
+
630
+
631
+
632
+
633
+ www.biometrical-journal.com
634
+
635
+ References
636
+ Anzt, H., Bach, F., Druskat, S., Löffler, F., Loewe, A., Renard, B. Y., Seemann, G., Struck, A., Achhammer, E.,
637
+ Aggarwal, P., Appel, F., Bader, M., Brusch, L., Busse, C., Chourdakis, G., Dabrowski, P. W., Ebert, P.,
638
+ Flemisch, B., Friedl, S., … Weeber, R. (2021). An environment for sustainable research software in
639
+ Germany and beyond: Current state, open challenges, and call for action. F1000Research, 9, 295.
640
+ https://doi.org/10.12688/f1000research.23224.2
641
+ Arnold, B., Bowler, L., Gibson, S., Herterich, P., Higman, R., Krystalli, A., Morley, A., O’Reilly, M., &
642
+ Whitaker, K. (2019). The Turing Way: A Handbook for Reproducible Data Science (v0.0.4). Zenodo.
643
+ https://doi.org/10.5281/ZENODO.3233986
644
+ ASA Biopharmaceutical Section Software Engineering Working Group. (2022). ASA BIOP SWE WG.
645
+ https://rconsortium.github.io/asa-biop-swe-wg/
646
+ Begley, C. G., & Ioannidis, J. P. A. (2015). Reproducibility in Science. Circulation Research, 116(1), 116–126.
647
+ https://doi.org/10.1161/CIRCRESAHA.114.303819
648
+ Boix, O., & Günhan, B. K. (2022, August 23). A collaborative approach to software development; The crmPack
649
+ experience. ISCB 2022 Conference, Newcastle, UK. https://www.burakguenhan.com/talk/crmpack/
650
+ Cacho, J. R. F., & Taghva, K. (2020). The State of Reproducible Research in Computer Science. In S. Latifi
651
+ (Ed.), 17th International Conference on Information Technology–New Generations (ITNG 2020) (pp. 519–
652
+ 524). Springer International Publishing. https://doi.org/10.1007/978-3-030-43020-7_68
653
+ CodeKata. (n.d.). Retrieved November 30, 2022, from http://codekata.com/
654
+ Cosentino, V., Izquierdo, J. L. C., & Cabot, J. (2015). Assessing the bus factor of Git repositories. 2015 IEEE
655
+ 22nd International Conference on Software Analysis, Evolution, and Reengineering (SANER), 499–503.
656
+ https://doi.org/10.1109/SANER.2015.7081864
657
+ Coursera. (n.d.). Coursera. Retrieved November 30, 2022, from https://www.coursera.org/
658
+ Data-driven
659
+ computational
660
+ pipelines.
661
+ (n.d.).
662
+ Nextflow.
663
+ Retrieved
664
+ November
665
+ 30,
666
+ 2022,
667
+ from
668
+ https://www.nextflow.io/
669
+ de Waal, Weaver, Day, & van der Heijden. (2019). Silo-Busting: Overcoming the Greatest Threat to
670
+ Organizational Performance. Sustainability, 11(23), 6860. https://doi.org/10.3390/su11236860
671
+ Di Tommaso, P., Chatzou, M., Floden, E. W., Barja, P. P., Palumbo, E., & Notredame, C. (2017). Nextflow
672
+ enables
673
+ reproducible
674
+ computational
675
+ workflows.
676
+ Nature
677
+ Biotechnology,
678
+ 35(4),
679
+ Article
680
+ 4.
681
+ https://doi.org/10.1038/nbt.3820
682
+ European Medicines Agency. (1998). ICH Topic E9 Statistical Principles for Clinical Trials (p. 37)
683
+ [CPMP/ICH/363/96].
684
+ Ewels, P. A., Peltzer, A., Fillinger, S., Patel, H., Alneberg, J., Wilm, A., Garcia, M. U., Di Tommaso, P., &
685
+ Nahnsen, S. (2020). The nf-core framework for community-curated bioinformatics pipelines. Nature
686
+ Biotechnology, 38(3), Article 3. https://doi.org/10.1038/s41587-020-0439-x
687
+ Fillinger, S. (2020). Rmageddon [Python]. QBiC. https://github.com/qbicsoftware/rmageddon-cli (Original work
688
+ published 2018)
689
+ Girls Who Code. (n.d.). Girls Who Code. Retrieved November 30, 2022, from https://girlswhocode.com/
690
+ Hejblum, B. P., Kunzmann, K., Lavagnini, E., Hutchinson, A., Robertson, D. S., Jones, S. C., & Eckes-Shephard,
691
+ A. H. (2020). Realistic and Robust Reproducible Research for Biostatistics (No. 2020060002). Preprints.
692
+ https://doi.org/10.20944/preprints202006.0002.v1
693
+ Hopkins, A. M., Rowland, A., & Sorich, M. J. (2018). Data sharing from pharmaceutical industry sponsored
694
+ clinical studies: Audit of data availability. BMC Medicine, 16(1), 165. https://doi.org/10.1186/s12916-018-
695
+ 1154-z
696
+ Irving, D., Hertweck, K., Johnston, L., Ostblom, J., Wickham, C., & Wilson, G. (2021, October 14). Research
697
+ Software Engineering with Python. https://merely-useful.tech/py-rse/
698
+ ISO/IEC/IEEE International Standard—Systems and software engineering–Vocabulary. (2017). ISO/IEC/IEEE
699
+ 24765:2017(E), 1–541. https://doi.org/10.1109/IEEESTD.2017.8016712
700
+
701
+ Biometrical Journal 63 (2021), ZZZZZZ / DOI 10.1002/bimj.201010000
702
+
703
+ © 2021 Wiley-VCH GmbH, Weinheim
704
+
705
+
706
+
707
+
708
+ www.biometrical-journal.com
709
+
710
+ Jugend hackt – Mit Code die Welt verbessern. (2019, July 11). Jugend hackt. https://jugendhackt.org/
711
+ Kuzak, M., Cruz, M., Thiel, C., Sufi, S., & Eisty, N. (2018, November 28). Making Software a First-Class Citizen
712
+ in Research. WSSSPE6.1 Speed Blog. https://software.ac.uk/blog/2018-11-28-making-software-first-class-
713
+ citizen-research
714
+ Landau, W. M. (2021). The targets R package: A dynamic Make-like function-oriented pipeline toolkit for
715
+ reproducibility and high-performance computing. Journal of Open Source Software, 6(57), 2959.
716
+ https://doi.org/10.21105/joss.02959
717
+ Manitz, J., Nicholls, A., Gotti, M., Kelkhoff, D., Clark, A., Palukuru, U. P., & Taylor, L. (2022). Risk Assessment
718
+ of R Packages: Learning and Reflections (No. 3; Biopharmaceutical Report, pp. 3–10). American
719
+ Statistical Association. https://higherlogicdownload.s3.amazonaws.com/AMSTAT/fa4dd52c-8429-41d0-
720
+ abdf-0011047bfa19/UploadedImages/BIOP%20Report/BioPharm_fall2022FINAL.pdf
721
+ Martin, R. C. (2009). Clean code: A handbook of agile software craftsmanship. Prentice Hall.
722
+ Mullane, K., Curtis, M. J., & Williams, M. (2018). Chapter 1—Reproducibility in Biomedical Research. In M.
723
+ Williams, M. J. Curtis, & K. Mullane (Eds.), Research in the Biomedical Sciences (pp. 1–66). Academic
724
+ Press. https://doi.org/10.1016/B978-0-12-804725-5.00001-X
725
+ Nf-core. (n.d.). Nf-Core. Retrieved November 30, 2022, from https://nf-co.re/pipelines
726
+ Nicholls, A., Bargo, P., R., & Sims, J. (2020). A Risk-based Approach for Assessing R package Accuracy within
727
+ a Validated Infrastructure. https://www.pharmar.org/white-paper/
728
+ Niven, D. J., McCormick, T. J., Straus, S. E., Hemmelgarn, B. R., Jeffs, L., Barnes, T. R. M., & Stelfox, H. T.
729
+ (2018). Reproducibility of clinical research in critical care: A scoping review. BMC Medicine, 16(1), 26.
730
+ https://doi.org/10.1186/s12916-018-1018-6
731
+ Peng, R. D., Dominici, F., & Zeger, S. L. (2006). Reproducible Epidemiologic Research. American Journal of
732
+ Epidemiology, 163(9), 783–789. https://doi.org/10.1093/aje/kwj093
733
+ PSI Web. (2018). PSI. https://www.psiweb.org
734
+ Python
735
+ Success
736
+ Stories.
737
+ (n.d.).
738
+ Python.Org.
739
+ Retrieved
740
+ November
741
+ 30,
742
+ 2022,
743
+ from
744
+ https://www.python.org/about/success/astra/
745
+ R: The R Foundation. (n.d.). Retrieved November 30, 2022, from https://www.r-project.org/foundation/
746
+ R Validation Hub, Gotti, M., Clark, A., Krajcik, R., Gans, M., Kallem, A., & Fission Labs India. (2022).
747
+ PharmaR/risk_assessment [R]. pharmaR. https://github.com/pharmaR/risk_assessment (Original work
748
+ published 2020)
749
+ R Validation Hub, Kelkhoff, D., Gotti, M., Miller, E., K, K., Zhang, Y., Miliman, E., & Manitz, J. (n.d.).
750
+ Riskmetric [R]. Retrieved November 30, 2022, from https://pharmar.github.io/riskmetric/
751
+ RC Working Group on Repositories. (2022, November 14). https://github.com/RConsortium/r-repositories-wg
752
+ (Original work published 2021)
753
+ RConsortium. (n.d.). R Consortium. Retrieved November 30, 2022, from https://www.r-consortium.org/
754
+ ReproHack
755
+ Core.
756
+ (n.d.).
757
+ ReproHack
758
+ Hub.
759
+ ReproHack.
760
+ Retrieved
761
+ November
762
+ 30,
763
+ 2022,
764
+ from
765
+ https://www.reprohack.org/
766
+ Sabanés Bové, D. (2022, November). Working at the intersection of Biostatistics and Software Engineering in a
767
+ Pharma company. 2022 Conference. https://rinpharma.com/publication/rinpharma_271/
768
+ Sabanés Bové, D., Yeung, W. Y., Palermo, G., & Jaki, T. (2019). Model-Based Dose Escalation Designs in R
769
+ with crmPack. Journal of Statistical Software, 89, 1–22. https://doi.org/10.18637/jss.v089.i10
770
+ Sahu, A. (2022, August 17). Why making your product’s code free is a competitive advantage. World Economic
771
+ Forum. https://www.weforum.org/agenda/2022/08/open-source-companies-competitive-advantage-free-
772
+ product-code/
773
+ Sanchez, R., Griffin, B. A., Pane, J., & McCaffrey, D. F. (2021). Best practices in statistical computing. Statistics
774
+ in Medicine, 40(27), 6057–6068. https://doi.org/10.1002/sim.9169
775
+ Schönbrodt, F. (2022, May 23). Research Software in Academic Hiring and Promotion: A proposal for how to
776
+ assess it. https://www.nicebread.de/research-software-in-academic-hiring/
777
+
778
+ Sabanés Bové et al: Improving Software Engineering in Biostatistics
779
+
780
+ © 2021 Wiley-VCH GmbH, Weinheim
781
+
782
+
783
+
784
+
785
+ www.biometrical-journal.com
786
+
787
+ Seibold, H., Charlton, A., Boulesteix, A., & Hoffmann, S. (2021). Statisticians, roll up your sleeves! There’s a
788
+ crisis to be solved. Significance, 18(4), 42–44. https://doi.org/10.1111/1740-9713.01554
789
+ Society of Research Software Engineering. (n.d.). Careers. Society of Research Software Engineering. Retrieved
790
+ November 30, 2022, from https://society-rse.org/careers/
791
+ Steele, G. L. (1977). Macaroni is better than spaghetti. ACM SIGPLAN Notices, 12(8), 60–66.
792
+ https://doi.org/10.1145/872734.806933
793
+ Stupple, A., Singerman, D., & Celi, L. A. (2019). The reproducibility crisis in the age of digital medicine. Npj
794
+ Digital Medicine, 2(1), 2. https://doi.org/10.1038/s41746-019-0079-z
795
+ Szymański, R. (2022, December 10). CRAN and the Isoband Incident—Is Your Project at Risk and How to Fix
796
+ It. https://appsilon.com/cran-and-the-isoband-incident/
797
+ Taschuk, M., & Wilson, G. (2017). Ten simple rules for making research software more robust. PLOS
798
+ Computational Biology, 13(4), e1005412. https://doi.org/10.1371/journal.pcbi.1005412
799
+ The Carpentries. (n.d.). The Carpentries. Retrieved November 30, 2022, from https://carpentries.org/index.html
800
+ The Open Source Definition. (2007). Open Source Initiative. https://opensource.org/osd
801
+ The R Foundation for Statistical Computing. (2021). R: Regulatory Compliance and Validation Issues A
802
+ Guidance Document for the Use of R in Regulated Clinical Trial Environments. https://www.r-
803
+ project.org/doc/R-FDA.pdf
804
+ Theußl, S., Ligges, U., & Hornik, K. (2011). Prospects and challenges in R package development. Computational
805
+ Statistics, 26(3), 395–404. https://doi.org/10.1007/s00180-010-0205-5
806
+ Ushey, K., RStudio, & PBC. (2022).
807
+ renv: Project Environments (0.16.0). https://CRAN.R-
808
+ project.org/package=renv
809
+ Varney, B. (2018, February 18). Why R? The Next Generation in Pharma. Pubs - Bio-IT World. https://www.bio-
810
+ itworld.com/news/2022/02/18/why-r-the-next-generation-in-pharma
811
+ Warren, K. (2022, October 2). Welcome to the PHUSE Advance Hub—WORKING GROUPS - PHUSE Advance
812
+ Hub. PHUSE. https://advance.phuse.global/
813
+
814
+
A9FKT4oBgHgl3EQfWS5f/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
C9E0T4oBgHgl3EQfQQB9/content/tmp_files/2301.02190v1.pdf.txt ADDED
@@ -0,0 +1,1371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ A general framework for implementing distances for categorical
2
+ variables
3
+ Michel van de Velden
4
+ Alfonso Iodice D’Enza
5
+ Angelos Markos
6
+ Carlo Cavicchia
7
+ January 6, 2023
8
+ Abstract
9
+ The degree to which subjects differ from each other with respect to certain properties measured
10
+ by a set of variables, plays an important role in many statistical methods. For example, classification,
11
+ clustering, and data visualization methods all require a quantification of differences in the observed
12
+ values. We can refer to the quantification of such differences, as distance. An appropriate definition
13
+ of a distance depends on the nature of the data and the problem at hand. For distances between
14
+ numerical variables, there exist many definitions that depend on the size of the observed differences.
15
+ For categorical data, the definition of a distance is more complex, as there is no straightforward
16
+ quantification of the size of the observed differences. Consequently, many proposals exist that can
17
+ be used to measure differences based on categorical variables. In this paper, we introduce a general
18
+ framework that allows for an efficient and transparent implementation of distances between observations
19
+ on categorical variables. We show that several existing distances can be incorporated into the framework.
20
+ Moreover, our framework quite naturally leads to the introduction of new distance formulations and
21
+ allows for the implementation of flexible, case and data specific distance definitions. Furthermore, in
22
+ a supervised classification setting, the framework can be used to construct distances that incorporate
23
+ the association between the response and predictor variables and hence improve the performance of
24
+ distance-based classifiers.
25
+ 1
26
+ Introduction
27
+ In many statistical methods, the quantification of dissimilarity, that is, the degree to which objects differ
28
+ from each other, plays an important role. We can refer to such dissimilarity quantification as a distance.
29
+ Classification methods such as K-Nearest Neighbors (KNN, Cover and Hart 1967), but also clustering
30
+ methods as K-means, (MacQueen 1967), partitioning around medoids (PAM, Kaufman and Rousseeuw
31
+ 1990) and hierarchical linkage methods (Gordon 1999), and data visualization methods such as multidi-
32
+ mensional scaling (Borg and Groenen 2005) and biplots (Gabriel 1971; Gower, Lubbe, and Le Roux 2011),
33
+ require a definition of distance between subjects and/or objects. The way to select a definition of distance
34
+ depends on the nature of the data and problem at hand.
35
+ Distance measures for numerical data are typically based on the magnitude of the observed differences
36
+ in values (for a list of different distance measures, see, e.g., Mardia 1978). For categorical data, however,
37
+ the situation is more complex, as we do not directly observe, and hence cannot directly quantify, sizes of
38
+ differences. We can only directly establish whether there is a difference or not.
39
+ For distance calculations in multivariate contexts, two cases can be distinguished. First, the distances
40
+ are calculated for each variable independently and then added. Second, the association between the
41
+ variables is taken into account when calculating the distances. For numerical variables, several well-known
42
+ distances, for example, Euclidean or Manhattan distances, implicitly assume independence between the
43
+ variables. Obviously, in such “independent” cases, the measurement scales must be commensurable. For
44
+ categorical variables, there are also several measures that take the sum of dissimilarities per variable when
45
+ considering a multivariate distance. For example, in simple matching, distance between two observations is
46
+ defined as the number of times that the categories of corresponding variables do not match.
47
+ For numerical variables, the association between variables can be accounted for using the Mahalanobis
48
+ distance, where (sample) covariances are used to weigh observed differences to account for correlation
49
+ 1
50
+ arXiv:2301.02190v1 [stat.ML] 4 Jan 2023
51
+
52
+ between the variables. For categorical data, so-called association-based distances exist. In such distances,
53
+ the association between categorical variables is used to quantify differences between observations. The
54
+ question of how to account for associations in a categorical setting is not trivial. Several relatively recent
55
+ new proposals for distances between categorical variables are indeed association-based distances (see, e.g.,
56
+ Le and Ho 2005; Ahmad and Dey 2007; Jia, Cheung, and Liu 2014; Ring et al. 2015).
57
+ The complexity of defining a distance for categorical variables, and recent interest in this topic, is
58
+ illustrated by a wide range of articles that review existing (e.g., Boriah, Chandola, and Kumar 2008; Alves,
59
+ Couceiro, and Napoli 2019) or introduce (new) distances (e.g., Le and Ho 2005; Ahmad and Dey 2007; Jia,
60
+ Cheung, and Liu 2014; Ring et al. 2015; Šulc and ˇRezanková 2019; Bai and Liang 2022). In this paper,
61
+ we propose a general framework for implementing categorical distances. Our framework can be used to
62
+ incorporate existing categorical variable distances, but it also allows researchers to define and implement
63
+ new or customized distances.
64
+ By reformulating existing distances in our framework, it becomes possible to assess the differences
65
+ and similarities between them. Currently, such comparisons are not trivial due to the wide variety of
66
+ notation and research fields (and hence objectives) in which methods have been proposed. In addition, our
67
+ framework makes it possible to construct and define new and highly customizable distances. For example,
68
+ in a supervised classification context, the framework can be used to define a distance that takes into account
69
+ association with the classes of the response variable.
70
+ As our framework is not method- or application-specific, it can be used to calculate distance matrices
71
+ for any method or application requiring distance calculations. For example, multidimensional scaling,
72
+ cluster analysis, or, in a supervised context, K-nearest neighbors. We show that distance calculations
73
+ using the framework are fast, efficient, and transparent and can be a significant improvement over existing
74
+ implementations. In particular, we show that for the distance for categorical variables proposed in Ahmad
75
+ and Dey (2007), our implementation is much faster than existing implementations.
76
+ An important issue with regard to the definition of distance measures is the validation of the measures.
77
+ That is, how does one know that the chosen measure is appropriate? Although the new framework does
78
+ not provide an answer to this question, having one general formulation simplifies both theoretical and
79
+ empirical comparisons between different choices.
80
+ We illustrate our method by applying distance-based data analysis methods to several well-known
81
+ categorical data sets using a selection of known and new, association-based, categorical distances. We
82
+ implemented functions to perform all categorical distance calculations using our general framework in the
83
+ R package catdist, which is available on GitHub1 and is soon to be released on CRAN.
84
+ This paper is organized as follows. After introducing some notation in Section 2, we describe our general
85
+ framework in Section 3. In Section 4, we introduce several common distance measures for categorical
86
+ variables and show how they can be incorporated. Categorical distances based on co-occurrences are
87
+ introduced in Section 5, with particular attention to a distance measure proposed by Ahmad and Dey (2007).
88
+ In Section 6, we show how supervised distances can be constructed and implemented using our framework.
89
+ Tuning of distance definitions is described in Section 7, after which we illustrate our methodology using
90
+ several data sets in Section 8. Section 9 concludes the paper.
91
+ 2
92
+ Notation
93
+ Suppose that we have n observations on Q categorical variables and let the number of categories for the
94
+ j ∈ 1...Q-th variable be qj. We can then code the categorical data by using indicator matrices. That is, for
95
+ each categorical variable j ∈ 1...Q, we create an n×qj binary matrix Z j, where the n rows correspond
96
+ to observations and the qj columns to categories. The observed category is indicated by a one, and all
97
+ other categories are assigned zeros. Furthermore, for each observation of a categorical variable, exactly one
98
+ category is observed, and we only include categories that have been observed at least once in the data set.
99
+ Hence, each column of Z j contains at least one element equal to one and Z j1q j = 1n, where, generically, 1i
100
+ denotes an i by 1 vector of ones. That is, the sum over the columns is 1.
101
+ 1https://github.com/alfonsoIodiceDE/catdist_package
102
+ 2
103
+
104
+ Using these indicator matrices, we can code data on Q categorical variables in a so-called super-indicator
105
+ matrix by collecting all indicator matrices next to each other. That is,
106
+ Z =
107
+
108
+ Z1
109
+ ...
110
+ ZQ
111
+
112
+ .
113
+ Furthermore, define
114
+ P = 1
115
+ nZ′Z,
116
+ (1)
117
+ and
118
+ Pd = 1
119
+ n
120
+
121
+ Z′Z
122
+
123
+ ⊙IQ∗,
124
+ (2)
125
+ where ⊙ indicates the Hadamard product, that is, element-wise multiplication, and Q∗ = ∑Q
126
+ j=1 qj. Note
127
+ that Pd is a diagonal matrix with as its diagonal elements the observed relative frequencies (within each
128
+ variable) for the categories. Moreover, let
129
+ p = Pd1Q∗
130
+ (3)
131
+ denote the vector of observed relative frequencies, and
132
+ p− = P−1
133
+ d 1Q∗
134
+ (4)
135
+ is the vector of inverse observed relative frequencies.
136
+ Note that the i j-th off-diagonal block of P gives the relative frequencies of co-occurrences for the
137
+ categories of variables i and j. They can be seen as (empirical) joint probability distributions for variables i
138
+ and j. For the calculation of association-based distances in Section 5, we also define
139
+ R = P−1
140
+ d (P−Pd).
141
+ (5)
142
+ The rows of the i j-th off-diagonal block of R give, for the categories of the i-th variable, the distributions
143
+ over the categories of the j-th variable. These can be interpreted as (empirical) conditional distributions.
144
+ 3
145
+ Categorical distance calculations based on category dissimilarities
146
+ For a categorical variable, it is not obvious how to quantify differences between different categories. For
147
+ example, suppose that we observe three individuals, one from the Netherlands, one from Italy, and one
148
+ from Greece. Geographically, and perhaps also culturally, Italy and Greece are more similar than the
149
+ Netherlands. How to take such differences into account is, however, not trivial. In our framework, we do so
150
+ by defining category dissimilarities.
151
+ A matrix ∆∆∆ j is the category dissimilarity matrix for variable j. The elements of this matrix, δab, where
152
+ a and b indicate two categories of variable j, quantify the dissimilarities between the categories a and b
153
+ of the j-th variable. We can impose conditions on the dissimilarity matrix that are consistent with typical
154
+ distance definitions. That is, 1) the dissimilarity of a category from itself is zero (δaa = 0, for all categories).
155
+ 2) Dissimilarities are symmetric (δab = δba, for all pairs of categories). 3) Dissimilarities satisfy the triangle
156
+ inequality. That is, if a,b and c denote different categories for a variable j, then, for all categories a,b and
157
+ c,
158
+ δac ≤ δab +δbc.
159
+ If all three of these conditions are satisfied, the dissimilarities can be considered as metric distances between
160
+ categories. If they are non-negative and satisfy only the first two conditions, they can be interpreted as
161
+ non-metric distances between categories. However, we refer to them as category dissimilarities and reserve
162
+ the term “distance” for the distances between observations.
163
+ 3
164
+
165
+ If we have Q categorical variables, each with a category dissimilarity matrix ∆∆∆ j, we can construct a
166
+ Q∗ ×Q∗, block diagonal matrix ∆∆∆, with separate category dissimilarity matrices as diagonal blocks.
167
+ The category dissimilarity matrices can be used to calculate a between observations distance matrix
168
+ as follows. First, consider the n by qj indicator matrix Zj corresponding to the j-th categorical variable.
169
+ Furthermore, we have the corresponding category dissimilarity matrix ∆∆∆ j. We can formulate the following
170
+ theorems:
171
+ Theorem 1. The distances between the observations for the categorical variable j are
172
+ Dj = Z j∆∆∆ jZ′
173
+ j.
174
+ Proof. The matrix multiplication of the row i of Zj with ∆∆∆ j selects the row of ∆∆∆ j corresponding to the
175
+ category chosen by the individual i. Similarly, matrix multiplication of this row by the i′-th column of Z′
176
+ j
177
+ (i.e., the i′-th observation) selects the element corresponding to the category chosen by the individual i′.
178
+ Hence, the (i,i′)-th element of D j is the dissimilarity between the categories chosen by individuals i and
179
+ i′.
180
+
181
+ Theorem 2. If we define the distance between observations on Q categorical variables as the sum of Q
182
+ dissimilarities for each categorical variable, the n×n distance matrix can be calculated as
183
+ D = Z∆∆∆Z′.
184
+ (6)
185
+ Proof.
186
+ D = Z∆∆∆Z′
187
+ =
188
+
189
+ Z1
190
+ ...
191
+ ZQ
192
+
193
+
194
+
195
+
196
+ ∆∆∆1
197
+ ...
198
+ ∆∆∆Q
199
+
200
+
201
+
202
+
203
+
204
+
205
+ Z′
206
+ 1...
207
+ Z′
208
+ Q
209
+
210
+
211
+
212
+ =
213
+ Q
214
+
215
+ j=1
216
+ Zj∆∆∆ jZ′
217
+ j
218
+ =
219
+ Q
220
+
221
+ j=1
222
+ Dj
223
+
224
+ From (6), it follows that distances between observations of categorical variables depend on the choices
225
+ of the category dissimilarity matrices ∆∆∆ j. This allows for great flexibility in defining a suitable distance
226
+ measure for a set of categorical variables. In the next section, we briefly review some choices for ∆∆∆, and
227
+ we show how they relate to existing distances.
228
+ Note that associations between categorical variables are not explicitly incorporated in this formulation.
229
+ That is, the differences between the categories observed for one variable are not related to the differences
230
+ in the categories observed for other variables. There are, however, ways to account for such observations.
231
+ For example, rather than creating an indicator matrix for each categorical variable, one could construct an
232
+ indicator matrix for all possible combinations (or subsets thereof) of observations. That is, one can create,
233
+ for each (or a subset of) combination of categories one indicator matrix. The number of columns of such a
234
+ matrix is therefore ∏Q
235
+ j=1 qj and only one category dissimilarity matrix is needed where each category is a
236
+ combination of the categories for all Q variables. However, with several categorical variables, the total
237
+ number of combinations and hence the number of categories of the final indicator matrix quickly becomes
238
+ large. Furthermore, finding an appropriate category dissimilarity matrix for the combinations is not a trivial
239
+ task.
240
+ An alternative way to account for associations between the categorical variables is to use them in
241
+ the construction of the category dissimilarity matrices. That is, by defining the dissimilarities between
242
+ the categories of a variable in ∆∆∆ j, based on the associations with other variables. In Section 5, we give
243
+ examples of such category dissimilarity measures.
244
+ 4
245
+
246
+ 3.1
247
+ Distances between sets
248
+ Suppose that we have two separate sets of observations on the same Q categorical variables. Data for these
249
+ two sets can be collected in the n1 ×Q∗ and n2 ×Q∗ super indicator matrices Z(1) and Z(2). Then, for a
250
+ known category dissimilarity matrix ∆∆∆, it is easily verified that the distances between the observations for
251
+ the two sets can be calculated as
252
+ D(12) = Z(1)∆∆∆Z(2)′.
253
+ (7)
254
+ Note that the matrix D(12) is of order n1 ×n2.
255
+ The calculation of distances between sets can be useful when considering distance-based classification
256
+ problems. In KNN, for example, distances between “new” (unlabeled) observations and observations in a
257
+ labeled data set are required. The KNN predictions are based on (usually by majority vote) the labels of
258
+ the K nearest neighbors in the training set. Similarly, in partitioning around medoids, a popular clustering
259
+ algorithm similar to K-means, where instead of considering within-cluster variation around the mean,
260
+ variation around an actual observation, the medoid, is considered. If the medoids are collected in Z(1) and
261
+ “new” data points in Z(2), we can assign the new points to existing clusters considering the distances D(12)
262
+ and selecting the smallest distances.
263
+ 4
264
+ Independent category dissimilarity matrices
265
+ We first consider several definitions of dissimilarity for categorical data that do not take into account the
266
+ association between variables. Hence, in a multivariate context, distances are calculated as the sum of
267
+ distances per variable, and for each variable, the category dissimilarities are independent of the observations
268
+ on other variables. However, category dissimilarities may depend on the observed frequencies for a variable.
269
+ We do not aim to be complete with respect to the different definitions of dissimilarity. Instead, we select
270
+ definitions from Šulc and ˇRezanková (2019) (which contain several definitions also reviewed in Boriah,
271
+ Chandola, and Kumar 2008; Alves, Couceiro, and Napoli 2019), and transform them into dissimilarities. We
272
+ show how these dissimilarities can be incorporated into our framework by defining the appropriate category
273
+ dissimilarity matrices ∆∆∆. For a more detailed description, as well as study on the relative performances of
274
+ these dissimilarity definitions in a cluster analysis setting, see Šulc and ˇRezanková (2019).
275
+ 4.1
276
+ Overlap or simple matching
277
+ The idea of simple matching (SM) is that the distance between observations is 1 if the categories do not
278
+ match and 0 if they do. Consequently, all different between category dissimilarities are 1. For the j-th
279
+ categorical variable with q j categories, we define
280
+ ∆∆∆Mj = 1q j1′
281
+ qj −Iq j.
282
+ That is, the distance between each category is exactly 1. If we have Q categorical variables and want to
283
+ calculate a simple matching for all variables, we simply collect all ∆∆∆Mj in a block diagonal matrix ∆∆∆M.
284
+ ∆∆∆M = Kb −IQ∗,
285
+ where Kb is a Q∗ ×Q∗ block diagonal matrix with, for j = 1...Q, qj ×q j matrices (Kb j) of ones as its
286
+ diagonal blocks.
287
+ 4.2
288
+ Eskin
289
+ For Eskin distance (Eskin et al. 2002), category dissimilarities depend on the number of categories.
290
+ Dissimilarities for variables with more categories are smaller than dissimilarities for variables with fewer
291
+ categories. In particular, the dissimilarity between different categories for a variable with qj categories is
292
+ 5
293
+
294
+ 2/q2
295
+ j. Therefore, for the j-th categorical variable with qj categories, the category dissimilarity matrix is
296
+ defined as
297
+ ∆∆∆Ej =
298
+
299
+ 2/q2
300
+ j
301
+ ��
302
+ 1qj1′
303
+ q j −Iqj
304
+
305
+ .
306
+ Collecting all Q dissimilarity matrices in a block diagonal matrix produces the Eskin category dissimilarity
307
+ matrix ∆∆∆E. If all variables have the same number of categories, Eskin merely re-scales the simple matching
308
+ dissimilarity.
309
+ 4.3
310
+ Lin
311
+ Lin (1998) proposed an information-theoretic measurethat gives more weight to matches on frequent values
312
+ and lower weight to mismatches on infrequent values. We implement Lin’s proposal as follows: define
313
+ Pr = p1′
314
+ Q∗ and Pc = 1Q∗p′, where p is as defined in 3. Furthermore, let �P = Pr + Pc − Pd. Then, the
315
+ category dissimilarity matrix can be defined as
316
+ ∆∆∆Lin =
317
+
318
+ log(Pr)+log(Pc)−2log(�P)
319
+
320
+ ⊘2log(Pr +Pc),
321
+ where ⊘ indicates the Hadamard division (i.e., element-wise) and log(·) takes the logarithms of the
322
+ elements of the parenthesized object and collects them in an object of the same size.
323
+ Note that Lin’s dissimilarity for a category with itself is zero. Furthermore, in our implementation, for
324
+ each variable, the dissimilarity between different categories, say categories a and b, is
325
+ [log(pa)+log(pb)−2log(pa + pb)]/2log(pa + pb),
326
+ where pa and pb are, respectively, the relative frequencies of categories a and b.
327
+ 4.4
328
+ Inverse occurrence frequency
329
+ For inverse occurrence frequency (IOF, Boriah, Chandola, and Kumar 2008), a higher dissimilarity is
330
+ assigned when categories are more frequently observed. In particular, the category dissimilarity matrix is
331
+ defined as
332
+ ∆∆∆IOF = [log(np)][log(np)]′ −[log(np)][log(np)]′ ⊙IQ∗.
333
+ It is worth observing that, for each variable, IOF dissimilarity for a category with itself is zero, and the
334
+ dissimilarity between two different categories, say a and b, corresponds to
335
+ log(npa)log(npb).
336
+ The IOF measure is related to the concept of inverse document frequency (TF-IDF) from information
337
+ retrieval, where it is used to account for document relevance for a given term (Spärck Jones 1972). In other
338
+ words, since a rare term contributes more information than a more frequent term, the IOF measure accounts
339
+ for how rare the term is, and a lower IOF dissimilarity corresponds to a rarer term. Log frequency is used
340
+ to reduce the impact of terms of very high frequencies.
341
+ 4.5
342
+ Occurrence frequency
343
+ For occurrence frequency (OF) dissimilarity, dissimilarities are higher if the categories are observed less
344
+ frequently. The category dissimilarity matrix is defined as
345
+ ∆∆∆OF =
346
+
347
+ log
348
+
349
+ p−���
350
+ log
351
+
352
+ p−��′ −
353
+
354
+ log
355
+
356
+ p−���
357
+ log
358
+
359
+ p−��′ ⊙IQ∗.
360
+ Therefore, OF dissimilarity for a category with itself is zero, and the dissimilarity between two different
361
+ categories a and b is
362
+ log(pa)log(pb).
363
+ 6
364
+
365
+ 4.6
366
+ Goodall dissimilarities
367
+ In Boriah, Chandola, and Kumar (2008), four variations of Goodall’s similarity are considered. These are
368
+ based on Goodall’s original proposal (Goodall 1966). After transforming similarities into dissimilarities,
369
+ where dissimilarity is 1− similarity, the four measures have in common that dissimilarities between
370
+ different categories are, as is the case with simple matching, always equal to one. However, the dissimilarity
371
+ of a category with respect to the same category depends on the observed proportions of the categories.
372
+ Below we provide the category dissimilarity matrices for Goodall 3 and Goodall 4. For Goodall 1 and 2,
373
+ we can also construct such matrices. However, these definitions require conditional sums of proportions.
374
+ In particular, for Goodall 1, the dissimilarity for category a with itself, is defined as the sum of squared
375
+ observed proportions that are smaller or equal to the observed proportion of category a. For Goodall 2, it is
376
+ the sum of squared observed proportions that are larger or equal to the observed proportion of category a.
377
+ The Goodall 3 and 4 measures do not require the calculation of a (conditional) sum and have the
378
+ squared proportion and one minus the squared proportion of a category, respectively, on the diagonal blocks
379
+ of ∆∆∆. That is,
380
+ ∆∆∆G3 = Kb −IQ∗ +P2
381
+ d,
382
+ and
383
+ ∆∆∆G4 = Kb −P2
384
+ d.
385
+ In these definitions, dissimilarity of a category with the same category is not zero. Consequently, the
386
+ resulting “distances” do not satisfy the typical requirements of a distance. Note that for the Goodall 1 and 3
387
+ measures, a higher dissimilarity is assigned when the matching categories are frequent, whereas for the
388
+ Goodall 2 and 4 measures a higher dissimilarity is assigned when the matching categories are infrequent.
389
+ 4.7
390
+ Variable Entropy and Variable Mutability dissimilarities
391
+ Šulc and ˇRezanková (2019) proposed two variability-based dissimilarity measures that are related to
392
+ Goodall 1 and 2, respectively. These dissimilarities are equal to one if the categories do not match, while,
393
+ if they do match, the Variable Entropy (VE) measure uses the entropy and the Variable Mutability (VM)
394
+ measure uses the Gini coefficient to quantify “dissimilarity”. In particular, for the j-th categorical variable
395
+ with qj categories, the category dissimilarity matrices are defined as
396
+ ∆∆∆VEj = Kb j +
397
+
398
+ 1
399
+ logqj
400
+ q j
401
+
402
+ l=1
403
+ pl log pl
404
+
405
+ Iqj
406
+ and
407
+ ∆∆∆VMj = Kb j −
408
+
409
+ qj
410
+ qj −1
411
+
412
+ 1−
413
+ q j
414
+
415
+ l=1
416
+ p2
417
+ l
418
+ ��
419
+ Iq j,
420
+ respectively. Collecting all Q dissimilarity matrices in a block diagonal matrix returns the VE and VM
421
+ category dissimilarity matrices ∆∆∆VE and ∆∆∆VM.
422
+ 4.8
423
+ Ordered categories
424
+ If categories are ordered, the order can be reflected in the dissimilarities. A simple choice would be to
425
+ define the dissimilarities as the difference in category numbers. That is, the dissimilarity between categories
426
+ a and b is simply b −a. If the data are rank order data or rating (e.g., Likert) scale data, this definition
427
+ would imply treating the data as interval data. However, implementation of alternative, custom, definitions
428
+ of ordered between-category distances is also straightforward. For example, if the categories correspond to
429
+ bins on a numerical scale (e.g., age or income groups), differences between the midpoints of the bins can
430
+ 7
431
+
432
+ be used to define dissimilarities that better reflect the underlying values. More generally, let ∆∆∆i
433
+ o denote the
434
+ i-th diagonal block of ∆∆∆∗
435
+ o, and δ i
436
+ ab its ab-th element. Then, dissimilarities between ordered categories can
437
+ be imposed by letting δ i
438
+ ab ≤ δ i
439
+ ab∗, for a,b ∈ 1...iqi, b∗ ̸= b and b∗ > a. Note that this definition does not
440
+ guarantee that the triangle inequality holds. That is, without additional constraints, it may be the case that
441
+ the direct distances between two categories are larger than the indirect distances between those categories.
442
+ 5
443
+ Association-based category dissimilarity matrices
444
+ Several authors (e.g., Ahmad and Dey 2007; Jia, Cheung, and Liu 2014; Le and Ho 2005; Ring et al.
445
+ 2015) have proposed distance measures for categorical variables that take into account the association
446
+ between categorical variables. The general idea is that, similar to the case of the Mahalanobis distance for
447
+ numerical variables, differences that are in line with the association between variables are less informative
448
+ (i.e., should correspond to smaller dissimilarity values) than differences that are not in line with the general
449
+ association. How to exactly implement this idea depends on the calculation of the association between
450
+ categorical variables, and how to incorporate this association in the category dissimilarities.
451
+ Here, we present a general form to calculate and collect association-based dissimilarities that can be
452
+ directly implemented in our general framework in Section 3. We then present some specific variants and
453
+ link them to recent proposals.
454
+ 5.1
455
+ A general form for association-based dissimilarities
456
+ Fundamental in the calculation of association-based distances is the matrix of proportions of co-occurrences
457
+ P and the corresponding profile matrix R as defined in Equations (1) and (5). In particular, recall that
458
+ the off-diagonal blocks of P and R can be interpreted as (empirical) joint and conditional probability
459
+ distributions, respectively. By considering different ways to quantify the dissimilarities between the
460
+ conditional distributions (i.e., the rows of the off-diagonal blocks of R) we can construct different category
461
+ dissimilarity matrices ∆∆∆ that, by applying Equation (6) can be used to obtain the between-observation
462
+ distances.
463
+ Let Ri j denote the i j-th off-diagonal block of R, and let ri j
464
+ a denote its a-th row. Note that the elements
465
+ of each row of Rij add up to 1. Hence, these elements can be seen as (empirical) conditional probabilities.
466
+ We define the dissimilarities between categories for all pairs of categories of variable i (for i = 1...Q)
467
+ based on the association with variable j (with j ̸= i), as
468
+ δ ij(a,b) = Φi j �
469
+ ri j
470
+ a ,ri j
471
+ b
472
+
473
+ ,
474
+ (8)
475
+ where, generically, a and b indicate categories of variable i and Φi j is the dissimilarity function that
476
+ quantifies the differences between profiles based on the association between variables i and j. The overall
477
+ between category dissimilarities for all pairs of categories of variable i can be defined as
478
+ δ i(a,b) =
479
+ Q
480
+
481
+ j̸=i
482
+ wi jδ i j(a,b) =
483
+ Q
484
+
485
+ j̸=i
486
+ wi jΦi j �
487
+ ri j
488
+ a ,ri j
489
+ b
490
+
491
+ .
492
+ (9)
493
+ The weights wij in Equation (9) allow flexibility with respect to the importance of different variables in the
494
+ calculation of association-based category dissimilarities, as defined by Φi j. By collecting, for each variable
495
+ i, the elements δ i(a,b) in a category dissimilarity matrix ∆∆∆i
496
+ Φ, and organizing them on the diagonal of a
497
+ block diagonal matrix, we obtain a dissimilarity matrix ∆∆∆Φ, which can be used to calculate the distances
498
+ between the observations using Equation (6).
499
+ Equation 9 provides a very general way to define category dissimilarities using pair-specific weights
500
+ and dissimilarity functions.
501
+ For the association-based dissimilarity functions Φi j, any function that quantifies the difference between
502
+ two distributions can be used. A brief overview of 46 different functions and their implementation in the
503
+ R package philentropy is described in Drost (2018), and a more comprehensive overview of those
504
+ functions, dividing them into different types and classes, can be found in Cha (2007).
505
+ 8
506
+
507
+ Concerning the choice of weights wij, we distinguish two options. In the first, all weights are equal.
508
+ Usually 1 or 1/(Q−1), so that either sums or averages are obtained. Alternatively, different weights can
509
+ be used for different pairs. These weights can either be selected using expert knowledge (e.g., based on
510
+ the experience and preferences of the researcher) or by using a data-driven approach. For example, one
511
+ could set certain weights to zero and others to some constant based on some predetermined data dependent
512
+ criterion (e.g., a measure of association like Cramér’s V). Pairs with non-zero weights can then be referred
513
+ to as “context” variables. Approaches using such context-based dissimilarities are described in Ienco,
514
+ Pensa, and Meo (2009), Jia, Cheung, and Liu (2014), and Ring et al. (2015).
515
+ If an objective measure of overall fit of a solution is available, one could consider wi j and Φi j as tuning
516
+ parameters, and search combinations of these parameters to make a choice. In Section 7 we shall further
517
+ explore the tuning of wij and Φij.
518
+ In the next subsections, we describe some specific choices of dissimilarity functions. In particular,
519
+ we provide definitions for category dissimilarities between categories a and b of variable i, based on the
520
+ association between variables i and j. That is, we present specific choices of Φi j in Equation (8). Inserting
521
+ these definitions into Equation (9) results in a dissimilarity matrix ∆∆∆Φ that can be used to calculate the
522
+ between-observation distances. For ease of notation, we now drop the superscripts ij.
523
+ 5.2
524
+ Total variation distance between profiles
525
+ The total variation distance (TVD) between two discrete probability distributions can be defined as 1/2
526
+ times the L1 norm between the distributions. We can implement this in our framework by defining the
527
+ category dissimilarity function Φ as
528
+ Φ(ra,rb) = 1
529
+ 2
530
+ q j
531
+
532
+ l=1
533
+ |ral −rbl|,
534
+ (10)
535
+ where ral and rbl denote the l-th element of ra and rb, respectively.
536
+ Calculating category dissimilarities using this definition for Φ is equivalent to the proposal (for
537
+ categorical variables) by Ahmad and Dey (2007). However, as this relationship is not trivial and appears to
538
+ be unknown, we present this here in some detail.
539
+ 5.2.1
540
+ Ahmad and Dey’s categorical variable distance
541
+ Ahmad and Dey (2007) argue that the dissimilarity between categories should be computed as a function of
542
+ their distribution in the overall data set and in co-occurrence with other categories, rather than in isolation.
543
+ The idea is to take into account co-occurrences of categories when constructing distances. The way they do
544
+ this, is by considering all combinations of categories of one variable, and selecting the partitioning (that is,
545
+ a combination of categories) for which the sum of proportions in the two complementary partitions for the
546
+ two categories is maximal. Following Ahmad and Dey (2007), we can define the dissimilarity between
547
+ categories a and b of variable i, with respect to the distribution over the categories of variable j, as
548
+ δ(a,b) = max
549
+ ωj (P(ωj|a)+P( ¯ωj|b)−1),
550
+ (11)
551
+ where ωj and its complement ¯ωj define a binary partition with respect to the categories of variable j, and
552
+ P(ωj|a) denotes the proportion of observations with the category a of variable i, corresponding to the set
553
+ of categories of variable j as defined by ωj. Note that the term −1 is only introduced to fix the upper
554
+ limit of the dissimilarities at 1. The number of binary partitions for variable j, excluding the partitions
555
+ containing all or no categories, equals 2q j −2, where qj gives number of categories of variable j, and hence
556
+ this number grows exponentially when the number of categories for a variable increases. Ahmad and Dey
557
+ (2007) propose an algorithm to calculate their distances. The order of their algorithm is O(Q∗2n+Q∗2 ¯q3),
558
+ where Q∗ gives the total number of categories, n is the number of observations and ¯q denotes the average
559
+ number of categories per variable. However, as we show below, and in more detail in Appendix A, for
560
+ distances between categorical variables, the distance of Ahmad and Dey (2007) is equivalent to the total
561
+ variation distance, and calculations using Equation 10 are much more efficient.
562
+ 9
563
+
564
+ 5.2.2
565
+ Equivalence of Ahmad and Dey’s distance and the total variation distance between profiles
566
+ When going from δ(a,b) to δ(b,a), the optimal partition ωj, that is, the combination of categories that
567
+ maximizes the sum, is simply flipped (i.e., the complement is taken), hence Ahmad and Dey’s distance is
568
+ symmetric:
569
+ δ(a,b) = max
570
+ ω (P(ω|a)+P( ¯ω|b)−1) = max
571
+ ω (P(ω|b)+P( ¯ω|a)−1) = δ(b,a),
572
+ where, for convenience, we dropped the subscripts j for the ω’s.
573
+ As P( ¯ω|b) = 1−P(ω|b) and P( ¯ω|a) = 1−P(ω|a), we have
574
+ δ(a,b) = max
575
+ ω (P(ω|a)−P(ω|b))
576
+ = max
577
+ ω (P(ω|b)−P(ω|a))
578
+ = max
579
+ ω |P(ω|a)−P(ω|b)|.
580
+ (12)
581
+ Equation (12) shows that Ahmad and Dey’s distance is equal to finding the maximum difference between
582
+ all combinations of observed proportions. This implies that we can express this distance as the supremum
583
+ norm of a vector of differences between probabilities. The total variation distance as defined in (10) can
584
+ also be defined as the largest difference between probabilities from two probability distributions that can be
585
+ assigned to the same event. Therefore, the Ahmad and Dey (2007) distance for categorical variables is
586
+ equivalent to the total variation distance. For the sake of completeness, we provide a complete proof of the
587
+ equivalence in Appendix A.
588
+ 5.3
589
+ Kullback-Leibler divergence between profiles
590
+ Kullback-Leibler divergence (KL, Kullback and Leibler 1951; Kullback 1959) is an entropy-based measure
591
+ of dissimilarity between probability distributions. Le and Ho (2005) define category dissimilarities for
592
+ the categories of variable i by taking the sum of KL-divergences between the (empirical) conditional
593
+ probability distributions over all other variables. Using similar notation as before, we can implement this
594
+ divergence by setting all weights wij equal to one and by defining Φ as
595
+ Φ(ra,rb) =
596
+ q j
597
+
598
+ l=1
599
+
600
+ ral log
601
+ �ral
602
+ rbl
603
+
604
+ +rbl log
605
+ �rbl
606
+ ral
607
+ ��
608
+ ,
609
+ where log() is the binary logarithm and ral and rbl denote, as before, l-th element of ra and rb, respectively.
610
+ It is important to note that KL is not symmetric. Hence, distance calculations using ∆∆∆KL may result in
611
+ non-symmetric distances.
612
+ 5.4
613
+ χ2-distance between profiles
614
+ A distance for categorical data, that has a strong link to the data visualization technique correspondence
615
+ analysis, is the chi-squared distance. There exist several forms and implementations of the chi-square
616
+ distance that differ with respect to the chosen standardization. That, is, chi-squared distance considers the
617
+ squared differences between proportions divided by the expected proportions. For an n× p contingency
618
+ matrix F j = Z′
619
+ iZ j, the squared χ2-distance between rows a and b can be defined as
620
+ s
621
+ p
622
+
623
+ l=1
624
+ 1
625
+ f•l
626
+ � fal
627
+ fa•
628
+ − fbl
629
+ fb•
630
+ �2
631
+ ,
632
+ (13)
633
+ where s is the sum of all elements of F and • denotes the summation in the appropriate dimension (rows or
634
+ columns) of the matrix (see, e.g., Gifi 1990, p.266). In our notation, we can implement the chi-squared
635
+ distances as category dissimilarities by defining
636
+ Φ(ra,rb) =
637
+ qj
638
+
639
+ l=1
640
+ 1
641
+ pl
642
+ (ral −rbl)2,
643
+ 10
644
+
645
+ where we dropped the constant s, pl corresponds to the l-th element of the j-th block of Pd and, as before,
646
+ ral and rbl denote the l-th element of ra and rb, respectively.
647
+ 6
648
+ Supervised association-based distances
649
+ In a supervised setting, where we want to assign observations to classes (i.e., categories) for one variable,
650
+ say y, based on observations on categorical variables xj where j = 1,...,Q, we can define a supervised
651
+ variant of association-based categorical variable distances. That is, we can define category dissimilarities
652
+ that take into account the association between variables y and x. Next, we can make predictions using either
653
+ the K-nearest neighbors or a distance-based clustering method, where we fix the number of clusters to the
654
+ number of classes and do a post-hoc comparison of clusters and classes. That is, we match the clusters to
655
+ the true classes and assign labels accordingly.
656
+ To define supervised association-based distances we create an n × c indicator matrix Zy, where c
657
+ corresponds to the number of classes of y. If we add, to the right, this indicator matrix to Z and insert this
658
+ supplemented Z into Equations (1) through (5), we can calculate category dissimilarities using Equations
659
+ (8) and (9). Note that in this new setting, we have Q+1 variables and consequently Q+1 association-based
660
+ category dissimilarity matrices ∆∆∆i. However, the (Q+1)-th diagonal block gives the category dissimilarities
661
+ between the categories of the y variable. In a supervised setting, a category (class) of y is to be predicted
662
+ based on data from the other Q variables. The category dissimilarities for y should therefore not be used.
663
+ This is easily achieved by simply ignoring these in the overall block diagonal category dissimilarity matrix
664
+ ∆∆∆. That is, we construct ∆∆∆ by collecting only the first Q category dissimilarity matrices on its diagonal.
665
+ As before, our framework allows for great flexibility in how to incorporate the information of variable y.
666
+ In particular, the dissimilarity functions Φij and the weights wi j are pair-specific. One could, as suggested in
667
+ Section 5, set all weights wij equal to 1, so that the category dissimilarities take into account all associations.
668
+ We refer to this choice as “full supervised” dissimilarity.
669
+ Alternatively, in a supervised setting, one may choose to have the category dissimilarities depend only
670
+ on the association with the variable y. This corresponds to the choice wi j = 1 for j = Q+1 and 0 for all
671
+ other pairs. In this case, category dissimilarities may better discriminate with respect to the classes of y.
672
+ We refer to this choice as “supervised” dissimilarity. Note that both supervised variants require a choice of
673
+ association-based dissimilarity functions Φi j, for all pairs of variables.
674
+ 7
675
+ Aggregation and dissimilarity tuning
676
+ Our general framework introduced in Section 3 allows for great flexibility in the implementation of
677
+ distances between categorical variables. In particular, in the previous sections, we introduced a selection of
678
+ category dissimilarity measures. However, there are many more options. For example, all 46 functions
679
+ available in the R package philentropy, described in Drost (2018), can be used for association-based
680
+ functions. Moreover, as is clear from Definition (9, all separate category dissimilarity measures can be
681
+ combined and aggregated according to the researcher’s preferences. How to exactly determine which
682
+ category dissimilarity and aggregation strategy is the most appropriate is non-trivial, and this choice may
683
+ depend on the properties of the data and the research objectives.
684
+ In several distance-based methods, for example cluster analysis and multidimensional scaling, a clear
685
+ measure of fit is not available, as the methods tend to be primarily exploratory. That is, the goal is to find
686
+ and interpret patterns in the data. As the interpretability of a solution is not easily quantified, validation is
687
+ typically not trivial. If, however, a measure of fit can be calculated, we can use this to select an aggregation
688
+ and category-dissimilarity definition. That is, we can apply several aggregation and category dissimilarity
689
+ definitions, and compare the fit for each of them by considering the selected measure.
690
+ In a supervised classification setting, where we have a data set for which the true classes are known,
691
+ we can assess the fit by comparing true classes with “predicted” classes. A choice for aggregation and
692
+ category dissimilarity definitions, can then be made based on the discrepancy between these. Therefore,
693
+ the aggregation state (that is, the weights wi j) and the category dissimilarity function (that is, Φi j) can be
694
+ 11
695
+
696
+ treated as tuning parameters. Note, however, that Equation 9 allows many combinations and some choices
697
+ need to be made to restrict the total search space.
698
+ 8
699
+ Applications
700
+ To illustrate how our general framework can be used in practice, we consider distance-based methods for
701
+ supervised and unsupervised learning. In a supervised setting, a distance-based approach is K-nearest
702
+ neighbors averaging, which can be used in regression and classification problems: each new observation is
703
+ labeled according to a set of K close training points (neighbors). In an unsupervised setting, distance-based
704
+ cluster analysis aims to assign observations to groups (clusters) for which the within-cluster distances are
705
+ small, whereas the distances between clusters are large.
706
+ As a general setup, we consider nine different labeled data sets (see Table 1), all available via the UCI
707
+ Machine Learning repository2. Each data set is split into five folds, for cross-validation. On the training
708
+ folds, a block diagonal matrix ∆∆∆ of pair-wise category dissimilarities is calculated for each of the reviewed
709
+ dissimilarity measures (see Table 2). The test fold is used for performance assessment of the considered
710
+ methods. The performance metric depends on the considered method:
711
+ • Accuracy of the nearest neighbors classifier: proportion of the test observations correctly classified
712
+ (Metz 1978);
713
+ • Adjusted Rand Index (ARI, Hubert and Arabie 1985) comparing the cluster allocation of the test
714
+ observations to the true cluster allocation (the labels).
715
+ The procedure is iterated until each fold is used as test. The whole cross-validation process is repeated
716
+ 10 times, for different random splits.
717
+ Table 1: Data set information
718
+ Dataset
719
+ n
720
+ p
721
+ # clusters
722
+ australian
723
+ 690
724
+ 8
725
+ 2
726
+ balance
727
+ 625
728
+ 4
729
+ 3
730
+ cars
731
+ 1728
732
+ 6
733
+ 4
734
+ lympho
735
+ 148
736
+ 18
737
+ 4
738
+ soybean (large)
739
+ 307
740
+ 35
741
+ 19
742
+ tae
743
+ 151
744
+ 5
745
+ 3
746
+ tictac
747
+ 958
748
+ 9
749
+ 2
750
+ vote
751
+ 435
752
+ 16
753
+ 2
754
+ wbcd
755
+ 699
756
+ 9
757
+ 2
758
+ Table 2: Category dissimilarity measures considered in the experiments
759
+ Independent
760
+ Association-based
761
+ SM (Sec. 4.1)
762
+ TVD (Sec. 5.2)
763
+ Eskin (Sec. 4.2)
764
+ KL (Sec. 5.3)
765
+ Lin (Sec. 4.3)
766
+ KL (Sec. 5.3)
767
+ IOF (Sec. 4.4)
768
+ Supervised TVD, Supervised TVD-full (Sec. 6)
769
+ OF (Sec. 4.5)
770
+ Goodall 3 and 4 (Sec. 4.6)
771
+ VE, VM (Sec. 4.7)
772
+ 2https://archive.ics.uci.edu/ml/index.php
773
+ 12
774
+
775
+ 8.1
776
+ K-nearest neighbors of categorical data
777
+ The KNN classification of the test observations is based on the calculation of the distance between
778
+ each test observation and the training observations, as described in Section 3.1. Let ∆∆∆train denote the
779
+ category dissimilarity matrix, where the subscript train indicates that if the category dissimilarities are data
780
+ dependent, only observations of the training set were used. Furthermore, Ztest and Ztrain are the indicator
781
+ matrices of the test and training observations, respectively. The distances of interest are in the columns of
782
+ Ztrain∆∆∆trainZ′
783
+ test
784
+ and the nearest neighbors for the j-th test observation are the K smallest values in the j-th column.
785
+ Since the lower the number of considered neighbors, the higher the flexibility of the classifier, K is a
786
+ hyper-parameter. We tune this hyper-parameter using the repeated cross-validation validation procedure
787
+ described in Section 8. In particular, we consider values for K ∈ {1,3,5,9,15,21} and, for each data
788
+ set/category dissimilarity combination, the value of K is chosen that minimizes the cross-validation estimate
789
+ of the classifier’s test accuracy.
790
+ Figure 1 presents the accuracy assessment of the tuned KNN classifier for each considered data set and
791
+ for each considered category dissimilarity definition. In each panel, the position of each point corresponds
792
+ to the accuracy obtained using the indicated category dissimilarity definition; the size of each point is
793
+ proportional to the tuned value of the hyper-parameter K. The lines centered at each point span twice the
794
+ standard deviation of the accuracy over the 10 replicates. For each data set, the distances are reported
795
+ in descending order, highlighting the best performing ones. For some data sets, e.g., australian, vote
796
+ and wbcd, the accuracy is high almost irrespective to the chosen distance, with no variability over the 10
797
+ cross-validation replicates. For smaller data sets, such as lympho and tae, there is more variability over the
798
+ 10 cross-validation replicates, as expected.
799
+ tictac (n: 958, p: 9, cl: 2)
800
+ vote (n: 435, p: 16, cl: 2)
801
+ wbcd (n: 699, p: 9, cl: 2)
802
+ lympho (n: 148, p: 18, cl: 4)
803
+ soybeanlarge (n: 307, p: 35, cl: 19)
804
+ tae (n: 151, p: 5, cl: 3)
805
+ australian (n: 690, p: 8, cl: 2)
806
+ balance (n: 625, p: 4, cl: 3)
807
+ cars (n: 1728, p: 6, cl: 4)
808
+ 0.25
809
+ 0.50
810
+ 0.75
811
+ 1.00
812
+ 0.25
813
+ 0.50
814
+ 0.75
815
+ 1.00
816
+ 0.25
817
+ 0.50
818
+ 0.75
819
+ 1.00
820
+ Of
821
+ Lin
822
+ Iof
823
+ Var_mutability
824
+ Var_entropy
825
+ Gifi_chi2
826
+ Kullback−Leibler
827
+ Supervised_full
828
+ Tot_var_dist
829
+ Supervised
830
+ Goodall_3
831
+ Goodall_4
832
+ Eskin
833
+ Matching
834
+ Iof
835
+ Tot_var_dist
836
+ Goodall_4
837
+ Kullback−Leibler
838
+ Eskin
839
+ Matching
840
+ Supervised_full
841
+ Var_entropy
842
+ Goodall_3
843
+ Gifi_chi2
844
+ Var_mutability
845
+ Supervised
846
+ Of
847
+ Lin
848
+ Of
849
+ Lin
850
+ Goodall_4
851
+ Matching
852
+ Eskin
853
+ Var_mutability
854
+ Gifi_chi2
855
+ Var_entropy
856
+ Goodall_3
857
+ Kullback−Leibler
858
+ Iof
859
+ Supervised
860
+ Tot_var_dist
861
+ Supervised_full
862
+ Kullback−Leibler
863
+ Gifi_chi2
864
+ Var_entropy
865
+ Iof
866
+ Var_mutability
867
+ Eskin
868
+ Matching
869
+ Goodall_4
870
+ Goodall_3
871
+ Tot_var_dist
872
+ Lin
873
+ Of
874
+ Supervised_full
875
+ Supervised
876
+ Goodall_4
877
+ Lin
878
+ Of
879
+ Var_mutability
880
+ Var_entropy
881
+ Iof
882
+ Eskin
883
+ Matching
884
+ Gifi_chi2
885
+ Kullback−Leibler
886
+ Goodall_3
887
+ Tot_var_dist
888
+ Supervised_full
889
+ Supervised
890
+ Lin
891
+ Var_mutability
892
+ Var_entropy
893
+ Goodall_3
894
+ Of
895
+ Eskin
896
+ Matching
897
+ Iof
898
+ Supervised_full
899
+ Tot_var_dist
900
+ Gifi_chi2
901
+ Goodall_4
902
+ Kullback−Leibler
903
+ Supervised
904
+ Lin
905
+ Of
906
+ Goodall_4
907
+ Goodall_3
908
+ Eskin
909
+ Gifi_chi2
910
+ Supervised_full
911
+ Tot_var_dist
912
+ Var_entropy
913
+ Var_mutability
914
+ Iof
915
+ Supervised
916
+ Matching
917
+ Kullback−Leibler
918
+ Lin
919
+ Of
920
+ Eskin
921
+ Goodall_4
922
+ Matching
923
+ Var_mutability
924
+ Kullback−Leibler
925
+ Var_entropy
926
+ Goodall_3
927
+ Iof
928
+ Gifi_chi2
929
+ Supervised_full
930
+ Tot_var_dist
931
+ Supervised
932
+ Lin
933
+ Of
934
+ Supervised
935
+ Eskin
936
+ Matching
937
+ Var_mutability
938
+ Var_entropy
939
+ Goodall_4
940
+ Goodall_3
941
+ Gifi_chi2
942
+ Kullback−Leibler
943
+ Iof
944
+ Supervised_full
945
+ Tot_var_dist
946
+ accuracy
947
+ distances
948
+ distance measure
949
+ Eskin
950
+ Gifi_chi2
951
+ Goodall_3
952
+ Goodall_4
953
+ Iof
954
+ Kullback−Leibler
955
+ Lin
956
+ Matching
957
+ Of
958
+ Supervised
959
+ Supervised_full
960
+ Tot_var_dist
961
+ Var_entropy
962
+ Var_mutability
963
+ 5−fold CV
964
+ KNN
965
+ Figure 1: KNN classification accuracy for each considered distance measure and data set
966
+ 8.2
967
+ Partitioning around medoids
968
+ Partitioning around medoids (PAM) is an iterative clustering procedure that takes as input a matrix of
969
+ pair-wise distances between a set of observations. Within a cluster, a medoid corresponds to the median
970
+ 13
971
+
972
+ observation, just like a centroid in K-means corresponds to the mean. The starting set of the K medoids
973
+ is random, and each observation is assigned to the closest medoid; given the allocation of the obtained
974
+ clusters, the medoids are updated accordingly. The procedure stops iterating when there are no changes in
975
+ the set of medoids. Although we are working in an unsupervised context, the performance of PAM on each
976
+ data set/category dissimilarity combination can be assessed via cross-validation by calculating distances
977
+ based on the supervised association dissimilarities; this also allows for consistency with the KNN-based
978
+ application.
979
+ In particular, for each data set and each category dissimilarity definition, a medoids set is obtained by
980
+ applying PAM to the training data. That is,
981
+ D = Ztrain∆∆∆trainZ′
982
+ train
983
+ where for ∆∆∆train we consider all category dissimilarity matrices described in Sections 4 and 5 and reported
984
+ in Table 2. Next, we compute the test observation-to-medoid distance matrix as follows,
985
+ Zmedoid∆∆∆trainZ′
986
+ test,
987
+ and assign each test observation to the cluster corresponding to the nearest medoid.
988
+ The results are reported in Figure 2. We observe that, with the exception of the cars data set, for which
989
+ performance of all methods is poor, the supervised association-based distance generally performs well.
990
+ In general, the KNN and PAM results lead to the following conclusions.
991
+ • Data sets for which classification accuracy is high in the supervised setting also have higher ARI
992
+ values in the unsupervised setting.
993
+ • The choice of category dissimilarity does not appear to impact classification accuracy when the
994
+ overall performance of the method is very poor (for example, for cars) or very good (e.g., for wbcd).
995
+ • In an unsupervised setting, association-based measures seem to provide an edge: in wbcd, five out of
996
+ the six measures with an ARI value above 0.75 are association-based.
997
+ Note that extended results and the R code to reproduce them are available online 3.
998
+ 9
999
+ Conclusion
1000
+ In this paper, we propose a general framework for implementing distances between categorical variables in
1001
+ a flexible, efficient and transparent manner. In detail, we show that both independent and association-based
1002
+ distances can be incorporated in our framework. The latter can therefore be used to implement several
1003
+ existing measures, as well as to easily introduce new highly customizable ones.
1004
+ Our proposal is valuable from a theoretical perspective because it allows assessing the differences
1005
+ between dissimilarity measures by simplifying the wide variety of notation and definitions used in the
1006
+ literature, and therefore making their implementation much more transparent. From an applied perspective,
1007
+ since the proposed framework is not method- or application-specific, it can allow the definition of problem-
1008
+ specific dissimilarity measures. With respect to this, it is important to outline that in a supervised context
1009
+ our proposal can be used to define measures that consider associations with a response variable.
1010
+ For the independent category dissimilarities, the definitions described in Section 4 (with the exception
1011
+ of the ordered category dissimilarities) are implemented in the nomclust R package (Šulc and ˇRezanková
1012
+ 2015). However, association-based measures are not implemented in this package. In the catdist
1013
+ package4 the independent as well as the association-based measures presented in this paper are implemented.
1014
+ 3https://alfonsoiodicede.github.io/blogposts_archive/distances_experiment_superv_
1015
+ unsuperv.html
1016
+ 4Available on GitHub at https://github.com/alfonsoIodiceDE/catdist_package and will soon be re-
1017
+ leased on CRAN.
1018
+ 14
1019
+
1020
+ tictac (n: 958, p: 9, cl: 2)
1021
+ vote (n: 435, p: 16, cl: 2)
1022
+ wbcd (n: 699, p: 9, cl: 2)
1023
+ lympho (n: 148, p: 18, cl: 4)
1024
+ soybeanlarge (n: 307, p: 35, cl: 19)
1025
+ tae (n: 151, p: 5, cl: 3)
1026
+ australian (n: 690, p: 8, cl: 2)
1027
+ balance (n: 625, p: 4, cl: 3)
1028
+ cars (n: 1728, p: 6, cl: 4)
1029
+ 0.00
1030
+ 0.25
1031
+ 0.50
1032
+ 0.75
1033
+ 0.00
1034
+ 0.25
1035
+ 0.50
1036
+ 0.75
1037
+ 0.00
1038
+ 0.25
1039
+ 0.50
1040
+ 0.75
1041
+ Goodall_3
1042
+ Var_mutability
1043
+ Eskin
1044
+ Var_entropy
1045
+ Lin
1046
+ Matching
1047
+ Of
1048
+ Goodall_4
1049
+ Kullback−Leibler
1050
+ Iof
1051
+ Supervised_full
1052
+ Gifi_chi2
1053
+ Supervised
1054
+ Tot_var_dist
1055
+ Lin
1056
+ Tot_var_dist
1057
+ Of
1058
+ Supervised_full
1059
+ Var_entropy
1060
+ Matching
1061
+ Gifi_chi2
1062
+ Goodall_3
1063
+ Kullback−Leibler
1064
+ Var_mutability
1065
+ Goodall_4
1066
+ Iof
1067
+ Eskin
1068
+ Supervised
1069
+ Of
1070
+ Lin
1071
+ Goodall_4
1072
+ Eskin
1073
+ Matching
1074
+ Var_entropy
1075
+ Var_mutability
1076
+ Goodall_3
1077
+ Kullback−Leibler
1078
+ Gifi_chi2
1079
+ Iof
1080
+ Tot_var_dist
1081
+ Supervised_full
1082
+ Supervised
1083
+ Lin
1084
+ Of
1085
+ Tot_var_dist
1086
+ Supervised_full
1087
+ Eskin
1088
+ Matching
1089
+ Goodall_4
1090
+ Goodall_3
1091
+ Var_entropy
1092
+ Var_mutability
1093
+ Iof
1094
+ Gifi_chi2
1095
+ Kullback−Leibler
1096
+ Supervised
1097
+ Goodall_4
1098
+ Lin
1099
+ Of
1100
+ Var_mutability
1101
+ Var_entropy
1102
+ Iof
1103
+ Eskin
1104
+ Matching
1105
+ Goodall_3
1106
+ Kullback−Leibler
1107
+ Gifi_chi2
1108
+ Supervised_full
1109
+ Tot_var_dist
1110
+ Supervised
1111
+ Lin
1112
+ Eskin
1113
+ Matching
1114
+ Var_entropy
1115
+ Var_mutability
1116
+ Of
1117
+ Iof
1118
+ Gifi_chi2
1119
+ Goodall_4
1120
+ Tot_var_dist
1121
+ Supervised_full
1122
+ Goodall_3
1123
+ Kullback−Leibler
1124
+ Supervised
1125
+ Gifi_chi2
1126
+ Kullback−Leibler
1127
+ Of
1128
+ Lin
1129
+ Goodall_4
1130
+ Matching
1131
+ Goodall_3
1132
+ Supervised_full
1133
+ Iof
1134
+ Eskin
1135
+ Var_entropy
1136
+ Var_mutability
1137
+ Tot_var_dist
1138
+ Supervised
1139
+ Goodall_4
1140
+ Of
1141
+ Eskin
1142
+ Lin
1143
+ Kullback−Leibler
1144
+ Iof
1145
+ Goodall_3
1146
+ Matching
1147
+ Var_mutability
1148
+ Tot_var_dist
1149
+ Supervised_full
1150
+ Gifi_chi2
1151
+ Var_entropy
1152
+ Supervised
1153
+ Lin
1154
+ Of
1155
+ Var_entropy
1156
+ Var_mutability
1157
+ Goodall_3
1158
+ Supervised_full
1159
+ Iof
1160
+ Tot_var_dist
1161
+ Gifi_chi2
1162
+ Kullback−Leibler
1163
+ Eskin
1164
+ Matching
1165
+ Goodall_4
1166
+ Supervised
1167
+ test ARI
1168
+ distances
1169
+ distance measure
1170
+ Eskin
1171
+ Gifi_chi2
1172
+ Goodall_3
1173
+ Goodall_4
1174
+ Iof
1175
+ Kullback−Leibler
1176
+ Lin
1177
+ Matching
1178
+ Of
1179
+ Supervised
1180
+ Supervised_full
1181
+ Tot_var_dist
1182
+ Var_entropy
1183
+ Var_mutability
1184
+ k−medoids
1185
+ PAM clustering
1186
+ Figure 2: ARI results on K-medoids
1187
+ To illustrate the importance of selecting the “best” or the “most appropriate” distance for the problem
1188
+ at hand, we used our framework in both supervised (via KNN) and unsupervised (via PAM) contexts.
1189
+ Applications on real-world data sets revealed that choosing a specific measure will not affect neither classifi-
1190
+ cation accuracy nor clustering performance, respectively, when the variables have no discriminatory power,
1191
+ there is no strong cluster structure in the data, or KNN/ PAM are not appropriate methods for the problem
1192
+ at hand. Similarly, there are cases where all measures perform equally well. Putting extreme scenarios
1193
+ aside, choosing the “most appropriate” measure can lead to a classification / clustering improvement and
1194
+ association-based measures outperformed, in many cases, independent category measures. A further, more
1195
+ structured study, using synthetic data as well as a larger collection of empirical data sets, is needed to
1196
+ appraise this claim. Such a study is beyond the scope of this paper.
1197
+ By using the general framework proposed in this paper, new or customized distances can easily be
1198
+ implemented. In fact, the supervised total variation distances introduced in Section 6, for example, are “new”
1199
+ measures. However, rather than introducing and appraising new measures, it might be more interesting to
1200
+ consider a more systematic comparison of the strengths and weaknesses of different dissimilarity measures
1201
+ for categorical variables that are already available in the literature.
1202
+ Appendix A
1203
+ Here we prove that the definition of category dissimilarities using total variance, as in Equation (10), is
1204
+ equivalent to the definition of category dissimilarities proposed in Ahmad and Dey (2007). Recall that, as
1205
+ explained in Section 5.2.2, Ahmad and Dey (2007) define the dissimilarity between categories a and b of
1206
+ variable i, with respect to the distribution over the categories of variable j, as
1207
+ δ(a,b) = max
1208
+ ωj (P(ωj|a)+P( ¯ωj|b)−1),
1209
+ (14)
1210
+ where ωj and its complement ¯ωj define a binary partition with respect to the categories of variable j, and
1211
+ P(ωj|a) denotes the proportion of observations with the category a of variable i, corresponding to the set
1212
+ 15
1213
+
1214
+ of categories of variable j as defined by ωj. In Section 5.2.2, we showed that Equation (14) is equivalent to
1215
+ δ(a,b) = max
1216
+ ωj
1217
+ ��P(ωj|a)−P(ω j|b)
1218
+ ��.
1219
+ (15)
1220
+ Let Kq j be a design matrix that defines all, except the empty and complete, binary partitions for the qj
1221
+ categories of variable j. Hence, Kq j is a qj ×q⋆
1222
+ j matrix of zeros and ones, where q⋆
1223
+ j = ∑
1224
+ qj−1
1225
+ l=1
1226
+ �qj
1227
+ l
1228
+
1229
+ = 2q j −2.
1230
+ We can re-express Equation (15) as
1231
+ δ(a,b) =
1232
+ ���K′
1233
+ qj (ra −rb)
1234
+ ���
1235
+ ∞ =
1236
+ ���K′
1237
+ q jdab
1238
+ ���
1239
+ ∞ ,
1240
+ where dab = (ra −rb) and ∥x∥∞ denotes the supremum norm of vector x, that is, the maximum element of
1241
+ x in absolute value.
1242
+ The number of columns of Kq j and hence the size of the vector from which we need to take the norm,
1243
+ grows exponentially with the number of categories. For example, for qj = 4, q⋆
1244
+ j = 14 but for qj = 8 we
1245
+ have q⋆
1246
+ j = 254. When considering binary partitions, only half of these combinations are needed. Still,
1247
+ when the number of categories of the categorical variables is not too small, considering all combinations
1248
+ becomes computationally expensive.
1249
+ A more efficient way to calculate the distances between categories a and b can be obtained using the
1250
+ following relationship
1251
+ ���K′
1252
+ qjdab
1253
+ ���
1254
+ ∞ = 1
1255
+ 2 ∥dab∥1 ,
1256
+ (16)
1257
+ where ∥x∥1 denotes the L1 norm of vector x, that is
1258
+ ∥x∥1 = ∑
1259
+ i
1260
+ |xi|.
1261
+ To see that Equation (16) holds, note that the maximum, in absolute value, for the combinations of
1262
+ elements of dab is obtained by selecting the combination consisting of elements that have the same sign.
1263
+ Furthermore, as the sum of elements of dab equals zero, that is:
1264
+ dab1q j = (ra −rb)1qj = 0,
1265
+ it immediately follows that the sum of all positive elements equals the sum of all negative values. Therefore,
1266
+ ���K′
1267
+ q jdab
1268
+ ���
1269
+ ∞ = ∑
1270
+ l:dl>0
1271
+ |dl| = ∑
1272
+ l:dl<0
1273
+ |dl|,
1274
+ where dl denotes the l-th element of dab. Finally, as
1275
+ ∥dab∥1 = ∑
1276
+ l
1277
+ |dl| = ∑
1278
+ l:dl>0
1279
+ |dl|+ ∑
1280
+ l:dl<0
1281
+ |dl|
1282
+ the equivalence in Equation (16) immediately follows, and we can express Ahmad and Dey’s distance
1283
+ between categories a and b with respect to the categories of variable j, as
1284
+ δ(a,b) = 1
1285
+ 2 ∥dab∥1 = 1
1286
+ 2 ∥dab∥1 = 1
1287
+ 2
1288
+ qj
1289
+
1290
+ l=1
1291
+ |ral −rbl|.
1292
+ (17)
1293
+ Comparing Equations 10 and 17 shows that Ahmad and Dey’s distance is equivalent to the total variation
1294
+ distance introduced in Section 5.2.
1295
+ Funding
1296
+ The authors received no financial support for the research, authorship, and/or publication of this article.
1297
+ 16
1298
+
1299
+ Conflict of interest
1300
+ The authors declare that they have no conflict of interest.
1301
+ Data availability
1302
+ Extended results and the code to reproduce them are available online at https://alfonsoiodicede.
1303
+ github.io/blogposts_archive/distances_experiment_superv_unsuperv.html. The
1304
+ data used in this study were downloaded from the UCI repository (Dua and Graff 2017) and are also avail-
1305
+ able in the catdist package available on GitHub at https://github.com/alfonsoIodiceDE/
1306
+ catdist_package.
1307
+ References
1308
+ Ahmad, A. and L. Dey (2007). “A k-mean clustering algorithm for mixed numeric and categorical data”.
1309
+ In: Data & Knowledge Engineering 63.2, pp. 503–527.
1310
+ Alves, G., M. Couceiro, and A. Napoli (Dec. 2019). “Similarity Measure Selection for Categorical Data
1311
+ Clustering”. Working paper or preprint. URL: https://hal.archives-ouvertes.fr/hal-
1312
+ 02399640.
1313
+ Bai, L. and J. Liang (2022). “A Categorical Data Clustering Framework on Graph Representation”. In:
1314
+ Pattern Recognition, p. 108694.
1315
+ Borg, I. and P.J.F. Groenen (2005). Modern multidimensional scaling: Theory and applications. Springer
1316
+ Science & Business Media.
1317
+ Boriah, S., V. Chandola, and V. Kumar (2008). “Similarity measures for categorical data: A comparative
1318
+ evaluation”. In: Proceedings of the 2008 SIAM international conference on data mining. SIAM, pp. 243–
1319
+ 254.
1320
+ Cha, S.H. (2007). “Comprehensive survey on distance/similarity measures between probability density
1321
+ functions”. In: International journal of mathematical models and methods in applied sciences 1.4,
1322
+ pp. 300–307.
1323
+ Cover, T. and P. Hart (1967). “Nearest neighbor pattern classification”. In: IEEE Transactions on Informa-
1324
+ tion Theory 13.1, pp. 21–27.
1325
+ Drost, H.G. (2018). “Philentropy: information theory and distance quantification with R”. In: Journal of
1326
+ Open Source Software 3.26, p. 765.
1327
+ Dua, D. and C. Graff (2017). UCI Machine Learning Repository. URL: http://archive.ics.uci.
1328
+ edu/ml.
1329
+ Eskin, E. et al. (2002). “A geometric framework for unsupervised anomaly detection”. In: Applications of
1330
+ data mining in computer security. Springer, pp. 77–101.
1331
+ Gabriel, K. R. (1971). “The biplot graphic display of matrices with application to principal component
1332
+ analysis”. In: Biometrika 58.3, pp. 453–467.
1333
+ Gifi, A. (1990). Nonlinear multivariate analysis. John Wiley & Sons Ltd.
1334
+ Goodall, D.W. (1966). “A new similarity index based on probability”. In: Biometrics, pp. 882–907.
1335
+ Gordon, A.D. (1999). Classification. 2nd ed. Chapman and Hall/CRC.
1336
+ Gower, J. C., S. G. Lubbe, and N. J Le Roux (2011). Understanding biplots. John Wiley & Sons.
1337
+ Hubert, L. and P. Arabie (1985). “Comparing Partitions”. In: Journal of Classification 2.1, pp. 193–218.
1338
+ Ienco, D., R.G. Pensa, and R. Meo (2009). “Context-based distance learning for categorical data clustering”.
1339
+ In: International Symposium on Intelligent Data Analysis. Springer, pp. 83–94.
1340
+ Jia, H., Y. Cheung, and J. Liu (2014). “A New Distance Metric for Unsupervised Learning of Categorical
1341
+ Data”. In: IEEE Transactions on Neural Networks and Learning Systems 27, pp. 1065–1079.
1342
+ Kaufman, L. and P.J. Rousseeuw (1990). Finding groups in data: an introduction to cluster analysis. John
1343
+ Wiley & Sons, New York.
1344
+ Kullback, S. (1959). Information theory and statistics. New York: Wiley.
1345
+ 17
1346
+
1347
+ Kullback, S. and R.A. Leibler (1951). “On information and sufficiency”. In: The Annals of Mathematical
1348
+ Statistics 22.1, pp. 79–86.
1349
+ Le, S.Q. and T.B. Ho (2005). “An association-based dissimilarity measure for categorical data”. In: Pattern
1350
+ Recognition Letters 26.16, pp. 2549–2557. ISSN: 0167-8655.
1351
+ Lin, D. (1998). “An Information-Theoretic Definition of Similarity”. In: Proceedings of the Fifteenth Inter-
1352
+ national Conference on Machine Learning. San Francisco, CA, USA: Morgan Kaufmann Publishers
1353
+ Inc., pp. 296–304. ISBN: 1558605568.
1354
+ MacQueen, J. (1967). “Some methods for classification and analysis of multivariate observations”. In:
1355
+ Proceedings of the Fifth Berkeley Symposium on Mathematical Statistics and Probability, Volume 1:
1356
+ Statistics. Berkeley, Calif.: University of California Press, pp. 281–297.
1357
+ Mardia, K.V. (1978). “Some properties of classical multidimesional scaling”. In: Communications in
1358
+ Statistics - Theory and Methods 7.13, pp. 1233–1241.
1359
+ Metz, C. E. (1978). “Basic principles of ROC analysis”. In: Seminars in Nuclear Medicine 8.4, pp. 283–298.
1360
+ Ring, M. et al. (2015). “ConDist: A Context-Driven Categorical Distance Measure”. In: Machine Learn-
1361
+ ing and Knowledge Discovery in Databases. Ed. by A. Appice et al. Cham: Springer International
1362
+ Publishing, pp. 251–266.
1363
+ Spärck Jones, K. (1972). “A Statistical Interpretation of Term Specificity and Its Application in Retrieval”.
1364
+ In: Journal of Documentation 28, pp. 11–21.
1365
+ Šulc, Z. and H. ˇRezanková (2015). “nomclust: an R package for hierarchical clustering of objects character-
1366
+ ized by nominal variables”. In: Proceedings of the 9th International Days of Statistics and Economics.
1367
+ Prague: Slaný: Melandrium, pp. 1581–1590.
1368
+ — (2019). “Comparison of similarity measures for categorical data in hierarchical clustering”. In: Journal
1369
+ of Classification 36.1, pp. 58–72.
1370
+ 18
1371
+
C9E0T4oBgHgl3EQfQQB9/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
DtE2T4oBgHgl3EQfSQej/content/tmp_files/2301.03791v1.pdf.txt ADDED
@@ -0,0 +1,573 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Fair Recommendation by Geometric Interpretation and Analysis of
2
+ Matrix Factorization
3
+ Hao Wang
4
+ haow85@live.com
5
+ Ratidar.com
6
+ Beijing, China
7
+ ABSTRACT
8
+ Matrix factorization-based recommender system is in effect an angle preserving dimensionality reduction technique.
9
+ Since the frequency of items follows power-law distribution, most vectors in the original dimension of user feature
10
+ vectors and item feature vectors lie on the same hyperplane. However, it is very difficult to reconstruct the embeddings
11
+ in the original dimension analytically, so we reformulate the original angle preserving dimensionality reduction problem
12
+ into a distance preserving dimensionality reduction problem. We show that the geometric shape of input data of
13
+ recommender system in its original higher dimension are distributed on co-centric circles with interesting properties, and
14
+ design a paraboloid-based matrix factorization named ParaMat to solve the recommendation problem. In the experiment
15
+ section, we compare our algorithm with 8 other algorithms and prove our new method is the most fair algorithm
16
+ compared with modern day recommender systems such as ZeroMat and DotMat Hybrid.
17
+ Keywords: matrix factorization, ParaMat, Linear Factorization, geometric interpretation, recommender system
18
+ 1. INTRODUCTION
19
+ Today, every big internet company is into recommender systems. Recommender systems can boost traffic volume and
20
+ increase sales revenues by a large margin (30%-40% for all sales on Amazon) while saving a huge amount of marketing
21
+ budget. Although recommender systems suffered from a major setback in the first half of 2010’s when most companies
22
+ agreed unanimously that recommender system could not serve as a stand-alone product, in a short period after the
23
+ rampant spreading of pessimistic opinions, companies such as TikTok and Kuai Shou emerged as major players on
24
+ global market as stand-alone recommender system products.
25
+ Researchers invented recommender system a couple of decades ago, and the trickling stream of the AI technology
26
+ becomes torrents today. The basic idea behind recommender system is to use big data analytical mechanisms to analyze
27
+ historic information of users and items to recommend new items to users based on computed preference scores. For
28
+ example, if we know Alice loves reading Su Tung-Po , The Three Kingdoms and many other Chinese Classics, we could
29
+ recommend other Chinese Classics which she had not read for her. This procedure is much more effective to help Alice
30
+ find her new books than search engines with which Alice needs to find what she likes by herself. The example we just
31
+ illustrated is the primitive form of a recommender system technology named content-based recommendation. It is just
32
+ one of the sub-fields of many recommender system research areas.
33
+ Since the year of 2016, prestigious research venues such as RecSys [1][2] has witnessed the rise of deep neural network
34
+ models. With more and more companies and schools taking up the course, the technical models of recommender systems
35
+ are becoming more and more complex and individualistic. Since deep neural networks can represent any function, it
36
+ enables researchers to choose the best model from a much larger candidate pool than in the age of shallow models.
37
+ However, due to the number of choices in parameter tweaking, public knowledge of deep neural network becomes more
38
+ and more individualistic – usually only a small hand of researchers know the reasons and principles behind the neural
39
+ models that they produce.
40
+
41
+ Although earlier models in the age of shallow models seem out-of-dated , they are still widely used in various companies.
42
+ One of the major reasons of their longevity is their simplicity and interpretability. Due to the same reasons, we choose
43
+ matrix factorization [3] as our main research target in this paper. We provide a geometric interpretation and analysis of
44
+ the classic matrix factorization algorithm, and based on our observations and analysis, we propose a new algorithm
45
+ named ParaMat that is much more interpretable , and at the same time very accurate when compared with other models.
46
+ 2. RELATED WORK
47
+ Recommender system remains a popular research field irrespective of the up-and-down in the investment in the field. As
48
+ one of the most successful recommendation paradigms, matrix factorization is provided with a probabilistic framework
49
+ as the foundation in 2007 [4]. The probabilistic framework is named Probabilistic Matrix Factorization. Based on
50
+ modification of this framework, ZeroMat [5] and DotMat [6] are proposed to solve the cold-start and sparsity problem
51
+ without input data. The algorithms could predict user preferences fairly accurately with no historic information. The
52
+ social implications of the 2 algorithms are astonishing - human cultures are locked into a predictable state only after a
53
+ couple of years of evolution.
54
+ Matrix factorization can also be used to solve the context-aware recommendation (CARS) problem [7] [8]. In 2021, a
55
+ new CARS solution named MatMat [9] is introduced. Instead of scalar fitting, MatMat uses matrix factorization by
56
+ matrix fitting to incorporate contextual information into the system. A practical example of MatMat named MovieMat
57
+ [10] which solves CARS problem for movie recommendation is proposed in the same year. The algorithm incorporates
58
+ no more than 6 contextual information fields in the algorithm and achieves better results than classic models.
59
+ Fairness is a hot research topic in recent years. Google comes up with an algorithm named Focused Learning [11] in the
60
+ year of 2017 which penalizes the matrix factorization loss function with a fairness metric. More researchers spend time
61
+ and energy on fair Learning to Rank approaches [12] [13] [14] and publish extensively at top conferences such as SIGIR
62
+ and KDD. However, due to its simplicity, matrix factorization still remains a good benchmark for fairness ideas. Zipf
63
+ Matrix Factorization [15] is introduced in 2021 with the introduction of a fairness metric named Degree of Matthew
64
+ Effect as a side product. MatRec [16] and KL-Mat [17] are also examples of fair recommender algorithms based on
65
+ matrix factorization framework. In 2022, Wang [18] proposed a set of fairness metrics using extreme value theory.
+ 3. GEOMETRIC ANALYSIS OF MATRIX FACTORIZATION
+ One of the most popular definitions of the loss function of matrix factorization is as follows:
+
+ L = \sum_{i=1}^{n} \sum_{j=1}^{m} \left( R_{i,j} - U_i^T \cdot V_j \right)^2    (1)
+
+ In this formula, R_{i,j} represents the rating value that the i-th user gives to the j-th item. U_i is the user feature vector, and
+ V_j is the item feature vector. If U_i and V_j are not normalized in real world applications, we usually encounter gradient
+ explosion problems, so a more practical loss function of matrix factorization is actually defined as follows:
+
+ L = \sum_{i=1}^{n} \sum_{j=1}^{m} \left( \frac{R_{i,j}}{R_{max}} - \frac{U_i^T \cdot V_j}{\|U_i\| \times \|V_j\|} \right)^2    (2)
+
+ Since \frac{U_i^T \cdot V_j}{\|U_i\| \times \|V_j\|} is the cosine of the angle between the vectors U_i and V_j, we are actually looking for vectors in higher dimensions
+ whose pairwise cosine values are defined by \frac{R_{i,j}}{R_{max}}.
+ As in most real world data sets, \frac{R_{i,j}}{R_{max}} follows a power law distribution. To make the framework simpler, we assume the
+ ratios follow the Zipf Distribution; namely, the frequency of occurrence of \frac{R_{i,j}}{R_{max}} is proportional to its own value. This
+ brings about a very interesting geometric property of the input data set: the majority of the values of \frac{R_{i,j}}{R_{max}} are 1. This means that to
+ minimize L in Formula (2), most of the U_i and V_j should be co-linear.
+ A natural question arises: what does the vector space of U_i and V_j look like? Let us define the number of co-linear
+ U_i and V_j pairs as M; then M \times \frac{R_{i,j}}{R_{max}} of the vector pairs have cosine value \frac{R_{i,j}}{R_{max}}. We find it extremely difficult to come up
+ with a visualization of such a space by analytical methods without the intervention of computers.
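+
+ To make the cosine-fitting objective of Formula (2) concrete, below is a minimal sketch (not the authors' code) of how it can be optimized with stochastic gradient descent; the dimension k, learning rate, epoch count and initialization scale are illustrative assumptions.
+
+ # Minimal SGD sketch for the cosine-fitting loss in Formula (2).
+ import numpy as np
+
+ def cosine_mf(ratings, n_users, n_items, r_max, k=16, lr=0.01, epochs=10, seed=0):
+     """ratings: iterable of (user, item, rating) triples."""
+     rng = np.random.default_rng(seed)
+     U = rng.normal(scale=0.1, size=(n_users, k))
+     V = rng.normal(scale=0.1, size=(n_items, k))
+     for _ in range(epochs):
+         for i, j, r in ratings:
+             u, v = U[i], V[j]
+             nu, nv = np.linalg.norm(u), np.linalg.norm(v)
+             cos = u @ v / (nu * nv)
+             err = r / r_max - cos                   # residual of Formula (2)
+             dcos_du = (v / nv - cos * u / nu) / nu  # gradient of the cosine w.r.t. u
+             dcos_dv = (u / nu - cos * v / nv) / nv  # gradient of the cosine w.r.t. v
+             U[i] += 2 * lr * err * dcos_du          # descend on the squared residual
+             V[j] += 2 * lr * err * dcos_dv
+     return U, V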
+ To examine the impact of the co-linearity property of the majority of U_i and V_j, we propose an algorithm as follows.
+ We define the loss function of the new algorithm (which we name Linear Factorization) below:
+
+ L = \sum_{i=1}^{n} \sum_{j=1}^{m} \left( \frac{R_{i,j}}{R_{max}} - \frac{U_i^T \cdot V_j}{\|U_i\| \times \|V_j\|} \right)^2
+
+ subject to:
+
+ \sum_{k=1}^{K} a_k \cdot U_{1,k} = 0 , \; \ldots , \; \sum_{k=1}^{K} a_k \cdot U_{n,k} = 0 ,
+ \sum_{k=1}^{K} a_k \cdot V_{1,k} = 0 , \; \ldots , \; \sum_{k=1}^{K} a_k \cdot V_{m,k} = 0    (3)
+
+ where K is the dimension of the feature vectors and a is the normal vector of the common hyperplane on which all user and item feature vectors lie.
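+
+ One standard way to satisfy the linear equality constraints of Formula (3) is to project every feature vector back onto the hyperplane after each gradient step. The sketch below assumes a fixed normal vector a; the text does not specify whether a is fixed or learned, so this is only one possible realization.
+
+ import numpy as np
+
+ def project_onto_hyperplane(x, a):
+     """Remove the component of x along the hyperplane normal a, so that a @ x = 0."""
+     a = a / np.linalg.norm(a)
+     return x - (x @ a) * a
+
+ # Usage inside the SGD loop for Formula (2), right after updating U[i] and V[j]:
+ #     U[i] = project_onto_hyperplane(U[i], a)
+ #     V[j] = project_onto_hyperplane(V[j], a)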
+ We tested the algorithm on the MovieLens 1 Million Dataset [19] (Fig. 1), and discovered that the technical accuracy of our
+ proposed algorithm achieves competitive results compared with other algorithms. From this observation, we can safely draw the
+ conclusion that the co-linearity property of the user and item feature vector space plays a vital role in the technical accuracy
+ of matrix factorization algorithms. We also find that Linear Factorization is the fairest recommender system
+ among the 9 algorithms. We provide bibliographic information for the algorithms in this experiment in the
+ Experiment section.
+
+ However, due to the difficulty of designing a vector space functional that caters to all the angle-preserving
+ vectors, we take a different route to solve the problem. Instead of considering \frac{R_{i,j}}{R_{max}} as cosines of angles between vectors, we
+ consider each value as the distance from a vector to the origin. By doing this, the input user-item rating values (by Zipf's
+ Law) become equidistantly distributed sample points on concentric circles.
+ The reason behind this geometric property is that the number of equidistantly distributed points on concentric circles
+ is proportional to the radius length. If we define the radii by \frac{R_{i,j}}{R_{max}}, the points of our newly designed geometry
+ become compliant with Zipf's Law.
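+
+ The construction can be sketched in a few lines: points are placed equidistantly on circles whose radii are the distinct \frac{R_{i,j}}{R_{max}} values, so the point count per circle grows linearly with the radius, matching Zipf's Law. The spacing constant step below is an illustrative choice, not a value from the paper.
+
+ import numpy as np
+
+ def concentric_circle_points(radii, step=0.1):
+     points = []
+     for r in radii:  # e.g. r in {0.2, 0.4, 0.6, 0.8, 1.0}
+         n = max(1, int(round(2 * np.pi * r / step)))  # point count proportional to r
+         theta = np.linspace(0, 2 * np.pi, n, endpoint=False)
+         points.append(np.stack([r * np.cos(theta), r * np.sin(theta)], axis=1))
+     return np.concatenate(points)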
+ To propose our new algorithm named ParaMat, we elevate our 2-D geometry into 3-D space by the following formula:
+
+ z = \frac{R_{i,j}}{R_{max}} \left( x_k^2 + y_k^2 \right)    (4)
+
+ We define x and y as the dot products of user feature vectors and item feature vectors:
+
+ x_k = U_i^T \cdot V_j , \quad y_k = W_i^T \cdot P_j    (5)
+
+ The loss function for ParaMat is defined as follows:
+
+ L = \sum_{i=1}^{n} \sum_{j=1}^{m} \left( \frac{R_{i,j}}{R_{max}} - d\left( \left( x_k , y_k , \frac{R_{i,j}}{R_{max}} \left( x_k^2 + y_k^2 \right) \right) , \left( 0 , 0 , 0 \right) \right) \right)^2    (6)
+
+ where d is the Euclidean distance between the lifted 3-D point and the origin.
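+
+ Under this reading of Formulas (4)-(6), the paraboloid lift and the distance can be computed directly; the sketch below illustrates the reconstructed formulas rather than the authors' code, and all variable names are assumptions.
+
+ import numpy as np
+
+ def paramat_distance(u_i, v_j, w_i, p_j, r_ij, r_max):
+     x = u_i @ v_j                        # Formula (5)
+     y = w_i @ p_j
+     z = (r_ij / r_max) * (x**2 + y**2)   # paraboloid lift of Formula (4)
+     return np.sqrt(x**2 + y**2 + z**2)   # d((x, y, z), (0, 0, 0)) in Formula (6)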
+ Fig. 1 Linear Factorization comparison experiments on the MovieLens 1 Million Dataset (MAE and Degree of Matthew Effect)
+
+ [Figure 1: two line charts plotting MAE and Degree of Matthew Effect against the gradient learning step for ZeroMat, Random Placement, Classic Matrix Factorization, DotMat, DotMat Hybrid, Zipf Placement, PoissonMat, PoissonMat Hybrid and Linear Factorization.]
+
+ We plug in the values of x and y as dot products of user feature vectors and item feature vectors, and substitute
+ \frac{R_{i,j}}{R_{max}} with the dot products of user feature vectors and item feature vectors produced by the classic matrix factorization. We obtain the
+ following formula:
+
+ L = \sum_{i=1}^{n} \sum_{j=1}^{m} \left( \frac{R_{i,j}}{R_{max}} - \sqrt{ \left( U_i^T \cdot V_j \right)^2 + \left( W_i^T \cdot P_j \right)^2 + \left( \frac{R_{i,j}}{R_{max}} \right)^2 \left( \left( U_i^T \cdot V_j \right)^2 + \left( W_i^T \cdot P_j \right)^2 \right)^2 } \right)^2    (7)
+ We choose to optimize Formula (7) using the Stochastic Gradient Descent (SGD) algorithm, after which we compute the
+ unknown user-item rating values using the following formula:
+
+ R_{i,j} = R_{max} \sqrt{ \left( U_i^T \cdot V_j \right)^2 + \left( W_i^T \cdot P_j \right)^2 + \left( \frac{\hat{R}_{i,j}}{R_{max}} \right)^2 \left( \left( U_i^T \cdot V_j \right)^2 + \left( W_i^T \cdot P_j \right)^2 \right)^2 }    (8)
+
+ where \hat{R}_{i,j} / R_{max} denotes the rating estimate produced by the classic matrix factorization.
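+
+ A minimal sketch of this prediction step under the reconstructed Formula (8); here r_hat stands for the estimate \hat{R}_{i,j} / R_{max} taken from a pre-trained classic matrix factorization, as described in the text, and all names are illustrative.
+
+ import numpy as np
+
+ def paramat_predict(u_i, v_j, w_i, p_j, r_hat, r_max):
+     x2 = (u_i @ v_j) ** 2
+     y2 = (w_i @ p_j) ** 2
+     return r_max * np.sqrt(x2 + y2 + r_hat**2 * (x2 + y2) ** 2)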
+ In the Experiment section, we show that ParaMat is the best algorithm when evaluated by the fairness metric. Through
+ geometric analysis and derivation, we have obtained an effective solution to the fairness problem in recommender systems.
+ 4. EXPERIMENT
+ To evaluate the performance of ParaMat, we compare the algorithm with 8 other algorithms on both the MovieLens 1
+ Million Dataset [19] and the LDOS-CoMoDa [20] dataset. Among the algorithms, Random Placement means recommending
+ uniformly at random; Zipf Placement means recommending according to a power law distribution; ZeroMat [5] and DotMat
+ [6] are introduced in the Related Work section; PoissonMat [21] is an algorithm to appear at an international conference
+ in 2022.
+ Fig. 2 illustrates the experimental results of the comparison among the 9 algorithms on the MovieLens 1 Million Dataset.
+ ParaMat achieves 5th place by the technical accuracy metric MAE, but wins 1st place by the fairness metric Degree of
+ Matthew Effect. From this observation, ParaMat is a quite effective recommender system when evaluated by the fairness
+ metric. Since the error curves are pretty cluttered, we also use Takens Embedding to visualize the curves in 2-D (displayed
+ in a 3-D grid for better visibility).
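+
+ Takens Embedding here refers to the standard delay-coordinate embedding: each 1-D error curve is turned into a set of 2-D points by pairing the series with a delayed copy of itself. A minimal sketch, with a delay of one step as an illustrative choice:
+
+ import numpy as np
+
+ def takens_embedding_2d(series, delay=1):
+     s = np.asarray(series)
+     return np.stack([s[:-delay], s[delay:]], axis=1)  # points (x_t, x_{t+delay})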
+
+ [Figure 2: two line charts plotting MAE and Degree of Matthew Effect against the gradient learning step for the 9 compared algorithms, including ParaMat.]
+
+ Fig. 3 demonstrates the experimental results of the comparison among the 9 algorithms on the LDOS-CoMoDa Dataset. ParaMat
+ achieves 4th place by the technical accuracy metric MAE, but once again wins 1st place by the fairness metric Degree of
+ Matthew Effect. From this observation, ParaMat is the fairest recommender system. The result is more clearly demonstrated
+ in 2-D via Takens Embedding.
+ Fig. 2 ParaMat comparison on the MovieLens 1 Million Dataset (MAE and Degree of Matthew Effect). The first row shows the MAE and DME curves of the different algorithms. The bottom row visualizes the curves using Takens Embedding.
+ Fig. 3 ParaMat comparison on the LDOS-CoMoDa Dataset (MAE and Degree of Matthew Effect). The first row shows the MAE and DME curves of the different algorithms. The bottom row visualizes the curves using Takens Embedding.
+
+ [Figures 2 and 3, remaining panels: Takens Embedding visualizations of the MAE and Degree of Matthew Effect curves for the 9 compared algorithms.]
+
+ 5. CONCLUSION
+ In this paper, we propose geometry-powered fair recommender system algorithms named Linear Factorization and
+ ParaMat. Linear Factorization assumes that the user feature vectors and item feature vectors of matrix factorization lie on the
+ same hyperplane, based on the observation that user/item feature vectors are mostly co-linear. ParaMat relies on the
+ geometric observation that user-item rating values mostly lie equidistantly on concentric circles.
+ Neither Linear Factorization nor ParaMat is the best performing algorithm on technical accuracy metrics such as
+ MAE, but they are the fairest algorithms among the 9 algorithms in our Experiment section. By analyzing the
+ geometric space of the input data structure, we have come up with two effective fair recommendation algorithms.
+ In future work, we would like to extend the geometric analysis of input data structures to other classic algorithms such as
+ learning to rank and factorization machines. We are also very interested in geometric interpretations of deep learning
+ models.
+ REFERENCES
+ [1] H. Guo, R. Tang, et al., "DeepFM: A Factorization-Machine based Neural Network for CTR Prediction", IJCAI, 2017
+ [2] G. Zhou, X. Zhu, C. Song, et al., "Deep Interest Network for Click-Through Rate Prediction", KDD, 2018
+ [3] R. Mehta, K. Rana, "A Review on Matrix Factorization Techniques in Recommender Systems", CSCITA, 2017
+ [4] R. Salakhutdinov, A. Mnih, "Probabilistic Matrix Factorization", Proceedings of the 20th International Conference on Neural Information Processing Systems, 2007
+ [5] H. Wang, "ZeroMat: Solving Cold-start Problem of Recommender System with No Input Data", IEEE 4th International Conference on Information Systems and Computer Aided Education (ICISCAE), 2021
+ [6] H. Wang, "DotMat: Solving Cold-start Problem and Alleviating Sparsity Problem for Recommender Systems", IEEE 5th International Conference on Electronics Technology (ICET), 2022
+ [7] E. Coviello, K. Ellis, et al., "A Scalable Model for Online Contextual Music Recommendations", CARS Workshop, 2021
+ [8] Y. Chen, J. Li, "Modeling Dynamic Attributes for Next Basket Recommendation", CARS Workshop, 2021
+ [9] H. Wang, "MatMat: Matrix Factorization by Matrix Fitting", IEEE 4th International Conference on Information Systems and Computer Aided Education (ICISCAE), 2021
+ [10] H. Wang, "MovieMat: Context-aware Movie Recommendation with Matrix Factorization by Matrix Fitting", 7th International Conference on Computer and Communications (ICCC), 2021
+ [11] A. Beutel, Ed. Chi, et al., "Beyond Globally Optimal: Focused Learning for Improved Recommendations", WWW, 2017
+ [12] H. Yadav, Z. Du, T. Joachims, "Fair Learning-to-Rank from Implicit Feedback", SIGIR, 2020
+ [13] M. Morik, A. Singh, J. Hong, T. Joachims, "Controlling Fairness and Bias in Dynamic Learning-to-Rank", SIGIR, 2020
+ [14] M. Zehlike, C. Castillo, "Reducing Disparate Exposure in Ranking: A Learning to Rank Approach", SIGIR, 2020
+ [15] H. Wang, "Zipf Matrix Factorization: Matrix Factorization with Matthew Effect Reduction", ICAIBD, 2021
+ [16] H. Wang, B. Ruan, "MatRec: Matrix Factorization for Highly Skewed Dataset", ICBDT, 2020
+ [17] H. Wang, "KL-Mat: Fair Recommender System via Information Geometry", icWCSN, 2022
+ [18] H. Wang, "Fairness Metrics for Recommender Systems", icWCSN, 2022
+ [19] T. Bertin-Mahieux, B. Whitman, P. Lamere, "The Million Song Dataset", ISMIR, 2011
+ [20] A. Odić, M. Tkalčič, J. F. Tasič, A. Košir, "Predicting and Detecting the Relevant Contextual Information in a Movie-Recommender System", Interacting with Computers, Volume 25, Issue 1, 2013
+ [21] H. Wang, "PoissonMat: Remodeling Matrix Factorization using Poisson Distribution and Solving the Cold Start Problem without Input Data", to appear, MLISE, 2022
+
216
+ page_content='038 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
217
+ page_content='040 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
218
+ page_content='042 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
219
+ page_content='0445.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
220
+ page_content=' CONCLUSION In this paper, we propose geometry-powered fair recommender system algorithms named Linear Factorization and ParaMat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
221
+ page_content=' Linear Factorization assumes that the user feature vectors and item feature vectors of matrix factorization lie on the same hyperplane, owing to the observation that user/item feature vectors are mostly collinear.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
222
+ page_content=' ParaMat likewise relies on the geometric observation that user-item rating values mostly lie, equidistantly, on concentric circles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
223
+ page_content=' Neither Linear Factorization nor ParaMat is the best-performing algorithm on technical accuracy metrics such as MAE, but they are the fairest of the 9 algorithms in our Experiment section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
224
+ page_content=' By analyzing the geometric space of the input data structure, we have arrived at two effective fair recommendation algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
225
+ page_content=' In future work, we would like to extend this exploration of the geometric space of input data structures to other classic algorithms such as learning to rank and factorization machines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
226
+ page_content=' We are also very interested in geometric interpretation of deep learning models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
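+ The sketch below is ours, not the paper's algorithm; all names and hyperparameters are illustrative. It shows one way to realize the hyperplane assumption quoted above: run ordinary matrix-factorization SGD, then re-project the user and item factors onto a fixed hyperplane {x : w.x = b} after each step.
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ n_users, n_items, k = 50, 40, 8
+ R = rng.integers(1, 6, size=(n_users, n_items)).astype(float)  # toy rating matrix
+
+ w = rng.normal(size=k)
+ w /= np.linalg.norm(w)  # unit normal of the shared hyperplane
+ b = 1.0                 # offset, so the constraint is w.x = b
+
+ def project(X):
+     # Orthogonally project each row of X onto the hyperplane {x : w.x = b}.
+     return X - np.outer(X @ w - b, w)
+
+ U = project(rng.normal(scale=0.1, size=(n_users, k)))
+ V = project(rng.normal(scale=0.1, size=(n_items, k)))
+
+ lr = 0.002
+ for _ in range(20000):
+     i, j = rng.integers(n_users), rng.integers(n_items)
+     err = R[i, j] - U[i] @ V[j]  # plain matrix-factorization gradient signal
+     U[i], V[j] = U[i] + lr * err * V[j], V[j] + lr * err * U[i]
+     U[i] = project(U[i][None, :])[0]  # enforce the geometric constraint
+     V[j] = project(V[j][None, :])[0]
+
+ print("train RMSE:", np.sqrt(np.mean((R - U @ V.T) ** 2)))
+ The same pattern would apply to the concentric-circle observation behind ParaMat, with a radial projection in place of project(); again, this is a reading of the stated geometry, not the authors' published update rule.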
227
+ page_content=' REFERENCES [1] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
228
+ page_content=' Guo, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
229
+ page_content=' Tang, et.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
230
+ page_content=' al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
231
+ page_content=', “DeepFM: A Factorization-Machine based Neural Network for CTR Prediction”, IJCAI, 2017 [2] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
232
+ page_content=' Zhou, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
233
+ page_content=' Zhu, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
234
+ page_content=' Song, et.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
235
+ page_content='al, “Deep Interest Network for Click-Through Rate Prediction”, KDD, 2018 [3] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
236
+ page_content=' Mehta;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
237
+ page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
238
+ page_content=' Rana, “A Review on Matrix Factorization Techniques in Recommender Systems”, CSCITA, 2017 [4] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
239
+ page_content='Salakhutdinov, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
240
+ page_content='Mnih, “Probabilistic Matrix Factorization”, Proceedings of the 20th International Conference on Neural Information Processing Systems, 2007 [5] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
241
+ page_content=' Wang, “ZeroMat: Solving Cold-start Problem of Recommender System with No Input Data”, IEEE 4th International Conference on Information Systems and Computer Aided Education (ICISCAE), 2021 [6] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
242
+ page_content=' Wang, “DotMat: Solving Cold-start Problem and Alleviating Sparsity Problem for Recommender Systems”, IEEE 5th International Conference on Electronics Technology (ICET 2022) [7] E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
243
+ page_content=' Coviello, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
244
+ page_content=' Ellis, et.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
245
+ page_content=' al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
246
+ page_content=' “A Scalable Model for Online Contextual Music Recommendations”, CARS Workshop, 2021 [8] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
247
+ page_content=' Chen, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
248
+ page_content=' Li, “Modeling Dynamic Attributes for Next Basket Recommendation”, CARS Workshop, 2021 [9] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
249
+ page_content='Wang, “MatMat: Matrix Factorization by Matrix Fitting”, IEEE 4th International Conference on Information Systems and Computer Aided Education (ICISCAE), 2021 [10]H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
250
+ page_content='Wang, "MovieMat: Context-aware Movie Recommendation with Matrix Factorization by Matrix Fitting", 7th International Conference on Computer and Communications (ICCC), 2021 [11]A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
251
+ page_content=' Beutel, Ed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
252
+ page_content=' Chi, et.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
253
+ page_content=' al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
254
+ page_content=' “Beyond Globally Optimal: Focused Learning for Improved Recommendations”, WWW, 2017 [12]H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
255
+ page_content='Yadav, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
256
+ page_content='Du, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
257
+ page_content='Joachims.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
258
+ page_content=' “Fair Learning-to-Rank from Implicit Feedback.”, SIGIR, 2020 [13]M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
259
+ page_content='Morik, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
260
+ page_content='Singh, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
261
+ page_content='Hong, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
262
+ page_content='Joachims.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
263
+ page_content=' “Controlling Fairness and Bias in Dynamic Learning-to-Rank”, SIGIR, 2020 [14]M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
264
+ page_content='Zehlike, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
265
+ page_content='Castillo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
266
+ page_content=' “ Reducing Disparate Exposure in Ranking: A Learning to Rank Approach.”, SIGIR, 2020 [15]H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
267
+ page_content=' Wang, “Zipf Matrix Factorization : Matrix Factorization with Matthew Effect Reduction”, ICAIBD, 2021 [16]H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
268
+ page_content=' Wang, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
269
+ page_content=' Ruan, “MatRec: Matrix Factorization for Highly Skewed Dataset”, ICBDT, 2020 [17]H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
270
+ page_content=' Wang, “KL-Mat: Fair Recommender System via Information Geometry”, icWCSN, 2022 [18]H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
271
+ page_content=' Wang, “Fairness Metrics for Recommender Systems”, icWCSN, 2022 [19]T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
272
+ page_content=' Bertin-Mahieux, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
273
+ page_content=' Whitman, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
274
+ page_content=' Lamere, The Million Song Dataset, ISMIR, 2011 [20]ODIĆ, Ante, TKALČIČ, Marko, TASIČ, Jurij F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
275
+ page_content=', KOŠIR, Andrej: Predicting and Detecting the Relevant Contextual Information in a Movie-Recommender System, Interacting with Computers, Volume 25, Issue 1, 2013 [21]H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
276
+ page_content=' Wang, “PoissonMat: Remodeling Matrix Factorization using Poisson Distribution and Solving the Cold Start Problem without Input Data”, to appear, MLISE, 2022' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/DtE2T4oBgHgl3EQfSQej/content/2301.03791v1.pdf'}
GdE1T4oBgHgl3EQfFAO9/content/tmp_files/2301.02898v1.pdf.txt ADDED
@@ -0,0 +1,696 @@
1
+ INFERENCE / Vol. 7, No. 3
3
+ A Truncated Manuscript
4
+ Pierre Schapira
5
+ Récoltes et Semailles I, II. Réflexions et témoignage
6
+ sur un passé de mathématicien
7
+ by Alexander Grothendieck
8
+ Editions Gallimard, 29,50 €.
9
+ Strictly speaking, this essay is not solely a review
11
+ of Alexander Grothendieck’s Récoltes et Semailles
12
+ (Reaping and Sowing).1 Although the book as a
13
+ whole, as well as Grothendieck’s work and life, will be dis-
14
+ cussed here, a good part of the essay is devoted to refuting
15
+ a thesis Grothendieck developed throughout his original
16
+ text. We have an important ally in this respect: the author
17
+ himself. In a series of important additions that have not
18
+ been incorporated in this new version, Grothendieck goes
19
+ back entirely on some of his assertions.
20
+ The figure of Grothendieck dominated much of
22
+ mathematics during the second half of the twen-
23
+ tieth century. If his work is essentially concerned
24
+ with algebraic geometry, his vision and methods have
25
+ spread far beyond—to algebraic topology, representation
26
+ theory, complex geometry, symplectic geometry, algebraic
27
+ analysis and even, more recently, computational geometry.
28
+ In short, all linear mathematics, as opposed to dynamical
29
+ systems, probabilities, or purely geometric geometry, such
30
+ as Riemannian or Hamiltonian geometry. It was under
31
+ his influence that the language of derived categories and
32
+ sheaf theory was established in these fields. It was also
33
+ Grothendieck who had the intuition and then formulated
34
+ the main lines of the theory of derived categories. He left
35
+ it to his student Jean-Louis Verdier to write out all the
36
+ details for his thesis, in which Verdier clarified the key
37
+ notion of a triangulated category.2 But it was Grothend-
38
+ ieck who situated sheaf theory and the six operations in
39
+ the theory of derived categories, to which we will return
40
+ later.
41
+ Between 1950 and 1970, functions—possibly gen-
42
+ eralized—were studied on real or complex manifolds,
43
+ and especially on Euclidean spaces, using the Fourier
44
+ transform. But on a complex manifold, when we refer
45
+ to functions we really mean holomorphic functions, and
46
+ these present a serious difficulty. That is, they do not
47
+ exist, at least not globally on a compact manifold such as
48
+ the projective line—apart, of course, from the constants.
49
+ Global knowledge, therefore, does not provide any infor-
50
+ mation, unlike the real diferentiable case, and one must
51
+ work locally. There is a rather extraordinary tool for this
52
+ purpose: sheaf theory, which was invented by Jean Leray
53
+ while he was a prisoner of war in Germany between 1941
54
+ and 1945.3 Leray’s text was somewhat incomprehensible
55
+ but was later clarified by Henri Cartan and Jean-Pierre
56
+ Serre, resulting in Roger Godement’s famous book,
57
+ Théorie des Faisceaux (Theory of Sheaves).4 Cartan and
58
+ Serre used this tool in their respective studies of holomor-
59
+ phic functions in dimension ≥ 1, after the seminal work of
60
+ Kiyoshi Oka, giving rise to Cartan’s Theorems A and B and
61
+ Serre’s influential paper “Faisceaux algébriques cohérents”
62
+ (Coherent algebraic sheaves).5
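+ To make the claim concrete, a standard application of Liouville's theorem
+ suffices (a textbook remark, added here for the reader): a function $f$
+ holomorphic on all of $\mathbb{P}^1(\mathbb{C})$ restricts to an entire function
+ on $\mathbb{C}$ that is bounded, by compactness; Liouville then forces $f$ to be
+ constant, so $\mathcal{O}(\mathbb{P}^1(\mathbb{C})) = \mathbb{C}$.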
63
+ It is in this context that Grothendieck approached
64
+ algebraic geometry around 1955, providing a solid basis
65
+ for sheaf cohomology in a foundational paper published
66
+ by the Tōhoku Mathematical Journal.6 In this paper, one
67
+ already encounters—implicitly—the main difficulty of cat-
68
+ egory theory, namely the problem of universes, a problem
69
+ later solved by Grothendieck in SGA4. This was done in the
70
+ manner of another Alexander cutting the Gordian knot:7
71
+ Grothendieck poses the axiom that every set belongs to
72
+ a universe. This problem of universes, also known as the
73
+ inaccessible cardinals, outside of which category theory
74
+ cannot develop, is probably the reason why Bourbaki
75
+ gave up on categories and why Grothendieck then left the
76
+ group. Ralf Krömer has written an excellent article on this
77
+ issue.8
78
+ During the 1940s and 1950s, there were two concep-
79
+ tual revolutions whose importance was not immediately
80
+ understood: sheaf theory, as mentioned above, and cat-
81
+ egory theory, the latter due to Samuel Eilenberg and
82
+ Saunders Mac Lane.9 Moreover, the categorical point of
83
+ view is part of a vast movement of ideas that embraced
84
+ the structuralist approach of Claude Lévi-Strauss and
85
+ the linguistics of Noam Chomsky. Instead of considering
86
+ sets endowed with certain structures, category theory
87
+ focuses on the relations that can exist between objects. A
88
+ category C is thus a family of objects—as a set is a family
89
+ of elements—but given two objects X and Y, there exists
90
+ a priori a set called HomC (X, Y) representing the mor-
95
+ phisms from X to Y, these data being, of course, subject to
96
+ a certain number of natural axioms—composition of mor-
97
+ phisms, identity morphisms, etc. A new step then consists
98
+ in looking at morphisms between categories, which are
99
+ called functors. Certain key notions then emerge—such as
100
+ those of adjoint functors, final or initial objects, limits and
101
+ colimits—giving a precise and unifying meaning to many
102
+ ideas that run through mathematics.
103
+ There is a family of categories that plays a central role:
104
+ these are the additive categories and, among them, the
105
+ abelian categories, which are modeled on the category of
106
+ modules over a ring. But if the vector spaces over a field are
107
+ replaced by the modules over a ring, the classical tensor
108
+ product and internal Hom functors are no longer exact, i.e.,
109
+ they do not transform exact sequences into exact sequenc-
110
+ es—a subspace does not always admit a supplementary.
111
+ It is thus necessary to consider the derived functors. We
112
+ then enter the domain of homological algebra, a natural
113
+ generalization of linear algebra. Here, the reference book
114
+ was initially that of Cartan and Eilenberg,10 before it was
115
+ dethroned by Grothendieck’s Tōhoku paper. But comput-
116
+ ing the derived functor of the composition of two functors
117
+ requires the use of Leray’s spectral sequences, giving rise
118
+ to often inextricable calculations. This is where derived
119
+ categories show their power: in this language, everything
120
+ is remarkably simple.
121
+ What are the six operations? With ordinary functions, we
122
+ have three natural operations, besides addition: the prod-
123
+ uct and, associated with an application f : X → Y between
124
+ real manifolds, the integration which sends—modulo some
125
+ technical details—functions on X to functions on Y and the
126
+ composition by f which sends functions on Y to functions
127
+ on X. In sheaf theory, the tensor product ⊗^L is the analogue
129
+ of the product, the proper direct image Rf! is the analogue
130
+ of the integration, and the inverse image f –1 is the analogue
131
+ of the composition by f. But the tensor product has a right
132
+ adjoint, Rhom, the functor f –1 has a right adjoint, the direct
133
+ image Rf*, and the functor Rf! has a right adjoint, f !.
134
+ The functor f !, which exists only in the derived frame-
135
+ work, unlike the other five, was discovered by Grothendieck
136
+ in the context of étale cohomology and was subsequently
137
+ constructed by Verdier for locally compact spaces. As Gro-
138
+ thendieck had seen, f ! provides a broad generalization of
139
+ Poincaré duality and this functor now plays a crucial role.
140
+ But since locally compact spaces appear more often than
141
+ the étale topology, it is the name Poincaré–Verdier, if not
142
+ just Verdier alone, that remains associated with duality.
143
+ This attribution is largely unfair and left Grothendieck
144
+ feeling somewhat bitter—rightly so.11
145
+ One might think that such an abstract framework
146
+ dispenses with explicit computations, but this is a mis-
147
+ conception: put simply, the computations are no longer
148
+ the same. If the direct image functor does not allow for
149
+ integrals to be computed explicitly, the formalism of the
150
+ six operations nevertheless gives rise to sophisticated
151
+ numerical results, such as the computation of dimensions
152
+ of cohomology spaces. The Riemann–Roch–Hirzebruch–
153
+ Grothendieck theorem is a beautiful illustration.
154
+ In the same vein, one of Grothendieck’s fundamen-
155
+ tal discoveries was to develop sheaf theory on categories
156
+ and thus, in particular, on spaces that no longer have any
157
+ points. What do sheaves require in order to exist? Only the
158
+ data of open sets and their inclusions, and the notion of a
159
+ covering. There is nothing to prevent the objects of a cat-
160
+ egory from playing the role of the open sets, the category
161
+ is then called a pre-site, and it remains to define axiomat-
162
+ ically what the coverings are in order to obtain a site—i.e.,
163
+ a category with a Grothendieck topology. This natural
164
+ generalization of the usual topological spaces proves to be
165
+ extremely fruitful and analysts would, in fact, do well to
166
+ draw inspiration from it. On a real manifold, there are far
167
+ too many pathological open sets and too many coverings
168
+ if one is interested in what happens at the edge of an open
169
+ set.
170
+ One then arrives at topos theory—topoi for the scholars.
171
+ The underlying idea, which in a particular situation goes
172
+ back to Israel Gelfand, is that a space—in this case, a site—
173
+ can be reconstructed from the category of sheaves on that
174
+ site. A topos is then a category equivalent to a category
175
+ of sheaves. The category of sets, for example, is nothing
176
+ other than the topos associated with a point. But even if
177
+ topos theory has been used in a new proof of Paul Cohen’s
178
+ result on the independence of the continuum hypothesis,
179
+ its applications in mathematics are still uncertain.
180
+ This presentation of a selection of Grothendieck’s fun-
181
+ damental ideas is far from complete and only reflects the
182
+ particular interests of the reviewer. In R&S, Grothendieck
183
+ lists what he considers the 12 key ideas of his work and
184
+ also provides a list of his students.12
185
+ One should, of course, also mention his first works in
186
+ functional analysis, dating from around 1955, and scheme
187
+ theory, which revolutionized algebraic geometry, as well
188
+ as the intuition of motives, a partially conjectural theory
189
+ that was later developed by Pierre Deligne, Vladimir
190
+ Voevodsky, Joseph Ayoub, and many others. One should
191
+ also mention the fundamental text “A la poursuite des
192
+ champs” in which Grothendieck lays the foundations for
193
+ ∞-categories and homotopical algebra.13 Indeed, if trian-
194
+ gulated categories are an incredibly simple and efcient
195
+ tool, they have a defect which seriously limits their use, for
196
+ example, in gluing problems. This defect is linked to the
197
+ fact that a certain morphism is unique up to isomorphism,
198
+ but this isomorphism is not unique! The new theory of
199
+ ∞-categories, to which the names Jacob Lurie, Graeme
200
+ Segal, Bertrand Töen, and a few others must be associated,
201
+ is in the process of completely supplanting the classical
202
+ theory of derived categories, although it is, for the time
203
+ being, not easily accessible, to say the least.
204
+ Pierre Cartier has written a remarkable article on
206
+ Grothendieck’s life and it is pointless to paraphrase
207
+ it here.14 There are also excellent articles by Allyn
211
+ Jackson and Winfried Scharlau, as well as all the links
212
+ on the Grothendieck Circle website managed by Leila
213
+ Schneps.15
214
+ Nonetheless, a few words on this subject are helpful.
215
+ Grothendieck’s father, Sascha Schapiro, was a Russian
216
+ anarchist who took part in the aborted revolution of 1905.
217
+ He then served ten years in the prisons of Czar Nicholas
218
+ II before being released following the revolution of 1917.
219
+ Despite initially being feted as a hero, Schapiro was soon
220
+ declared an enemy of the people. He later fought along-
221
+ side the Republicans during the Spanish Civil war before
222
+ becoming a traveling photographer in France. In 1939,
223
+ Schapiro was interned at the Camp Vernet in the French
224
+ Pyrenees. He was handed over to the Nazis by the Vichy
225
+ police in 1942 and disappeared into Auschwitz.
226
+ Grothendieck’s mother, Hanka, was an extreme-left
227
+ militant in Germany during the 1920s who emigrated to
228
+ France when Adolf Hitler came to power.16 Her son did not
229
+ join her until 1938, at the age of 10, after having lived in
230
+ hiding on a farm in Germany. Grothendieck spent part of
231
+ the war in Le Chambon-sur-Lignon at the famous Collège
232
+ Cévenol that saved so many Jewish children.
233
+ Grothendieck’s mathematical life began in Nancy
234
+ during the 1950s, where Jean Dieudonné and Laurent
235
+ Schwartz took him under their wing. After his initial work
236
+ in functional analysis, which still remains fundamental, he
237
+ turned to algebraic geometry with great success, a story
238
+ that is now well-known.
239
+ Grothendieck was one of the first two professors
240
+ appointed to the Institut des Hautes Études Scientifiques
241
+ (IHES) in 1959, where he obtained most of his results and
242
+ published, with the help of Jean Dieudonné, the other
243
+ professor at the IHES, the famous EGA (Éléments de
244
+ géométrie algébrique). He directed the seminar on alge-
245
+ braic geometry, which resulted in a publication of more
246
+ than 5,000 pages coauthored with some of his students,
247
+ known as SGA (Séminaire de Géométrie Algébrique du Bois
248
+ Marie). Grothendieck was awarded the Fields Medal at
249
+ the International Congress of Mathematicians in 1966,
250
+ but did not travel to Moscow to receive it. In 1988, he won
251
+ the prestigious and well-funded Crafoord Prize, which he
252
+ refused.
253
+ Grothendieck left the IHES in 1970 after discovering
254
+ that the institute benefited from military funding and
255
+ launched his own ecological crusade, first through the
256
+ journal Survivre, and then later Survivre et vivre. But Gro-
257
+ thendieck not only left the IHES, he also left the world of
258
+ mathematics and, in particular, his students. He returned
259
+ to mathematics in 1983, but in a very diferent style, with
260
+ his publications “Esquisse d’un programme” (Sketch of a
261
+ Programme) and “À la poursuite des champs” (Pursuing
262
+ Stacks).17 After a year at the Collège de France, he was
263
+ appointed professor in Montpellier, where he worked
264
+ until his retirement in 1988. He spent his final years living
265
+ in the countryside in almost total seclusion, until his death
266
+ in 2014 at the age of 86.
267
+ As we have seen, Grothendieck is the author of a
269
+ considerable body of mathematical work. But he
270
+ is also the author of significant literary works.
271
+ Among them is R&S, which was published by Gallimard in
272
+ January 2022 after having been widely distributed on the
273
+ internet since Grothendieck first wrote the text in 1986.
274
+ Amounting to more than 1,900 pages, the book deals with
275
+ many subjects: the author’s journey as a mathematician,
276
+ his passions, his illusions and disillusions, the process of
277
+ creation, and a thousand other topics. It also includes long
278
+ passages on Yin and Yang, feminine and masculine ways
279
+ of doing mathematics, the mother, the father and child,
280
+ dreams, and so on. A large part of the text is devoted to
281
+ a revelation he is said to have experienced in 1976 and
282
+ a long period of meditation that followed. It is a kind of
283
+ self-analysis tinged, it has to be said, with a certain degree
284
+ of paranoia. A recurring theme is the sense of betrayal he
285
+ felt toward his former students, which is manifested in his
286
+ work being ignored and forgotten. The words “funeral,”
287
+ “deceased,” “hearse,” “massacre,” and “gravedigger,” and so
288
+ on, quickly become omnipresent after their appearance in
289
+ the table of contents. More generally, the book denounces
290
+ a loss of ethics among the entire mathematical community.
291
+ Grothendieck explains to the reader that mathematics
292
+ “was better before”—that is, prior to 1960—as if the older
293
+ generation was irreproachable! In fact, on the contrary,
294
+ it can be said that mathematicians have become much
295
+ more honest since the 1990s. The source of this miracle
296
+ has a name: arXiv. It is now becoming ever more difficult
297
+ to appropriate the ideas of others, although, of course, it
298
+ is still possible to some degree. The institution of math-
299
+ ematics itself has also been greatly improved, or at least
300
+ has been greatly transformed. The system of mandarins
301
+ that dominated French mathematics until the 1970s, from
302
+ which Grothendieck did not experience any difficulties
303
+ and about whom he does not say a word, has practically
304
+ disappeared.
305
+ Grothendieck, who is very self-critical throughout the
306
+ text, sometimes ponders whether he might have been arro-
307
+ gant or even contemptuous of those around him during his
308
+ heyday in the 1960s and 1970s. Despite these concerns, it
309
+ is clear that he cares little about ingratiating himself with
310
+ his readers. Instead, he offers a book of more than 1,900
311
+ pages, while in response to a question about the IHES
312
+ library in its early days, he remarks: “We don’t read books,
313
+ we write them!”18 R&S contains many contradictions that
314
+ are only partly corrected by a series of Notes—some of
315
+ which, despite being of particular importance, are not
316
+ included in this new edition. Addressing these contradic-
317
+ tions properly would undoubtedly have required the text
318
+ to be completely rewritten.
319
+ Grothendieck is not paralyzed by any sense of false
320
+ modesty:
321
+ The thing that struck me is that I do not remember having
322
+ known, even from the allusions of friends or colleagues
326
+ who are better versed in history, of a mathematician apart
327
+ from myself who contributed such a multiplicity of inno-
328
+ vative ideas, not more or less disjointed from one another,
329
+ but as part of a vast unifying vision (as was the case for
330
+ Newton and for Einstein in physics and cosmology, and for
331
+ Darwin and for Pasteur in biology).19
332
+ Elsewhere, he writes: “It would seem that, as a servant of
333
+ a vast unifying vision born in me, I am ‘one of a kind’ in the
334
+ history of mathematics from its origin to the present day.”20
335
+ Although the writing style is not lacking in inspiration,
336
+ it is nonetheless uneven and sometimes—deliberately—
337
+ familiar. Grothendieck is not le duc de Saint-Simon.
338
+ The following analysis will focus only on the content con-
339
+ cerning mathematics and the world of mathematicians.
340
+ In the text, Grothendieck complains at length that his
341
+ ideas have been plundered by his former students with-
342
+ out reference to their master or that they have simply been
343
+ erased and forgotten. These assertions are not always
344
+ supported by solid arguments or precise references. But,
345
+ above all, it is the nature of discoveries to be trivialized
346
+ and their author forgotten, and all the more so when the
347
+ underlying ideas are often, in hindsight, obvious.
348
+ Grothendieck’s reproaches are addressed to all his
349
+ pupils, and particularly to Deligne—whose name is almost
350
+ always preceded by the words “my friend,” insinuating
351
+ “my former friend”—and to Verdier. It is quite possible to
352
+ imagine that Deligne was only lightly involved with Gro-
353
+ thendieck’s authorship of the motives or that the “Verdier
354
+ duality” already mentioned could just as well be called the
355
+ “Grothendieck duality.” But, otherwise, everyone knows
356
+ that it was Grothendieck who invented schemes, motives,
357
+ Grothendieck topologies, topoi, and, above all, that he
358
+ imposed the functorial point of view via the six opera-
359
+ tions and the derived categories. Everyone knows that it
360
+ is thanks to the machinery devised by Grothendieck that
361
+ Deligne was able to prove André Weil’s last conjecture.
362
+ In support of his claims about the total loss of ethics
363
+ in the mathematical community from the 1970s onwards,
364
+ Grothendieck’s entire argument is based on the unique
365
+ testimony of one and only one mathematician who came
366
+ to see him several times at his home in the countryside.
367
+ It is common practice in ethnology to rely on an infor-
368
+ mant from the group being studied and who speaks the
369
+ language. The problem is that the informant may not
370
+ always be all that reliable and can, in fact, say anything.
371
+ Here it is an even worse situation, since the informant
372
+ declares himself to be the first person affected by the story
373
+ he is going to tell, namely the Riemann–Hilbert (R–H)
374
+ correspondence.
375
+ The informant was able to convince Grothend-
377
+ ieck that he was, in a certain sense, his spiritual
378
+ son—“a continuator of my work.”21 Furthermore,
379
+ the informant persuaded Grothendieck that he had been
380
+ able to demonstrate the R–H correspondence—without
381
+ the slightest advice and in the face of indifference, if not
382
+ outright hostility, from all around him.22 And that he had
383
+ done so using the language of derived categories for the
384
+ first time in this field. Speaking of this informant, Gro-
385
+ thendieck also writes that “his pioneering work since 1972
386
+ has been done in complete solitude.”23
387
+ All of this is grossly untrue.
388
+ The informant did his postgraduate thesis in 1974 under
389
+ my direction and on a subject that I had proposed to him.
390
+ He had largely benefited from a private talk given by
391
+ Masaki Kashiwara in 1975 when the informant was start-
392
+ ing to prepare his first article, which makes no mention of
393
+ this decisive talk. He also benefited from a copy of Kashi-
394
+ wara’s 1970 thesis24—written in Japanese, but there is no
395
+ lack of translators—and from Christian Houzel’s repeated
396
+ advice throughout his thèse d’état. As for the derived cat-
397
+ egories, they appear on the first page of the foundational
398
+ article by Mikio Sato, Takahiro Kawai, and Kashiwara
399
+ published in 1973.25
400
+ The R–H correspondence is an “equivalence of cat-
401
+ egories” formulated by Kashiwara in 1975 and was
402
+ demonstrated by the same author in 1980.26
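+ In modern notation (anticipating note 26 below), the statement is that the
+ solution functor
+ \[
+ \mathrm{Sol}(\mathcal{M}) = R\mathcal{H}om_{\mathcal{D}_X}(\mathcal{M}, \mathcal{O}_X)
+ \]
+ induces an equivalence $D^{b}_{\mathrm{rh}}(\mathcal{D}_X)^{\mathrm{op}} \simeq D^{b}_{\mathbb{C}\text{-}c}(X)$ between the bounded
+ derived category of regular holonomic $\mathcal{D}_X$-modules and that of
+ $\mathbb{C}$-constructible sheaves on the complex manifold $X$.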
403
+ In passing, it is worth noting that interesting equiv-
404
+ alences of categories build bridges between different a
405
+ priori unrelated fields of mathematics: here, the partial
406
+ differential equations of analysis and the constructible
407
+ sheaves of algebraic topology. Another more recent and
408
+ very important equivalence is provided by Maxim Kontse-
409
+ vich’s “mirror symmetry,” which links complex geometry
410
+ and symplectic geometry.
411
+ Grothendieck’s entire statement about the role of his
412
+ protégé in the story of the R–H correspondence, scattered
413
+ and repeated throughout the 1,900 pages of his original
414
+ text, is therefore based on false testimonies. However,
415
+ with astonishing naïveté, our author takes everything
416
+ that his interlocutor tells him at face value and he is
417
+ quoted more than 200 times. Grothendieck goes on a cru-
418
+ sade against Kashiwara, whom he goes so far as to call “a
419
+ ringleader [caïd in the original French] from across the
420
+ Pacific,”27 even though he had never communicated with
421
+ Kashiwara and had only a very fragmentary knowledge
422
+ of his work. By extension, it is the entire Sato school that
423
+ is labeled “ringleaders from across the Pacific.”28 With-
424
+ out being exhaustive, which would be impossible in any
425
+ practical sense without copying the entire book, consider
426
+ the following quote from Note 458, which is not lacking
427
+ in unintentional irony: “the Sato school is said to have ini-
428
+ tiated the method of surrounding itself with obscurity in
429
+ order to dominate.”29
430
+ In 1981, Grothendieck reached the peak of his resent-
431
+ ment with an event he referred to as “le Colloque Pervers”
432
+ (the Perverse Colloquium), a historically important con-
433
+ ference to which his protégé was not invited. It was on this
434
+ occasion that Alexander Beilinson, Joseph Bernstein, and
438
+ Deligne—not counting Ofer Gabber who refused to asso-
439
+ ciate his name with it for obscure reasons—introduced
440
+ perverse sheaves.30 The idea of these sheaves—which are
441
+ not sheaves in the strict sense, but complexes of sheaves,
442
+ hence, perhaps, the adjective—arises naturally from the
443
+ R–H correspondence. Their definition already appears
444
+ implicitly in Kashiwara’s 1975 text. Grothendieck was
445
+ outraged that his protégé was completely ignored at this
446
+ colloquium when he should have been its star. If no ref-
447
+ erence was made to the authorship of R–H at the event,
448
+ it was probably because the mathematical community
449
+ was aware of the controversies surrounding it and nobody
450
+ wanted to be involved. But if one reads what Grothendieck
451
+ writes in the additions to R&S, the notable absentee at this
452
+ colloquium was not, in fact, his informant, but Kashiwara!
453
+ In other words, all the pages and pages of indignation in
454
+ the original text are either entirely misplaced, or do not
455
+ defend the right people. There is no doubt in my mind that
456
+ Sato and his student Kashiwara were unjustly ignored, or
457
+ maybe even misunderstood, by the French school during
458
+ the 1980s, and by Bourbaki in particular, but that is another
459
+ story.31
460
+ In the edition published by Gallimard, Grothendieck
461
+ cautiously goes back on his assertions concerning the
462
+ authorship of R–H and is willing to acknowledge that
463
+ Kashiwara could have played a role in it,32 perhaps even
464
+ a role equivalent to that of his informant. Finally, at the
465
+ end of Part III, he offers “my most sincere apologies” to
466
+ Kashiwara.33 But 1,500 pages later, none of these admis-
467
+ sions preclude Grothendieck from insulting Kashiwara
468
+ once again.
469
+ In 1986, I was aware of this part of Grothendieck’s
471
+ text—concerning the ringleaders, or caïds, as he
472
+ referred to them, from the other side of the Pacific—
473
+ and I wrote to him about it on January 16. An important
474
+ correspondence followed that continued for several
475
+ months until around the end of March. Supported by the
476
+ testimony of Christian Houzel, I believe that I successfully
477
+ convinced Grothendieck that his version of R–H was com-
478
+ pletely false.
479
+ In a series of additions, adding about twenty pages
480
+ to the initial text of R&S, some extracts from which are
481
+ presented below,34 Grothendieck went back completely
482
+ on what he had written earlier. He finally affirmed that
483
+ it was indeed Kashiwara who first formulated the R–H
484
+ correspondence in 1975 and that it was also Kashiwara
485
+ who gave the first outline of a proof in 1980. It is to Gro-
486
+ thendieck’s credit that he admits his error of judgement,
487
+ but because it had been propagated throughout 1,900
488
+ pages, it was an error that was not easily rectified. Gro-
489
+ thendieck chose to address the problem using additions
490
+ and footnotes, but unfortunately, apart from a flat apol-
491
+ ogy,35 none of these amendments appear in the published
492
+ book.
493
+ Consider, for example, the following additions.
494
+ Grothendieck, writing on May 9, 1986:
495
+ After the provisional distribution of Récoltes et semailles,
496
+ from October last year, I was contacted by Pierre Schapira,
497
+ and then by Christian Houzel, to point out some glaring
498
+ inaccuracies in the version of events presented in Récol-
499
+ tes et semailles. The situation was clarified considerably
500
+ during correspondence with both of them, which con-
501
+ tinued between January and March of this year. It now
502
+ appears to me that in the “[Zoghman] Mebkhout version”
503
+ (which was not lacking in internal consistency) the true,
504
+ the tendentious and the downright false are inextricably
505
+ mixed.
506
+ Grothendieck again, in an addition dated May 15, 1986:
507
+ In retrospect, I am convinced that Kashiwara cannot be
508
+ reproached for the slightest incorrectness in this case. In
509
+ his presentation, he gives a statement and a first sketch of
510
+ a proof of a theorem, which he had indeed been the first
511
+ to conjecture as early as 1975… Moreover, he is careful
512
+ to specify, as early as page 2: “Let us note that the
513
+ theorem is also proved by Mebkhout by a different way.”
514
+ This was even “lending to the rich,” because the previ-
515
+ ous month, in his note to the CRAS [Comptes Rendus de
516
+ l’Académie des Sciences] of 3 March 1980, Mebkhout had
517
+ expressed himself in the hypothetical form “we hope to
518
+ show that…,” and without making the slightest allusion to
519
+ it…
520
+ All of this raises some questions about the edi-
522
+ torial process that led to the publication of R&S
523
+ by Gallimard in January 2022. In the foreword,
524
+ which is dated January 1986,36 Grothendieck thanks Chris-
525
+ tian Bourgois and Stéphane Deligeorges for including his
526
+ text in the Épistémè collection. What happened between
527
+ this date and the publication by Gallimard? And, above all,
528
+ why do Grothendieck’s additions—incorporated prior to
529
+ May 29, 1986—not appear in the final published version,
530
+ while the brief apology that does appear clearly proves
531
+ that the Gallimard edition includes other elements added
532
+ after January 1986?37
533
+ In this review, I have focused on the mathematical sec-
534
+ tions of the book and the passages that discuss the history
535
+ of the R–H correspondence. The latter is far from being
536
+ an anecdotal component in the book. Indeed, Grothend-
537
+ ieck refers to it constantly—it is a leitmotif. Unfortunately,
538
+ and as he admits with great frankness, Grothendieck was
539
+ misled by an informant lacking in objectivity, to say the
540
+ absolute least. With a disarming and, in a certain sense,
541
+ admirable degree of naivety, he also admits that he never
542
+ imagined that the information he was being fed could be
543
+ biased or incomplete, let alone downright false. Between
544
+ 1955 and 1970, Grothendieck lived in a world of pure ideas.
545
+ He was immersed in mathematics to an extent that is hard
546
+ to imagine. When he emerged from the noosphere into the
550
+ real world—that is, the social world—one can only imag-
551
+ ine the harsh shock of everyday life and how crushed he
552
+ felt by what he perceived as a loss of ethics in the world of
553
+ science. But why should this world be any different from
554
+ the rest of society? The rigor of science has never been
555
+ reflected in its practitioners—examples are legion.
556
+ Science is a great devourer of men and characters.38
557
+ Pierre Schapira is Professor Emeritus at University Pierre
558
+ et Marie Curie (University of Paris 6).
559
+ 1.
560
+ The author would like to thank Leila Schneps for her critical
561
+ and constructive advice.
562
+ 2.
563
+ Alexander Grothendieck, Récoltes et Semailles: I, II. Réflex-
564
+ ions et témoignage sur un passé de mathématicien (Paris:
565
+ Gallimard, 2022), 439.
566
+ 3.
567
+ See the historical article by Christian Houzel “Les débuts de
568
+ la théorie des faisceaux,” in Masaki Kashiwara and Pierre
569
+ Schapira, Sheaves on Manifolds, Grundlehren der Mathema-
570
+ tischen Wissenschaften, vol. 292 (Berlin: Springer-Verlag,
571
+ 1990), doi:10.1007/978-3-662-02661-8.
572
+ 4.
573
+ Roger Godement, Théorie des faisceaux (Paris: Hermann,
574
+ 1958).
575
+ 5.
576
+ Jean-Pierre Serre, “Faisceaux Algebriques Coherents,”
577
+ Annals of Mathematics, 2nd Series 61, no. 2. (1955): 197–278,
578
+ doi:10.2307/1969915.
579
+ 6.
580
+ Alexander Grothendieck, “Sur quelques points d’algèbre
581
+ homologique,” Tōhoku Mathematical Journal 9, no. 3 (1957):
582
+ 119–221, doi:10.2748/tmj/1178244774. This article is analyzed
583
+ in detail in Rick Jardine, “Tōhoku,” Inference 1, no. 3 (2015),
584
+ doi:10.37282/991819.15.13.
585
+ 7.
586
+ Mike Artin, Alexandre Grothendieck, and Jean-Louis
587
+ Verdier, Théorie des topos et cohomologie étale des schémas,
588
+ Lecture Notes in Mathematics, vols. 269, 270, 305 (Berlin:
589
+ Springer-Verlag, 1972–73).
590
+ 8.
591
+ Ralf Krömer, “La « machine de Grothendieck » se fonde-
592
+ t-elle seulement sur des vocables métamathématiques ?
593
+ Bourbaki et les catégories au cours des années cinquante,”
594
+ Revue d’histoire des mathématiques 12 (2006): 119–62.
595
+ 9.
596
+ Samuel Eilenberg and Saunders Mac Lane, “Natural Iso-
597
+ morphisms in Group Theory,” Proceedings of the National
598
+ Academy of Sciences 28 (1942): 537–43, doi:10.1073/
599
+ pnas.28.12.537; Samuel Eilenberg and Saunders Mac Lane,
600
+ “General Theory of Natural Equivalences,” Transactions
601
+ of the American Mathematical Society 58 (1945): 231–94,
602
+ doi:10.1090/S0002-9947-1945-0013131-6.
603
+ 10. Henri Cartan and Samuel Eilenberg, Homological Algebra
604
+ (Princeton, NJ: Princeton University Press, 1956).
605
+ 11. Grothendieck, Récoltes et Semailles, 158.
606
+ 12. Grothendieck, Récoltes et Semailles, 42, 377.
607
+ 13. Alexander Grothendieck, “A la poursuite des champs”
608
+ (1987).
609
+ 14. Pierre Cartier, “A Country Known Only by Name,” Infer-
610
+ ence 1, no. 1 (2014), doi:10.37282/991819.14.7. For additional
611
+ details, see “Sascha Shapiro,” Wikipedia.
612
+ 15. Allyn Jackson, “Comme Appelé du Néant—As if Summoned
613
+ from the Void: The Life of Alexandre Grothendieck,” Notices
614
+ of the AMS 51, no. 4 and 51, no. 10 (2004): 1,038–54 and 1,196–
615
+ 212; Winfried Scharlau, “Who is Alexander Grothendiek?,”
616
+ Notices of the AMS 55, no. 8 (2008): 930–41; Leila Schneps,
617
+ “The Grothendieck Circle” (grothendieckcircle.org).
618
+ 16. The wording “extreme-left militant in Germany in the
619
+ 1920s” does not really have the same meaning as its transpo-
620
+ sition a century later.
621
+ 17. Grothendieck, “A la poursuite des champs.”
622
+ 18. Jackson, “Comme Appelé du Néant,” 1,050.
623
+ 19. Grothendieck, Récoltes et Semailles, 935.
624
+ 20. Grothendieck, Récoltes et Semailles, 94.
625
+ 21. Grothendieck, Récoltes et Semailles, 1,664.
626
+ 22. Grothendieck, Récoltes et Semailles, 413.
627
+ 23. Grothendieck, Récoltes et Semailles, 1,663.
628
+ 24. Masaki Kashiwara, “Algebraic Study of Systems of Partial
629
+ Diferential Equations,” Master’s thesis (Tokyo University,
630
+ 1970), Mémoires de la Société Mathématique de France 63
631
+ (1995).
632
+ 25. Mikio Sato, Takahiro Kawai, and Masaki Kashiwara,
633
+ “Microfunctions
634
+ and
635
+ Pseudo-Diferential
636
+ Equations,
637
+ Hyperfunctions and Pseudo-Diferential Equations,” in
638
+ Proceedings of a Conference at Katata, 1971; Dedicated to the
639
+ Memory of André Martineau, Lecture Notes in Mathematics,
640
+ vol. 287 (Berlin: Springer-Verlag, 1973), 265–529.
641
+ 26. A few words on the R–H correspondence follow. The
642
+ modern formulation uses the theory of D-modules, a theory
643
+ that was intuited by Sato in the 1960s, and fully imple-
644
+ mented by Kashiwara in his thesis. (A related theory was
645
+ developed independently by Joseph Bernstein.) In everyday
646
+ language, a coherent D-module means a system of partial
647
+ differential equations with holomorphic coefficients. Holo-
648
+ nomic modules are the >1-dimensional version of ordinary
649
+ differential equations, and among them regular holonomic
650
+ modules generalize the classical notion of Fuchsian equa-
651
+ tions. In 1975, Kashiwara showed that the functor Sol,
652
+ which associates the complex of its holomorphic solutions
653
+ to a holonomic module, takes its values in constructible
654
+ sheaves, the sheaves which behave locally as a direct sum
655
+ of constant sheaves along a stratification. In the same year
656
+ he conjectured that there exists a triangulated subcategory
657
+ of the holonomic modules, the regular holonomic modules,
658
+ on which the functor Sol induces an “equivalence of cate-
659
+ gories.” Kashiwara proved his conjecture in 1980 and gave a
660
+ detailed account of the main steps in his proof at the École
661
+ Polytechnique seminar, which was published. His proof
662
+ uses Heisuke Hironaka’s singularity resolution theorem and
663
+ the precursor work of Deligne who treated the special case
664
+ of “meromorphic connections.”
665
+ 27. Grothendieck, Récoltes et Semailles, 1,656.
666
+ 28. Grothendieck, Récoltes et Semailles, 1,651.
667
+ 29. Grothendieck, Récoltes et Semailles, 1,650.
671
+ 30. Alexander Beilinson, Joseph Bernstein, and Pierre Del-
672
+ igne, “Faisceaux pervers, Analysis and topology on singular
673
+ spaces, I” (Luminy, 1981), Astérisque, no. 100 (Paris: Societé
674
+ Mathematique de France, Paris, 1982): 5–171.
675
+ 31. Pierre Schapira, “Mikio Sato, a Visionary of Mathematics,”
676
+ Notices of the AMS 54, no. 2 (2007); Pierre Schapira, “Fifty
677
+ Years of Mathematics with Masaki Kashiwara,” Proceed-
678
+ ings of the International Congress of Mathematicians, Rio
679
+ de Janeiro, 2018, vol. 1, Plenary Lectures (Singapore: World
680
+ Scientific, 2018).
681
+ 32. Grothendieck, Récoltes et Semailles, 163.
682
+ 33. Grothendieck, Récoltes et Semailles, 164.
683
+ 34. These additions have just been posted on Leila Schneps’s
684
+ website, “The Grothendieck Circle” (grothendieckcircle.org).
685
+ 35. Grothendieck, Récoltes et Semailles, 163.
686
+ 36. Grothendieck, Récoltes et Semailles, 9–15.
687
+ 37. On the recently updated “Grothendieck Circle” (grothend-
688
+ ieckcircle.org) website, it is written that Gallimard editions
689
+ will soon publish a new version of R&S augmented with
690
+ these Grothendieck additions.
691
+ 38. Adapted freely from a famous quotation by Leon Trotsky.
692
+ DOI: 10.37282/991819.22.61
693
+ Sorbonne Université, CNRS, IMJ-PRG
694
+ pierre.schapira@imj-prg.fr
695
+ http://webusers.imj-prg.fr/~pierre.schapira
696
+
GdE1T4oBgHgl3EQfFAO9/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,350 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf,len=349
2
+ page_content='INFERENCE / Vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
3
+ page_content=' 7, No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
4
+ page_content=' 3 1 / 7 A Truncated Manuscript Pierre Schapira Récoltes et Semailles I, II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
5
+ page_content=' Réflexions et témoignage sur un passé de mathématicien by Alexander Grothendieck Editions Gallimard, 29,50 €.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
6
+ page_content=' S trictly speaking, this essay is not solely a review of Alexander Grothendieck’s Récoltes et Semailles (Reaping and Sowing).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
7
+ page_content='1 Although the book as a whole, as well as Grothendieck’s work and life, will be dis- cussed here, a good part of the essay is devoted to refuting a thesis Grothendieck developed throughout his original text.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
8
+ page_content=' We have an important ally in this respect: the author himself.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
9
+ page_content=' In a series of important additions that have not been incorporated in this new version, Grothendieck goes back entirely on some of his assertions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
10
+ page_content=' T he figure of Grothendieck dominated much of mathematics during the second half of the twen- tieth century.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
11
+ page_content=' If his work is essentially concerned with algebraic geometry, his vision and methods have spread far beyond—to algebraic topology, representation theory, complex geometry, symplectic geometry, algebraic analysis and even, more recently, computational geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
12
+ page_content=' In short, all linear mathematics, as opposed to dynamical systems, probabilities, or purely geometric geometry, such as Riemannian or Hamiltonian geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
13
+ page_content=' It was under his influence that the language of derived categories and sheaf theory were established in these fields.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
14
+ page_content=' It was also Grothendieck who had the intuition and then formulated the main lines of the theory of derived categories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
15
+ page_content=' He left it to his student Jean-Louis Verdier to write out all the details for his thesis, in which Verdier clarified the key notion of a triangulated category.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
16
+ page_content='2 But it was Grothend- ieck who situated sheaf theory and the six operations in the theory of derived categories, to which we will return later.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
17
+ page_content=' Between 1950 and 1970, functions—possibly gen- eralized—were studied on real or complex manifolds, and especially on Euclidean spaces, using the Fourier transform.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
18
+ page_content=' But on a complex manifold, when we refer to functions we really mean holomorphic functions, and these present a serious difculty.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
19
+ page_content=' That is, they do not exist, at least not globally on a compact manifold such as the projective line—apart, of course, from the constants.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
20
+ page_content=' Global knowledge, therefore, does not provide any infor- mation, unlike the real diferentiable case, and one must work locally.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
21
+ page_content=' There is a rather extraordinary tool for this purpose: sheaf theory, which was invented by Jean Leray while he was a prisoner of war in Germany between 1941 and 1945.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
22
+ page_content='3 Leray’s text was somewhat incomprehensible but was later clarified by Henri Cartan and Jean-Pierre Serre, resulting in Roger Godement’s famous book, Théorie des Faisceaux (Theory of Sheaves).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
23
+ page_content='4 Cartan and Serre used this tool in their respective studies of holomor- phic functions in dimension ≥ 1, after the seminal work of Kiyoshi Oka, giving rise to Cartan’s Theorems A and B and Serre’s influential paper “Faisceaux algébriques cohérents” (Coherent algebraic sheaves).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
24
+ page_content='5 It is in this context that Grothendieck approached algebraic geometry around 1955, providing a solid basis for sheaf cohomology in a foundational paper published by the Tōhoku Mathematical Journal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
25
+ page_content='6 In this paper, one already encounters—implicitly—the main difculty of cat- egory theory, namely the problem of universes, a problem later solved by Grothendieck in SGA4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
26
+ page_content=' This was done in the manner of another Alexander cutting the Gordian knot:7 Grothendieck poses the axiom that every set belongs to a universe.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
27
+ page_content=' This problem of universes, also known as the inaccessible cardinals, outside of which category theory cannot develop, is probably the reason why Bourbaki gave up on categories and why Grothendieck then left the group.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
28
+ page_content=' Ralf Krömer has written an excellent article on this issue.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
29
+ page_content='8 During the 1940s and 1950s, there were two concep- tual revolutions whose importance was not immediately understood: sheaf theory, as mentioned above, and cat- egory theory, the latter due to Samuel Eilenberg and Saunders Mac Lane.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
30
+ page_content='9 Moreover, the categorical point of view is part of a vast movement of ideas that embraced the structuralist approach of Claude Lévi-Strauss and the linguistics of Noam Chomsky.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
31
+ page_content=' Instead of considering sets endowed with certain structures, category theory focuses on the relations that can exist between objects.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
32
+ page_content=' A category C is thus a family of objects—as a set is a family of elements—but given two objects X and Y, there exists a priori a set called HomC (X, Y) representing the mor- Published by Inference-Review.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
33
+ page_content='com Vol 7, issue 3, Dec 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
34
+ page_content=' 2 / 7 BOOK REVIEWS phisms from X to Y, these data being, of course, subject to a certain number of natural axioms—composition of mor- phisms, identity morphisms, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
35
+ page_content=' A new step then consists in looking at morphisms between categories, which are called functors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
36
+ page_content=' Certain key notions then emerge—such as those of adjoint functors, final or initial objects, limits and colimits—giving a precise and unifying meaning to many ideas that run through mathematics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
37
+ page_content=' There is a family of categories that plays a central role: these are the additive categories and, among them, the abelian categories, which are modeled on the category of modules over a ring.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
38
+ page_content=' But if the vector spaces over a field are replaced by the modules over a ring, the classical tensor product and internal Hom functors are no longer exact, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
39
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
40
+ page_content=', they do not transform exact sequences into exact sequenc- es—a subspace does not always admit a supplementary.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
41
+ page_content=' It is thus necessary to consider the derived functors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
42
+ page_content=' We then enter the domain of homological algebra, a natural generalization of linear algebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
43
+ page_content=' Here, the reference book was initially that of Cartan and Eilenberg,10 before it was dethroned by Grothendieck’s Tōhoku paper.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
44
+ page_content=' But comput- ing the derived functor of the composition of two functors requires the use of Leray’s spectral sequences, giving rise to often inextricable calculations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
45
+ page_content=' This is where derived categories show their power: in this language, everything is remarkably simple.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
46
+ page_content=' What are the six operations?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
47
+ page_content=' With ordinary functions, we have three natural operations, besides addition: the prod- uct and, associated with an application f : X → Y between real manifolds, the integration which sends—modulo some technical details—functions on X to functions on Y and the composition by f which sends functions on Y to functions on X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
48
+ page_content=' In sheaf theory, the tensor product ⊗ L is the analogue of the product, the proper direct image Rf!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
49
+ page_content=' is the analogue of the integration, and the inverse image f –1 is the analogue of the composition by f.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
50
+ page_content=' But the tensor product has a right adjoint, Rhom, the functor f –1 has a right adjoint, the direct image Rf*, and the functor Rf!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
51
+ page_content=' has a right adjoint, f !' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
52
+ page_content='. The functor f !' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
53
+ page_content=', which exists only in the derived frame- work, unlike the other five, was discovered by Grothendieck in the context of étale cohomology and was subsequently constructed by Verdier for locally compact spaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
54
+ page_content=' As Gro- thendieck had seen, f !' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
55
+ page_content=' provides a broad generalization of Poincaré duality and this functor now plays a crucial role.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
56
+ page_content=' But since locally compact spaces appear more often than the étale topology, it is the name Poincaré–Verdier, if not just Verdier alone, that remains associated with duality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
57
+ page_content=' This attribution is largely unfair and left Grothendieck feeling somewhat bitter—rightly so.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
58
+ page_content='11 One might think that such an abstract framework dispenses with explicit computations, but this is a mis- conception: put simply, the computations are no longer the same.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
59
+ page_content=' If the direct image functor does not allow for integrals to be computed explicitly, the formalism of the six operations nevertheless gives rise to sophisticated numerical results, such as the computation of dimensions of cohomology spaces.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
60
+ page_content=' The Riemann–Roch–Hirzebruch– Grothendieck theorem is a beautiful illustration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
61
+ page_content=' In the same vein, one of Grothendieck’s fundamen- tal discoveries was to develop sheaf theory on categories and thus, in particular, on spaces that no longer have any points.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
62
+ page_content=' What do the sheaves require to exist?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
63
+ page_content=' Namely, the data of open sets and their inclusions, and the notion of a covering.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
64
+ page_content=' There is nothing to prevent the objects of a cat- egory from playing the role of the open sets, the category is then called a pre-site, and it remains to define axiomat- ically what the coverings are in order to obtain a site—i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
65
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
66
+ page_content=', a category with a Grothendieck topology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
67
+ page_content=' This natural generalization of the usual topological spaces proves to be extremely fruitful and analysts would, in fact, do well to draw inspiration from it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
68
+ page_content=' On a real manifold, there are far too many pathological open sets and too many coverings if one is interested in what happens at the edge of an open set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
69
+ page_content=' One then arrives at topos theory—topoi for the scholars.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
70
+ page_content=' The underlying idea, which in a particular situation goes back to Israel Gelfand, is that a space—in this case, a site— can be reconstructed from the category of sheaves on that site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
71
+ page_content=' A topos is then a category equivalent to a category of sheaves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
72
+ page_content=' The category of sets, for example, is nothing other than the topos associated with a point.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
73
+ page_content=' But even if topos theory has been used in a new proof of Paul Cohen’s result on the independence of the continuum hypothesis, its applications in mathematics are still uncertain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
74
+ page_content=' This presentation of a selection of Grothendieck’s fun- damental ideas is far from complete and only reflects the particular interests of the reviewer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
75
+ page_content=' In R&S, Grothendieck lists what he considers the 12 key ideas of his work and also provides a list of his students.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
76
+ page_content='12 One should, of course, also mention his first works in functional analysis, dating from around 1955, and scheme theory, which revolutionized algebraic geometry, as well as the intuition of motives, a partially conjectural theory that was later developed by Pierre Deligne, Vladimir Voevodsky, Joseph Ayoub, and many others.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
77
+ page_content=' One should also mention the fundamental text “A la poursuite des champs” in which Grothendieck lays the foundations for ∞-categories and homotopical algebra.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
78
+ page_content='13 Indeed, if trian- gulated categories are an incredibly simple and efcient tool, they have a defect which seriously limits their use, for example, in gluing problems.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
79
+ page_content=' This defect is linked to the fact that a certain morphism is unique up to isomorphism, but this isomorphism is not unique!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
80
+ page_content=' The new theory of ∞-categories, to which the names Jacob Lurie, Graeme Segal, Bertrand Töen, and a few others must be associated, is in the process of completely supplanting the classical theory of derived categories, although it is, for the time being, not easily accessible, to say the least.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
81
+ page_content=' P ierre cartier has written a remarkable article on Grothendieck’s life and it is pointless to paraphrase it here.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
82
+ page_content='14 There are also excellent articles by Allyn INFERENCE / Vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
83
+ page_content=' 7, No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
84
+ page_content=' 3 3 / 7 Jackson and Winfried Scharlau, as well as all the links on the Grothendieck Circle website managed by Leila Schneps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
85
+ page_content='15 Nonetheless, a few words on this subject are helpful.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
86
+ page_content=' Grothendieck’s father, Sascha Schapiro, was a Russian anarchist who took part in the aborted revolution of 1905.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
87
+ page_content=' He then served ten years in the prisons of Czar Nicholas II before being released following the revolution of 1917.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
88
+ page_content=' Despite initially being feted as a hero, Schapiro was soon declared an enemy of the people.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
89
+ page_content=' He later fought along- side the Republicans during the Spanish Civil war before becoming a traveling photographer in France.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
90
+ page_content=' In 1939, Schapiro was interned at the Camp Vernet in the French Pyrenees.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
91
+ page_content=' He was handed over to the Nazis by the Vichy police in 1942 and disappeared into Auschwitz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
92
+ page_content=' Grothendieck’s mother, Hanka, was an extreme-left militant in Germany during the 1920s who emigrated to France when Adolf Hitler came to power.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
93
+ page_content='16 Her son did not join her until 1938, at the age of 10, after having lived in hiding on a farm in Germany.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
94
+ page_content=' Grothendieck spent part of the war in Le Chambon-sur-Lignon at the famous Collège Cévenol that saved so many Jewish children.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
95
+ page_content=' Grothendieck’s mathematical life began in Nancy during the 1950s, where Jean Dieudonné and Laurent Schwartz took him under their wing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
96
+ page_content=' After his initial work in functional analysis, which still remains fundamental, he turned to algebraic geometry with great success, a story that is now well-known.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
97
+ page_content=' Grothendieck was one of the first two professors appointed to the Institut des Hautes Études Scientifiques (IHES) in 1959, where he obtained most of his results and published, with the help of Jean Dieudonné, the other professor at the IHES, the famous EGA (Éléments de géométrie algébrique).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
98
+ page_content=' He directed the seminar on alge- braic geometry, which resulted in a publication of more than 5,000 pages coauthored with some of his students, known as SGA (Séminaire de Géométrie Algébrique du Bois Marie).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
99
+ page_content=' Grothendieck was awarded the Fields Medal at the International Congress of Mathematicians in 1966, but did not travel to Moscow to receive it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
100
+ page_content=' In 1988, he won the prestigious and well-funded Crafoord Prize, which he refused.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
101
+ page_content=' Grothendieck left the IHES in 1970 after discovering that the institute benefited from military funding and launched his own ecological crusade, first through the journal Survivre, and then later Survivre et vivre.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
102
+ page_content=' But Gro- thendieck not only left the IHES, he also left the world of mathematics and, in particular, his students.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
103
+ page_content=' He returned to mathematics in 1983, but in a very diferent style, with his publications “Esquisse d’un programme” (Sketch of a Programme) and “À la poursuite des champs” (Pursuing Stacks).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
104
+ page_content='17 After a year at the Collège de France, he was appointed professor in Montpellier, where he worked until his retirement in 1988.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
105
+ page_content=' He spent his final years living in the countryside in almost total seclusion, until his death in 2014 at the age of 86.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
106
+ page_content=' A s we have seen, Grothendieck is the author of a considerable body of mathematical work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
107
+ page_content=' But he is also the author of significant literary works.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
108
+ page_content=' Among them is R&S, which was published by Gallimard in January 2022 after having been widely distributed on the internet since Grothendieck first wrote the text in 1986.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
109
+ page_content=' Amounting to more than 1,900 pages, the book deals with many subjects: the author’s journey as a mathematician, his passions, his illusions and disillusions, the process of creation, and a thousand other topics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
110
+ page_content=' It also includes long passages on Yin and Yang, feminine and masculine ways of doing mathematics, the mother, the father and child, dreams, and so on.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
111
+ page_content=' A large part of the text is devoted to a revelation he is said to have experienced in 1976 and a long period of meditation that followed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
112
+ page_content=' It is a kind of self-analysis tinged, it has to be said, with a certain degree of paranoia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
113
+ page_content=' A recurring theme is the sense of betrayal he felt toward his former students, which is manifested in his work being ignored and forgotten.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
114
+ page_content=' The words “funeral,” “deceased,” “hearse,” “massacre,” and “gravedigger,” and so on, quickly become omnipresent after their appearance in the table of contents.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
115
+ page_content=' More generally, the book denounces a loss of ethics among the entire mathematical community.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
116
+ page_content=' Grothendieck explains to the reader that mathematics “was better before”—that is, prior to 1960—as if the older generation was irreproachable!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
117
+ page_content=' In fact, on the contrary, it can be said that mathematicians have become much more honest since the 1990s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
118
+ page_content=' The source of this miracle has a name: arXiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
119
+ page_content=' It is now becoming ever more difcult to appropriate the ideas of others, although, of course, it is still possible to some degree.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
120
+ page_content=' The institution of math- ematics itself has also been greatly improved, or at least has been greatly transformed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
121
+ page_content=' The system of mandarins that dominated French mathematics until the 1970s, from which Grothendieck did not experience any difculties and about whom he does not say a word, has practically disappeared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
122
+ page_content=' Grothendieck, who is very self-critical throughout the text, sometimes ponders whether he might have been arro- gant or even contemptuous of those around him during his heyday in the 1960s and 1970s.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
123
+ page_content=' Despite these concerns, it is clear that he cares little about ingratiating himself with his readers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
124
+ page_content=' Instead, he ofers a book of more than 1,900 pages, while in response to a question about the IHES library in its early days, he remarks: “We don’t read books, we write them!”' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
125
+ page_content='18 R&S contains many contradictions that are only partly corrected by a series of Notes—some of which, despite being of particular importance, are not included in this new edition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
126
+ page_content=' Addressing these contradic- tions properly would undoubtedly have required the text to be completely rewritten.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
127
+ page_content=' Grothendieck is not paralyzed by any sense of false modesty: The thing that struck me is that I do not remember having known,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
128
+ page_content=' even from the allusions of friends or colleagues 4 / 7 BOOK REVIEWS who are better versed in history,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
129
+ page_content=' of a mathematician apart from myself who contributed such a multiplicity of inno- vative ideas,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
130
+ page_content=' not more or less disjointed from one another,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
131
+ page_content=' but as part of a vast unifying vision (as was the case for Newton and for Einstein in physics and cosmology,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
132
+ page_content=' and for Darwin and for Pasteur in biology).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
133
+ page_content='19 Elsewhere, he writes: “It would seem that, as a servant of a vast unifying vision born in me, I am ‘one of a kind’ in the history of mathematics from its origin to the present day.”20 Although the writing style is not lacking in inspiration, it is nonetheless uneven and sometimes—deliberately— familiar.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
134
+ page_content=' Grothendieck is not le duc de Saint-Simon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
135
+ page_content=' The following analysis will focus only on the content con- cerning mathematics and the world of mathematicians.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
136
+ page_content=' In the text, Grothendieck complains at length that his ideas have been plundered by his former students with- out reference to their master or that they have simply been erased and forgotten.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
137
+ page_content=' These assertions are not always supported by solid arguments or precise references.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
138
+ page_content=' But, above all, it is the nature of discoveries to be trivialized and their author forgotten, and all the more so when the underlying ideas are often, in hindsight, obvious.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
139
+ page_content=' Grothendieck’s reproaches are addressed to all his pupils, and particularly to Deligne—whose name is almost always preceded by the words “my friend,” insinuating “my former friend”—and to Verdier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
140
+ page_content=' It is quite possible to imagine that Deligne was only lightly involved with Gro- thendieck’s authorship of the motives or that the “Verdier duality” already mentioned could just as well be called the “Grothendieck duality.” But, otherwise, everyone knows that it was Grothendieck who invented schemes, motives, Grothendieck topologies, topoi, and, above all, that he imposed the functorial point of view via the six opera- tions and the derived categories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
141
+ page_content=' Everyone knows that it is thanks to the machinery devised by Grothendieck that Deligne was able to prove André Weil’s last conjecture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
142
+ page_content=' In support of his claims about the total loss of ethics in the mathematical community from the 1970s onwards, Grothendieck’s entire argument is based on the unique testimony of one and only one mathematician who came to see him several times at his home in the countryside.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
143
+ page_content=' It is common practice in ethnology to rely on an infor- mant from the group being studied and who speaks the language.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
144
+ page_content=' The problem is that the informant may not always be all that reliable and can, in fact, say anything.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
145
+ page_content=' Here it is an even worse situation, since the informant declares himself to be the first person afected by the story he is going to tell, namely the Riemann–Hilbert (R–H) correspondence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
146
+ page_content=' T he informant was able to convince Grothend- ieck that he was, in a certain sense, his spiritual son—“a continuator of my work.”21 Furthermore, the informant persuaded Grothendieck that he had been able to demonstrate the R–H correspondence—without the slightest advice and in the face of indiference, if not outright hostility, from all around him.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
147
+ page_content='22 And that he had done so using the language of derived categories for the first time in this field.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
148
+ page_content=' Speaking of this informant, Gro- thendieck also writes that “his pioneering work since 1972 has been done in complete solitude.”23 All of this is grossly untrue.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
149
+ page_content=' The informant did his postgraduate thesis in 1974 under my direction and on a subject that I had proposed to him.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
150
+ page_content=' He had largely benefited from a private talk given by Masaki Kashiwara in 1975 when the informant was start- ing to prepare his first article, which makes no mention of this decisive talk.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
151
+ page_content=' He also benefited from a copy of Kashi- wara’s 1970 thesis24—written in Japanese, but there is no lack of translators—and from Christian Houzel’s repeated advice throughout his thèse d’état.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
152
+ page_content=' As for the derived cat- egories, they appear on the first page of the foundational article by Mikio Sato, Takahiro Kawai, and Kashiwara published in 1973.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
153
+ page_content='25 The R–H correspondence is an “equivalence of cat- egories” formulated by Kashiwara in 1975 and was demonstrated by the same author in 1980.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
154
+ page_content='26 In passing, it is worth noting that interesting equiv- alences of categories build bridges between diferent a priori unrelated fields of mathematics: here, the partial diferential equations of analysis and the constructible sheaves of algebraic topology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
155
+ page_content=' Another more recent and very important equivalence is provided by Maxim Kontse- vich’s “mirror symmetry,” which links complex geometry and symplectic geometry.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
156
+ page_content=' Grothendieck’s entire statement about the role of his protégé in the story of the R–H correspondence, scattered and repeated throughout the 1,900 pages of his original text, is therefore based on false testimonies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
157
+ page_content=' However, with astonishing naïveté, our author takes everything that his interlocutor tells him at face value and he is quoted more than 200 times.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
158
+ page_content=' Grothendieck goes on a cru- sade against Kashiwara, whom he goes so far as to call “a ringleader [caïd in the original French] from across the Pacific,”27 even though he had never communicated with Kashiwara and had only a very fragmentary knowledge of his work.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
159
+ page_content=' By extension,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
160
+ page_content=' it is the entire Sato school that is labeled “ringleaders from across the Pacific.”' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
161
+ page_content='28 With- out being exhaustive,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
162
+ page_content=' which would be impossible in any practical sense without copying the entire book,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
163
+ page_content=' consider the following quote from Note 458,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
164
+ page_content=' which is not lacking in unintentional irony: “the Sato school is said to have ini- tiated the method of surrounding itself with obscurity in order to dominate.”' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
165
+ page_content='29 In 1981,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
166
+ page_content=' Grothendieck reached the peak of his resent- ment with an event he referred to as “le Colloque Pervers” (the Perverse Colloquium),' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
167
+ page_content=' a historically important con- ference to which his protégé was not invited.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
168
+ page_content=' It was on this occasion that Alexander Beilinson, Joseph Bernstein, and INFERENCE / Vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
169
+ page_content=' 7, No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
170
+ page_content=' 3 5 / 7 Deligne—not counting Ofer Gabber who refused to asso- ciate his name with it for obscure reasons—introduced perverse sheaves.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
171
+ page_content='30 The idea of these sheaves—which are not sheaves in the strict sense, but complexes of sheaves, hence, perhaps, the adjective—arises naturally from the R–H correspondence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
172
+ page_content=' Their definition already appears implicitly in Kashiwara’s 1975 text.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
173
+ page_content=' Grothendieck was outraged that his protégé was completely ignored at this colloquium when he should have been its star.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
174
+ page_content=' If no ref- erence was made to the authorship of R–H at the event, it was probably because the mathematical community was aware of the controversies surrounding it and nobody wanted to be involved.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
175
+ page_content=' But if one reads what Grothendieck writes in the additions to R&S, the notable absentee at this colloquium was not, in fact, his informant, but Kashiwara!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
176
+ page_content=' In other words, all the pages and pages of indignation in the original text are either entirely misplaced, or do not defend the right people.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
177
+ page_content=' There is no doubt in my mind that Sato and his student Kashiwara were unjustly ignored, or maybe even misunderstood, by the French school during the 1980s, and by Bourbaki in particular, but that is another story.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
178
+ page_content='31 In the edition published by Gallimard, Grothendieck cautiously goes back on his assertions concerning the authorship of R–H and is willing to acknowledge that Kashiwara could have played a role in it,32 perhaps even a role equivalent to that of his informant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
179
+ page_content=' Finally, at the end of Part III, he ofers “my most sincere apologies” to Kashiwara.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
180
+ page_content='33 But 1,500 pages later, none of these admis- sions preclude Grothendieck from insulting Kashiwara once again.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
181
+ page_content=' I n 1986, I was aware of this part of Grothendieck’s text—concerning the ringleaders, or caïds, as he referred to them, from the other side of the Pacific— and I wrote to him about it on January 16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
182
+ page_content=' An important correspondence followed that continued for several months until around the end of March.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
183
+ page_content=' Supported by the testimony of Christian Houzel, I believe that I successfully convinced Grothendieck that his version of R–H was com- pletely false.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
184
+ page_content=' In a series of additions, adding about twenty pages to the initial text of R&S, some extracts from which are presented below,34 Grothendieck went back completely on what he had written earlier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
185
+ page_content=' He finally afrmed that it was indeed Kashiwara who first formulated the R–H correspondence in 1975 and that it was also Kashiwara who gave the first outline of a proof in 1980.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
186
+ page_content=' It is to Gro- thendieck’s credit that he admits his error of judgement, but because it had been propagated throughout 1,900 pages, it was an error that was not easily rectified.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
187
+ page_content=' Gro- thendieck chose to address the problem using additions and footnotes, but unfortunately, apart from a flat apol- ogy,35 none of these amendments appear in the published book.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
188
+ page_content=' Consider, for example, the following additions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
189
+ page_content=' Grothendieck, writing on May 9, 1986: After the provisional distribution of Récoltes et semailles, from October last year, I was contacted by Pierre Schapira, and then by Christian Houzel, to point out some glaring inaccuracies in the version of events presented in Récol- tes et semailles.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
190
+ page_content=' The situation was clarified considerably during correspondence with both of them, which con- tinued between January and March of this year.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
191
+ page_content=' It now appears to me that in the “[Zoghman] Mebkhout version” (which was not lacking in internal consistency) the true, the tendentious and the downright false are inextricably mixed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
192
+ page_content=' Grothendieck, also dated May 15, 1986: In retrospect, I am convinced that Kashiwara cannot be reproached for the slightest incorrectness in this case.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
193
+ page_content=' In his presentation,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
194
+ page_content=' he gives a statement and a first sketch of a proof of a theorem,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
195
+ page_content=' which he had indeed been the first to conjecture as early as 1975… Moreover,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
196
+ page_content=' he has the cor- rection to specify,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
197
+ page_content=' as early as page 2: “Let us note that the theorem is also proved by Mebkhout by a diferent way.”' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
198
+ page_content=' This was even “lending to the rich,”' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
199
+ page_content=' because the previ- ous month,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
200
+ page_content=' in his note to the CRAS [Comptes Rendus de l’Académie des Sciences] of 3 March 1980,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
201
+ page_content=' Mebkhout had expressed himself in the hypothetical form “we hope to show that…,”' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
202
+ page_content=' and without making the slightest allusion to it… A ll of this raises some questions about the edi- torial process that led to the publication of R&S by Gallimard in January 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
203
+ page_content=' In the foreword, which is dated January 1986,36 Grothendieck thanks Chris- tian Bourgois and Stéphane Deligeorges for including his text in the Épistémè collection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
204
+ page_content=' What happened between this date and the publication by Gallimard?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
205
+ page_content=' And, above all, why do Grothendieck’s additions—incorporated prior to May 29, 1986—not appear in the final published version, while the brief apology that does appear clearly proves that the Gallimard edition includes other elements added after January 1986?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
206
+ page_content='37 In this review, I have focused on the mathematical sec- tions of the book and the passages that discuss the history of the R–H correspondence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
207
+ page_content=' The latter is far from being an anecdotal component in the book.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
208
+ page_content=' Indeed, Grothend- ieck refers to it constantly—it is a leitmotif.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
209
+ page_content=' Unfortunately, and as he admits with great frankness, Grothendieck was misled by an informant lacking in objectivity, to say the absolute least.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
210
+ page_content=' With a disarming and, in a certain sense, admirable degree of naivety, he also admits that he never imagined that the information he was being fed could be biased or incomplete, let alone downright false.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
211
+ page_content=' Between 1955 and 1970, Grothendieck lived in a world of pure ideas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
212
+ page_content=' He was immersed in mathematics to an extent that is hard to imagine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
213
+ page_content=' When he emerged from the noosphere into the real world—that is, the social world—one can only imagine the harsh shock of everyday life and how crushed he felt by what he perceived as a loss of ethics in the world of science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
214
+ page_content=' But why should this world be any different from the rest of society?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
215
+ page_content=' The rigor of science has never been reflected in its practitioners—examples are legion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
216
+ page_content=' Science is a great devourer of men and characters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
217
+ page_content='38 Pierre Schapira is Professor Emeritus at University Pierre et Marie Curie (University of Paris 6).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
218
+ page_content=' 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
219
+ page_content=' The author would like to thank Leila Schneps for her critical and constructive advice.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
220
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
221
+ page_content=' Alexander Grothendieck, Récoltes et Semailles: I, II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
222
+ page_content=' Réflexions et témoignage sur un passé de mathématicien (Paris: Gallimard, 2022), 439.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
223
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
224
+ page_content=' See the historical article by Christian Houzel “Les débuts de la théorie des faisceaux,” in Masaki Kashiwara and Pierre Schapira, Sheaves on Manifolds, Grundlehren der Mathematischen Wissenschaften, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
225
+ page_content=' 292 (Berlin: Springer-Verlag, 1990), doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
226
+ page_content='1007/978-3-662-02661-8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
227
+ page_content=' 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
228
+ page_content=' Roger Godement, Théorie des faisceaux (Paris: Hermann, 1958).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
229
+ page_content=' 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
230
+ page_content=' Jean-Pierre Serre, “Faisceaux Algebriques Coherents,” Annals of Mathematics, 2nd Series 61, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
231
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
232
+ page_content=' (1955): 197–278, doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
233
+ page_content='2307/1969915 .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
234
+ page_content=' 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
235
+ page_content=' Alexander Grothendieck, “Sur quelques points d’algèbre homologique,” Tōhoku Mathematical Journal 9, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
236
+ page_content=' 3 (1957): 119–221, doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
237
+ page_content='2748/tmj/1178244774.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
238
+ page_content=' This article is analyzed in detail in Rick Jardine, “Tōhoku,” Inference 1, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
239
+ page_content=' 3 (2015), doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
240
+ page_content='37282/991819.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
241
+ page_content='15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
242
+ page_content='13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
243
+ page_content=' 7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
244
+ page_content=' Mike Artin, Alexandre Grothendieck, and Jean-Louis Verdier, Théorie des topos et cohomologie étale des schémas, Lecture Notes in Mathematics, vols.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
245
+ page_content=' 269, 270, 305 (Berlin: Springer-Verlag, 1972–73).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
246
+ page_content=' 8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
247
+ page_content=' Ralf Krömer, “La « machine de Grothendieck » se fonde- t-elle seulement sur des vocables métamathématiques ?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
248
+ page_content=' Bourbaki et les catégories au cours des années cinquante,” Revue d’histoire des mathématiques 12 (2006): 119–62.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
249
+ page_content=' 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
250
+ page_content=' Samuel Eilenberg and Saunders Mac Lane, “Natural Iso- morphisms in Group Theory,” Proceedings of the National Academy of Sciences 28 (1942): 537–43, doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
251
+ page_content='1073/ pnas.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
252
+ page_content='28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
253
+ page_content='12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
254
+ page_content='537;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
255
+ page_content=' Samuel Eilenberg and Saunders Mac Lane, “General Theory of Natural Equivalences,” Transactions of the American Mathematical Society 58 (1945): 231–94, doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
256
+ page_content='1090/S0002-9947-1945-0013131-6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
257
+ page_content=' 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
258
+ page_content=' Henri Cartan and Samuel Eilenberg, Homological Algebra (Princeton, NJ : Princeton University Press, 1956).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
259
+ page_content=' 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
260
+ page_content=' Grothendieck, Récoltes et Semailles, 158.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
261
+ page_content=' 12.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
262
+ page_content=' Grothendieck, Récoltes et Semailles, 42, 377.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
263
+ page_content=' 13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
264
+ page_content=' Alexander Grothendieck, “A la poursuite des champs,” (1987).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
265
+ page_content=' 14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
266
+ page_content=' Pierre Cartier, “A Country Known Only by Name,” Inference 1, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
267
+ page_content=' 1 (2014), doi:10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
268
+ page_content='37282/991819.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
269
+ page_content='14.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
270
+ page_content='7.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
271
+ page_content=' For additional details, see “Sascha Shapiro,” Wikipedia.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
272
+ page_content=' 15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
273
+ page_content=' Allyn Jackson, “Comme Appelé du Néant—As if Summoned from the Void: The Life of Alexandre Grothendieck,” Notices of the AMS 51, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
274
+ page_content=' 4 and 51, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
275
+ page_content=' 10 (2004): 1,038–54 and 1,196–212;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
276
+ page_content=' Winfried Scharlau, “Who is Alexander Grothendieck?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
277
+ page_content=',” Notices of the AMS 55, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
278
+ page_content=' 8 (2008): 930–41;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
279
+ page_content=' Leila Schneps, “The Grothendieck Circle” (grothendieckcircle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
280
+ page_content='org).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
281
+ page_content=' 16.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
282
+ page_content=' The wording “extreme-left militant in Germany in the 1920s” does not really have the same meaning as its transposition a century later.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
283
+ page_content=' 17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
284
+ page_content=' Grothendieck, “A la poursuite des champs.” 18.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
285
+ page_content=' Jackson, “Comme Appelé du Néant,” 1,050.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
286
+ page_content=' 19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
287
+ page_content=' Grothendieck, Récoltes et Semailles, 935.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
288
+ page_content=' 20.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
289
+ page_content=' Grothendieck, Récoltes et Semailles, 94.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
290
+ page_content=' 21.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
291
+ page_content=' Grothendieck, Récoltes et Semailles, 1,664.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
292
+ page_content=' 22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
293
+ page_content=' Grothendieck, Récoltes et Semailles, 413.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
294
+ page_content=' 23.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
295
+ page_content=' Grothendieck, Récoltes et Semailles, 1,663.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
296
+ page_content=' 24.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
297
+ page_content=' Masaki Kashiwara, “Algebraic Study of Systems of Partial Differential Equations,” Master’s thesis (Tokyo University, 1970), Mémoires de la Société Mathématique de France 63 (1995).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
298
+ page_content=' 25.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
299
+ page_content=' Mikio Sato, Takahiro Kawai, and Masaki Kashiwara, “Microfunctions and Pseudo-Differential Equations, Hyperfunctions and Pseudo-Differential Equations,” in Proceedings of a Conference at Katata, 1971;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
300
+ page_content=' Dedicated to the Memory of André Martineau, Lecture Notes in Mathematics, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
301
+ page_content=' 287 (Berlin: Springer-Verlag, 1973), 265–529.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
302
+ page_content=' 26.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
303
+ page_content=' A few words on the R–H correspondence follow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
304
+ page_content=' The modern formulation uses the theory of D-modules, a theory that was intuited by Sato in the 1960s, and fully implemented by Kashiwara in his thesis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
305
+ page_content=' (A related theory was developed independently by Joseph Bernstein.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
306
+ page_content=') In everyday language, a coherent D-module means a system of partial differential equations with holomorphic coefficients.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
307
+ page_content=' Holonomic modules are the > 1 dimensional version of ordinary differential equations, and among them regular holonomic modules generalize the classical notion of Fuchsian equations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
308
+ page_content=' In 1975, Kashiwara showed that the functor Sol, which associates the complex of its holomorphic solutions to a holonomic module, takes its values in constructible sheaves, the sheaves which behave locally as a direct sum of constant sheaves along a stratification.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
309
+ page_content=' In the same year he conjectured that there exists a triangulated subcategory of the holonomic modules, the regular holonomic modules, on which the functor Sol induces an “equivalence of categories.” Kashiwara proved his conjecture in 1980 and gave a detailed account of the main steps in his proof at the École Polytechnique seminar, which was published.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
310
+ page_content=' His proof uses Heisuke Hironaka’s singularity resolution theorem and the precursor work of Deligne who treated the special case of “meromorphic connections.” 27.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
311
+ page_content=' Grothendieck, Récoltes et Semailles, 1,656.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
312
+ page_content=' 28.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
313
+ page_content=' Grothendieck, Récoltes et Semailles, 1,651.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
314
+ page_content=' 29.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
315
+ page_content=' Grothendieck, Récoltes et Semailles, 1,650.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
316
+ page_content=' 30.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
319
+ page_content=' Alexander Beilinson, Joseph Bernstein, and Pierre Deligne, “Faisceaux pervers, Analysis and topology on singular spaces, I” (Luminy, 1981), Astérisque, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
320
+ page_content=' 100 (Paris: Société Mathématique de France, 1982): 5–171.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
321
+ page_content=' 31.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
322
+ page_content=' Pierre Schapira, “Mikio Sato, a Visionary of Mathematics,” Notices of the AMS 54, no.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
323
+ page_content=' 2 (2007);' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
324
+ page_content=' Pierre Schapira, “Fifty Years of Mathematics with Masaki Kashiwara,” Proceed- ings of the International Congress of Mathematicians, Rio de Janeiro, 2018, vol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
325
+ page_content=' 1, Plenary Lectures (Singapore: World Scientific, 2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
326
+ page_content=' 32.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
327
+ page_content=' Grothendieck, Récoltes et Semailles, 163.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
328
+ page_content=' 33.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
329
+ page_content=' Grothendieck, Récoltes et Semailles, 164.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
330
+ page_content=' 34.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
331
+ page_content=' These additions have just been posted on Leila Schneps’s website, “The Grothendieck Circle” (grothendieckcircle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
332
+ page_content='org).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
333
+ page_content=' 35.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
334
+ page_content=' Grothendieck, Récoltes et Semailles, 163.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
335
+ page_content=' 36.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
336
+ page_content=' Grothendieck, Récoltes et Semailles, 9–15.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
337
+ page_content=' 37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
338
+ page_content=' On the recently updated “Grothendieck Circle” (grothendieckcircle.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
339
+ page_content='org) website, it is written that Gallimard editions will soon publish a new version of R&S augmented with these Grothendieck additions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
340
+ page_content=' 38.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
341
+ page_content=' Adapted freely from a famous quotation by Leon Trotsky.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
342
+ page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
343
+ page_content='37282/991819.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
344
+ page_content='22.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
345
+ page_content='61 Sorbonne Université, Cnrs IMJ-PRG pierre.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
346
+ page_content='schapira@imj-prg.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
347
+ page_content='fr http://webusers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
348
+ page_content='imj-prg.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
349
+ page_content='fr/~pierre.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
350
+ page_content='schapira' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/GdE1T4oBgHgl3EQfFAO9/content/2301.02898v1.pdf'}
GtA0T4oBgHgl3EQfBv8y/content/tmp_files/2301.01979v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
GtA0T4oBgHgl3EQfBv8y/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
IdE4T4oBgHgl3EQfhA2z/content/tmp_files/2301.05122v1.pdf.txt ADDED
@@ -0,0 +1,554 @@
1
+ Quantum algorithm for finding minimum values in a Quantum Random Access Memory
2
+ Anton S. Albino,1, ∗ Lucas Q. Galvão,1, † Ethan Hansen,2, ‡ Mauro Q. Nooblath Neto,1, § and Clebson Cruz3, ¶
3
+ 1Latin American Quantum Computing Center, SENAI CIMATEC, Salvador, Brazil.
4
+ 2Zapata Computing, Canada
5
+ 3Grupo de Informação Quântica, Centro de Ciências Exatas e das Tecnologias,
6
+ Universidade Federal do Oeste da Bahia - Campus Reitor Edgard Santos. Rua Bertioga,
7
+ 892, Morada Nobre I, 47810-059 Barreiras, Bahia, Brasil.
8
+ Finding the minimum value in an unordered database is a common and fundamental task in computer science.
9
+ However, the optimal classical deterministic algorithm can find the minimum value with a time complexity that
10
+ grows linearly with the number of elements in the database. In this paper, we propose a quantum
11
+ algorithm for finding the minimum value of a database, which is quadratically faster than its best classical
12
+ analogs. We assume a Quantum Random Access Memory (QRAM) that stores values from a database and
13
+ perform an iterative search based on an oracle whose role is to limit the searched values by controlling the states
14
+ of the most significant qubits. A complexity analysis was performed in order to demonstrate the advantage of this
15
+ quantum algorithm over its classical counterparts. Furthermore, we demonstrate how the proposed algorithm
16
+ would be used in an unsupervised machine learning task through a quantum version of the K-means algorithm.
17
+ Keywords: Quantum RAM, Minimum search, Grover’s Algorithm
18
+ I.
19
+ INTRODUCTION
20
+ Random Access Memory (RAM) is a versatile, short-term
21
+ memory used in computing for storing and retrieving infor-
22
+ mation via bits [1]. Similarly, the concept of Quantum RAM
23
+ (QRAM) emerges with the same goal but employing qubits
24
+ to apply a superposition of states to achieve faster results for
25
+ computational applications, whether quantum or classical [2–
26
+ 11]. Several works discuss the potential of its applications
27
+ to optimize the execution of quantum algorithms, including
28
+ quantum searching on a classical database [10, 12–15], col-
29
+ lision finding [12, 16–18], and algorithms for solving linear
30
+ systems [19–22], for instance.
31
+ These results have attracted the attention of the scien-
32
+ tific community in the past few years, leading to the de-
33
+ velopment of QRAM architectures that demonstrate the po-
34
+ tential for producing efficient results in quantum computing
35
+ [3, 4, 6, 7, 9, 23–25]. Some models, such as Fanout quantum
36
+ RAM [3, 7, 9] and Bucket-Brigade quantum RAM [3, 23, 25],
37
+ illustrate potential future implementations of a QRAM in
38
+ practical scenarios. In addition, recent experiments have re-
39
+ vealed a designed architecture for hybrid quantum computers
40
+ that use superconducting qubits and spin-qubit memory crys-
41
+ tals capable, in theory, of implementing a QRAM in real sys-
42
+ tems [11].
43
+ Furthermore, other efforts have been made to construct
44
+ quantum algorithms that are able to optimally access a QRAM
45
+ in the process of searching for certain values stored in its cells
46
+ [10]. In general, problems based on searching use the famous
47
+ Grover’s algorithm to search quantum states in an unstruc-
48
+ tured list [16, 26–29]. On the other hand, a well-known exam-
49
+ ple of determining the minimal value in a list is the so-called
50
+ ∗ anton.albino@fieb.org.br
51
+ † lucas.g5847@ufob.edu.br
52
+ ‡ 1ethanhansen@proton.me
53
+ § mauro.neto@fbter.org.br
54
+ ¶ clebson.cruz@ufob.edu.br
55
+ Dürr-Hoyer minimum finding algorithm [30], which employs
56
+ Grover’s Algorithm as a fundamental subroutine to find the
57
+ greatest or smallest entry in a list [31].
58
+ In this scenario, based on the core concept of Durr-Hoyer’s
59
+ algorithm, we apply Grover’s Algorithm as a subroutine to de-
60
+ velop a quantum algorithm for identifying the smallest value
61
+ in a classical data set stored in a QRAM. The proposed Quan-
62
+ tum Minimum Search (QMS) algorithm is based on the itera-
63
+ tive change of the oracle function, which limits the searched
64
+ values by controlling the states of the most significant qubits.
65
+ First, we present the description of the QMS algorithm, de-
66
+ scribing the concept of QRAM and approaching an example
67
+ to find the minimum in a list of four real values using the
68
+ proposed algorithm. In sequence, we analyze the complex-
69
+ ity of the QMS algorithm compared with classical algorithms.
70
+ The results show that, whereas the complexity of the classi-
71
+ cal algorithm grows linearly with the number of elements in
72
+ the database, O(N), since the classical algorithms go through
73
+ all the N items in the list, the presented QMS algorithm has
74
+ a complexity of O(√(N/t)), with t being the number of marked
78
+ states. Finally, we present an application of the proposed al-
79
+ gorithm in the K-means problem of determining the optimal
80
+ location of K-centroids in order to minimize the sum of all
81
+ distances between the points and their respective centroids.
82
+ II.
83
+ QUANTUM MINIMUM SEARCH (QMS) ALGORITHM
84
+ The search problem is a ubiquitous subject of discussion
85
+ in classical computer science [26]. The problem consists of
86
+ identifying the index of the database item (x) that fulfills some
87
+ predetermined search criterion x = y, where y is the sought
88
+ element, given an unstructured database with N elements. In
89
+ this context, it is possible to prepare the so-called response
90
+ function (R(x)) that translates database entries to True if the
91
+ entry x matches the search criterion (x = y) or False if x ≠
92
+ y. This is possible by using the so-called Oracle subroutine,
93
+ which queries the database until the desired item is located.
94
+ arXiv:2301.05122v1 [quant-ph] 12 Jan 2023
95
97
+ Consequently, the bigger the requested element’s position in
98
+ the list, the greater the number of queries required to locate it.
99
+ Therefore, the complexity of this task is exactly proportional
100
+ to the number of items on the list [26, 27]. On average, N/2
102
+ queries are required, and the complexity of the classical search
103
+ problem is thus defined as being of order O (N) [29].
104
+ The renowned quantum search algorithm developed by
105
+ Grover searches unstructured datasets and comprises an appli-
106
+ cation that demonstrates the advantages of quantum comput-
107
+ ing over classical analogs [26]. The introduction of the quan-
108
+ tum superposition concept enables the algorithm to map all
109
+ the database items simultaneously, which reduces the total
110
+ number of queries and improves the efficiency of the search
111
+ process [27]. In this
112
+ regard, Grover’s Algorithm presents a complexity that grows
113
+ in order of O(√N), being quadratically faster than its classical
116
+ counterpart [29]. Therefore, many algorithms use Grover’s
117
+ method as a subroutine in order to optimize some quantum
118
+ processes. A famous example is the so-called Dürr-Hoyer’s
119
+ Algorithm for finding a minimum value in an unstructured
120
+ database [32]. Based on Grover’s algorithm, it achieves
121
+ a quadratic speed-up in the minimum search problem, whose
122
+ complexity can be expressed as O(√(N/t)), where t is the num-
126
+ ber of marked states [30].
127
+ In this context, this study investigates the quantum mini-
128
+ mum search (QMS) problem, proposing a quantum algorithm
129
+ quadratically faster than any classical analogs for finding min-
130
+ imal values in a quantum random access memory (QRAM).
131
+ We proposed the algorithm’s description considering a data
132
+ set represented by the vector ⃗y, which can be rewritten in the
133
+ computational bases as quantum states. The problem is to find
134
+ the minimum value of the list, ymin, using Grover’s Algorithm
135
+ as a subroutine based on Dürr-Hoyer’s approach.
136
+ A.
137
+ Quantum Random Access Memory (QRAM)
138
+ In order to use Grover’s Algorithm to find a minimum in a
139
+ classical data set, one proposal is to use a QRAM, typically
140
+ meaning a large classical memory, which can be queried in
141
+ a quantum superposition. It can be built using an equivalent
142
+ quantum circuit in which classical data is stored in a quan-
143
+ tum register in binary form. It can be done by creating two
144
+ quantum registers (with n and m qubits, respectively) whose
145
+ initialization should be
146
+ |ψ0⟩ = (1/√2^n) ∑_{x=0}^{2^n−1} |x⟩ ⊗ |0⟩^⊗m ,   (1)
155
+ which can be implemented by the operation H⊗n ⊗ I⊗m that
156
+ creates an equal superposition on the first register and keeps
157
+ the second register in the state |0⟩⊗m. The quantum RAM is
158
+ implemented in the second register by applying an operator
159
+ UX given by multicontrolled-NOT operations. The goal is to
160
+ store the classical values ⃗y = {y0, y1, y2, ..., yk} into quantum
161
+ states in order to obtain
162
+ |ψ1⟩ = (1/√2^n) ∑_{x=0}^{2^n−1} |x⟩ ⊗ |y_x⟩.   (2)
171
+ Thus, a quantum RAM can store 2^n data values. In this
172
+ scenario, we need to choose the number m of qubits used in
173
+ the second register. Since we are searching for the minimum
174
+ value of the whole dataset, a random index can serve as our
175
+ first iteration. Thus, it is possible to specify the number m
176
+ such that the number associated with such an index can be
177
+ expressed on a computational binary basis.
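
A minimal sketch of this storage step in Python with Qiskit (our choice of language; the paper gives no code, and the helper name qram_circuit is ours). For each address x, X gates select the control pattern and multicontrolled-NOT gates write the bits of y_x into the value register, reproducing Eqs. (1) and (2):

from qiskit import QuantumCircuit

def qram_circuit(values, n, m):
    # n address qubits, m value qubits; requires len(values) <= 2**n
    qc = QuantumCircuit(n + m)
    addr = list(range(n))
    data = list(range(n, n + m))
    qc.h(addr)  # equal superposition over addresses, Eq. (1)
    for x, y in enumerate(values):
        # flip the address qubits that must be |0> so every control fires on 1
        zeros = [addr[i] for i in range(n) if not (x >> i) & 1]
        for q in zeros:
            qc.x(q)
        for j in range(m):
            if (y >> j) & 1:          # write each 1-bit of y_x, Eq. (2)
                qc.mcx(addr, data[j])
        for q in zeros:
            qc.x(q)
    return qc

# Example with the dataset used later in the text:
qc = qram_circuit([5, 4, 12, 10, 8], n=3, m=4)

Each pass of the loop body corresponds to one of the gray blocks of Fig. 2 below.
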
178
+ B.
179
+ Finding the minimum in a QRAM
180
+ In order to perform the task of finding the smallest value
181
+ stored in a QRAM, the adopted strategy is to perform a
182
+ search analyzing the most (or least, if we want the maximum
183
+ value) significant bits from a single measurement. The full
184
+ quantum circuit can be seen in Fig. 1, where the special sub-
185
+ routine responsible for searching according to most significant
186
+ qubits is the iterative phase flip, given by an operator P.
187
+ Figure 1. Full quantum circuit for minimum search. UX is the rep-
188
+ resentation of a QRAM; the operator P is changed iteratively by
189
+ analysing the most significant qubits in the last measurement; and
190
+ W is the diffuser operator. It is important to emphasize that the last
191
+ qubit has its state initialized in |−⟩.
192
+ The key idea of the algorithm is in the dynamics of the P op-
193
+ erator. The additional register (qubits further down in Fig. 1)
194
+ is used to represent the storage of classical values in QRAM
195
+ and is also where the search is done. It is known that if the
196
+ most significant qubits have bits in the 0 state, it means that,
197
+ in the decimal base, this number is smaller than if the most
198
+ significant qubits were in the 1 state. Based on this logic, the
199
+ P operator can be constructed through multicontrolled-NOT
200
+ having qubits either with control at 0 or with control at 1.
201
+ Therefore, the algorithm that governs the dynamics of P is
202
+ described in Algorithm 1.
203
212
+ Algorithm 1 Finding the minimum in a QRAM
213
+ • Input A classical database ⃗y
214
+ 1. Take a random value yi = f(xi), whose binary represen-
215
+ tation demands m bits.
216
+ 2. Initialize a quantum computer in the state |ψ0⟩ = (1/√2^n) ∑_{x=0}^{2^n−1} |x⟩|0⟩^⊗m|−⟩.
223
+ 3. Store the classical values in the QRAM in order to get
224
+ the state |ψ1⟩ = (1/√2^n) ∑_{x=0}^{2^n−1} |x⟩|y_x⟩|−⟩
230
+ 4. Apply the oracle operator P in order to guarantee that
231
+ the most significant qubit is 0, that is, the marked states
232
+ are less than yi.
233
+ 5. Apply the diffuser operator, W, to amplify the marked
234
+ states.
235
+ 6. Perform a measurement in the computational basis to
236
+ obtain yi+1 < yi.
237
+ 7. If all qubits have been analyzed:
238
+ – end if
239
+ else:
240
+ – repeat steps
241
+ • return yi
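
A schematic Python rendering of this control flow (our own sketch, not the authors' implementation). The helper grover_sample stands in for one run of the Fig. 1 circuit with the oracle P marking values whose most significant bits match the current prefix; here it is emulated classically just to exhibit the iteration over the qubits:

import random

def grover_sample(values, prefix, m):
    # placeholder for a Grover search: return some value whose top bits
    # equal `prefix`, or None when no state is marked
    marked = [y for y in values if format(y, f"0{m}b").startswith(prefix)]
    return random.choice(marked) if marked else None

def quantum_minimum_search(values, m):
    best = random.choice(values)      # step 1: random initial value
    prefix = ""
    for _ in range(m):                # one pass per significant qubit
        candidate = grover_sample(values, prefix + "0", m)
        if candidate is not None:     # some value has a 0 in this position
            prefix += "0"
            best = candidate
        else:                         # every remaining value has a 1 here
            prefix += "1"
    return best

print(quantum_minimum_search([5, 4, 12, 10, 8], m=4))  # prints 4
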
242
+ Thus, Grover’s Algorithm can be used iteratively in or-
243
+ der to amplify states (indices) that correspond to smaller val-
244
+ ues than the last one, quadratically faster than their clas-
245
+ sical counterparts.
246
+ For instance, supposing the following
247
+ dataset ⃗y = {5, 4, 12, 10, 8}, the list entries can be repre-
248
+ sented in the computational basis (with four qubits) as ⃗y =
249
+ {|0101⟩, |0100⟩, |1100⟩, |1010⟩, |1000⟩}.
250
+ Figure 2. Implementation of a quantum RAM, given by the operator
251
+ UX, as a quantum circuit. Each gray block stores a classical value
252
+ from the database.
253
+ If the first guess (purely classical) is, for instance, 10 →
254
+ 1010, it is very unlikely that this number is the lowest. This
255
+ can be confirmed by looking for the number whose most sig-
256
+ nificant qubit is |0⟩.
257
+ Figure 3. Quantum operator P for searching all states whose most
258
+ significant qubit is in the state |0⟩.
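
A minimal Qiskit sketch of this oracle (our illustration; the helper name is hypothetical). With the last qubit prepared in |−⟩, an X-conjugated CNOT flips the phase of exactly those basis states whose most significant value qubit is |0⟩:

from qiskit import QuantumCircuit

def oracle_msq_zero(m):
    qc = QuantumCircuit(m + 1)  # m value qubits plus the |-> ancilla
    msq = m - 1                 # most significant value qubit
    qc.x(msq)                   # turn "control on 0" into "control on 1"
    qc.cx(msq, m)               # phase kickback onto the |-> ancilla
    qc.x(msq)
    return qc
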
259
+ Thus, a Grover iteration with this oracle marks all states
260
+ whose most significant qubit is in state |0⟩. A diagram rep-
261
+ resentation of the state before the measurement can be seen in
262
+ Fig. 4.
263
+ Figure 4. A diagrammatic representation of the quantum state probabil-
264
+ ities. The states |x0⟩ = |000⟩ and |x1⟩ = |001⟩ are amplified because
265
+ f(x0) = |0101⟩ and f(x1) = |0100⟩, whose most significant qubits
266
+ are |0⟩ for both.
267
+ After that, by performing Grover’s search in that most sig-
268
+ nificant qubit, the states |0100⟩ and |0101⟩ will have equal
269
+ probability to be measured. If we get the state |0101⟩ after the
270
+ measurement, the next step is to search for values whose two
271
+ first binary digits are |00⟩.
272
+ Figure 5. Quantum operator P for searching all states whose two
273
+ most significant qubits are in the state |00⟩.
274
+ If one or more values satisfy this condition, a number
275
+ less than |0101⟩ will be measured (yi > yi+1); if not, a num-
276
+ ber greater will likely be measured (yi < yi+1), because no
277
+ rotation is performed on the initial state.
278
+ In the case where yi < yi+1, the process shows that the min-
279
+ imum is |0101⟩ or a smaller number whose first two most
280
+ significant qubits are also |01⟩, so it is necessary to search for
281
+ values whose three most significant qubits are |010⟩. In this
282
+ particular case, the only remaining task is to verify if |0100⟩
283
+ is in the QRAM since it is the smallest possible number whose
284
+ three most significant qubits are in the state |010⟩.
285
291
+ Figure 6. Quantum operator P for checking if the value |4⟩ ≡ |0100⟩
292
+ is in the QRAM.
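
Continuing the Qiskit sketch above (again our illustration, not the authors' code), the check of Fig. 6 generalizes to a phase oracle that marks one exact bit pattern, conjugating the controls with X on the 0-bits:

def oracle_exact(pattern):
    # `pattern` is a bit string such as '0100'; reuses QuantumCircuit from above
    m = len(pattern)
    qc = QuantumCircuit(m + 1)
    # qubit 0 holds the least significant bit, so reverse the pattern string
    zeros = [i for i, b in enumerate(reversed(pattern)) if b == "0"]
    for q in zeros:
        qc.x(q)
    qc.mcx(list(range(m)), m)   # kickback on the |-> ancilla
    for q in zeros:
        qc.x(q)
    return qc

p_check = oracle_exact("0100")  # the verification step of this example
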
293
+ In this example, the state |0100⟩ will be measured with ap-
294
+ proximately 100% probability (see Fig. 7). The process is
295
+ iteratively done with the rest of the qubits in order to find the
296
+ minimum ymin = |0100⟩ with certainty.
297
+ Figure 7.
298
+ Final distribution for the last iteration.
299
+ In this case,
300
+ f(001) = ymin = |0100⟩.
301
+ The task of finding a minimum in a vector can be performed
302
+ using an optimal algorithm, with time complexity O(|⃗y|). The
303
+ quantum algorithm proposed in this work solves the same
304
+ problem on a quantum computer by performing O(c√(|⃗y|/t))
308
+ queries in Grover’s oracle, where c is a constant whose value
309
+ is the number of digits in the binary representation of the initial
310
+ value and t is the number of marked states. According to com-
311
+ plexity theory, a constant doesn’t affect time complexity, so
312
+ it is valid to rewrite the time complexity of this algorithm as
313
+ O(√(|⃗y|/t)).
317
+ C.
318
+ Complexity analysis
319
+ In order to analyze and compare the time complexity be-
320
+ tween classical and quantum algorithms for different scenar-
321
+ ios, we take two classical algorithms with different complexi-
322
+ ties. For the proposed quantum algorithm, the same was done,
323
+ but using an increase in complexity by increasing the number
324
+ of bits in the database values (See Fig. 8). We know that
325
+ classical and quantum algorithms have complexities O(ccN)
326
+ and O(cq√N), respectively. The constants cc and cq indicate,
329
+ respectively, the constant inherent complexity factor of each
330
+ classical algorithm and the number of bits of the quantum al-
331
+ gorithm’s initial guess, as explained in the procedure.
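
An illustrative back-of-the-envelope comparison of these query counts (the constants cc and cq play the roles described in the text; the particular values below are ours, chosen to echo the bounds plotted in Fig. 8):

import math

def classical_queries(N, cc=1.0):
    return cc * N                   # O(cc N)

def quantum_queries(N, cq=6, t=1):
    return cq * math.sqrt(N / t)    # O(cq sqrt(N/t))

for N in (100, 500, 10_000):
    print(N, classical_queries(N, cc=1.5), round(quantum_queries(N, cq=14), 1))
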
332
+ Figure 8. Complexity analysis among algorithms. The shade be-
333
+ tween lines represents the complexity range among classical and
334
+ quantum algorithms. The upper and lower bounds of the classical
335
+ algorithms (blue) have time complexities O((3/2)N − 2) and O(N − 1).
337
+ For the case of the quantum algorithm (orange), the upper and lower
338
+ limits were drawn for the cases where cq = 14 and cq = 6, respec-
339
+ tively.
340
+ III.
341
+ K-MEANS CLUSTERING
342
+ In order to demonstrate the application of minimum search
343
+ in important computer science tasks, we implemented the
344
+ clustering algorithm called K-means, well known in statistics
345
+ and unsupervised machine learning. Given a set of points in
346
+ Euclidean space, the algorithm aims to determine the optimal
347
+ location of K-centroids in order to minimize the sum of all dis-
348
+ tances between the points and their respective centroids. The
349
+ objective function can be given by
350
+ f(x, y) = ∑_{j=0}^{K} ∑_{i=0}^{|S|} ∥p_i^{(j)} − c_i∥²   (3)
361
+ where pi is an observed point in Euclidean space, |S| is the
362
+ total number of observed points, ci is the position of a cen-
363
+ troid and K is the predetermined number of centroids. Fig. 9
364
+ shows a distribution of points in the Cartesian plane and the
365
+ randomly initialized centroids before starting the optimization
366
+ process. Although a simplified example, this one can be use-
367
+ ful to visually demonstrate how the algorithm works.
368
389
+ Figure 9. Distribution of |S| = 12 points (red) in two-dimensional
390
+ Euclidean space. In this example, four centroids (K = 4), represented
391
+ by stars (black), were randomly initialized.
392
+ We assume that in the quantum version of K-means, the
393
+ distances between each observed point, pi, and all centroids,
394
+ S = {c0, c1, c2, c3}, are stored in QRAM in constant time, that
395
+ is, O(1). Clusters are formed at each iteration by the proxim-
396
+ ity between each point and its closest centroid. The average
397
+ between the coordinates of each new cluster is calculated and
398
+ becomes the new centroid. This process is carried out until a
399
+ certain stopping criterion is satisfied. Fig. 10 shows the best
400
+ clustering found by the algorithm.
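
A sketch of one such iteration (a classical stand-in of our own: in the quantum version the distances computed for each point would be stored in the QRAM, and find_minimum would be the QMS subroutine):

import numpy as np

def find_minimum(values):
    return int(np.argmin(values))   # classical stand-in for the QMS over a QRAM

def assign_clusters(points, centroids):
    labels = []
    for p in points:
        dists = [float(np.sum((p - c) ** 2)) for c in centroids]  # would sit in QRAM
        labels.append(find_minimum(dists))  # QMS picks the closest centroid
    return np.array(labels)

def update_centroids(points, labels, K):
    # the mean of each cluster becomes its new centroid (clusters assumed non-empty)
    return np.array([points[labels == j].mean(axis=0) for j in range(K)])

The two steps are repeated until the stopping criterion mentioned above is met.
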
401
+ Figure 10. Optimal solution found by the quantum version of K-
402
+ means. Each of the four clusters found by the algorithm is being
403
+ represented by a color.
404
+ Note that the only difference between this procedure and its
405
+ classical analog is that the distances between each point and
406
+ all centroids are stored in a QRAM, and the QMS is used to
407
+ find the smallest one. Although we are using QMS for a spe-
408
+ cific example, it can be useful for a wide range of computa-
409
+ tional tasks, such as unsupervised machine learning problems.
410
+ IV.
411
+ CONCLUSIONS
412
+ Classical computing can be outperformed by quantum com-
413
+ puting in a wide range of problems, from those with low to
414
+ those with high levels of computational complexity. The clas-
415
+ sical minimum search problem is characterized by linear com-
416
+ plexity and is connected to an extensive variety of applications
417
+ in the domain of computer science. In this scenario, this work
418
+ proposed a quantum algorithm for finding the minimum value
419
+ of a database that is quadratically faster than its best classical
420
+ analogs. The algorithm is based on Dürr-Hoyer’s approach for
421
+ finding a minimum value in an unstructured list through the
422
+ use of Grover’s algorithm as a subroutine applied to a QRAM
423
+ that stores values from a defined database. Although it is not
424
+ considered a complex task, our results show that the suggested
425
+ QMS algorithm has the potential to significantly reduce the
426
+ execution time of minimum search algorithms for cases where
427
+ the database is very large. Moreover, an examination of the
428
+ complexity of the studied problem was performed in order to
429
+ highlight the advantages of this quantum algorithm over its
430
+ classical analogs. Furthermore, we show how the suggested
431
+ approach can be used in an unsupervised machine learning
432
+ task by performing a quantum adaptation of the K-means al-
433
+ gorithm. In conclusion, our results demonstrate that it is possi-
434
+ ble to search for minimums in a classical database by utilizing
435
+ information stored in a QRAM, which represents a significant
436
+ contribution to the development of fault-tolerant quantum al-
437
+ gorithms.
438
+ ACKNOWLEDGMENTS
439
+ We thank the Latin American Quantum Computing Cen-
440
+ ter and the High-Performance Computing Center, both from
441
+ SENAI CIMATEC for supporting this research. We also thank
442
+ the Quantum Open Source Foundation (QOSF) mentoring
443
+ program, whose developed project originated this article.
444
+ [1] A. Sedra and K. Smith, Microelectronic Circuits (Oxford Uni-
445
+ versity Press, 2004).
446
+ [2] V. Giovannetti, S. Lloyd, and L. Maccone, Physical review let-
447
+ ters 100, 160501 (2008).
448
+ [3] V. Giovannetti, S. Lloyd, and L. Maccone, Physical Review A
449
+ 78, 052310 (2008).
450
+ [4] D. K. Park, F. Petruccione, and J.-K. K. Rhee, Scientific reports
451
+ 9, 1 (2019).
452
+ [5] P. Yuan and S. Zhang, arXiv preprint arXiv:2202.11302 (2022).
453
+ [6] K. Phalak, J. Li, and S. Ghosh, arXiv preprint arXiv:2210.14804 (2022).
463
+ [7] R. Asaka, K. Sakai, and R. Yahagi, Quantum Science and Tech-
464
+ nology 6, 035004 (2021).
465
+ [8] T. M. De Veras, I. C. De Araujo, D. K. Park, and A. J. Da Silva,
466
+ IEEE Transactions on Computers 70, 2125 (2020).
467
+ [9] C. T. Hann, G. Lee, S. Girvin, and L. Jiang, PRX Quantum 2,
468
+ 020311 (2021).
469
+ [10] O. Di Matteo, V. Gheorghiu, and M. Mosca, IEEE Transactions
470
+ on Quantum Engineering 1, 1 (2020).
471
505
+ [11] M. Blencowe, Nature 468, 44 (2010).
506
+ [12] T. Hur, L. Kim, and D. K. Park, Quantum Machine Intelligence
507
+ 4, 1 (2022).
508
+ [13] B. Broda, The European Physical Journal Plus 131, 1 (2016).
509
+ [14] V. Giovannetti, S. Lloyd, and L. Maccone, Physical review let-
510
+ ters 100, 230502 (2008).
511
+ [15] S. Lu, Y. Zhang, and F. Liu, Quantum information processing
512
+ 12, 3265 (2013).
513
+ [16] A. Hosoyamada, Y. Sasaki, S. Tani, and K. Xagawa, Theoreti-
514
+ cal Computer Science 842, 100 (2020).
515
+ [17] M. Naya-Plasencia, A. Schrottenloher, A. Chailloux, and
516
+ L. Grassi, in QuAC: Quantum Algorithms for Cryptanalysis
518
+ (2019).
519
+ [18] X. Bonnetain, A. Chailloux, A. Schrottenloher, and Y. Shen,
520
+ arXiv preprint arXiv:2205.14023 (2022).
521
+ [19] B. Duan, J. Yuan, C.-H. Yu, J. Huang, and C.-Y. Hsieh, Physics
522
+ Letters A 384, 126595 (2020).
523
+ [20] L. Wossnig, Z. Zhao, and A. Prakash, Physical review letters
524
+ 120, 050502 (2018).
525
+ [21] I. Kerenidis and A. Prakash, Physical Review A 101, 022316
526
+ (2020).
527
+ [22] C. Shao and H. Xiang, Physical Review A 101, 022322 (2020).
528
+ [23] S. Arunachalam, V. Gheorghiu, T. Jochym-O’Connor,
534
+ M. Mosca, and P. V. Srinivasan, New Journal of Physics 17,
535
+ 123010 (2015).
536
+ [24] L. Bugalho, E. Z. Cruzeiro, K. C. Chen, W. Dai, D. Englund,
537
+ and Y. Omar, arXiv preprint arXiv:2210.13494 (2022).
538
+ [25] A. Paler, O. Oumarou, and R. Basmadjian, Physical Review A
539
+ 102, 032608 (2020).
540
+ [26] M. A. Nielsen and I. Chuang, Quantum computation and quan-
541
+ tum information (American Association of Physics Teachers,
542
+ 2002).
543
+ [27] C. Figgatt, D. Maslov, K. A. Landsman, N. M. Linke, S. Deb-
544
+ nath, and C. Monroe, Nature communications 8, 1 (2017).
545
+ [28] R. Seidel, C. K.-U. Becker, S. Bock, N. Tcholtchev, I.-D.
546
+ Gheorge-Pop, and M. Hauswirth, Quantum Science and Tech-
547
+ nology (2023).
548
+ [29] P. J. Szabłowski, Quantum Information Processing 20, 1 (2021).
549
+ [30] C. Durr and P. Hoyer, arXiv preprint quant-ph/9607014 (1996).
550
+ [31] N. Wiebe, A. Kapoor, and K. M. Svore, Quantum information
551
+ and computation 15, 318 (2015).
552
+ [32] Y. Chen, S. Wei, X. Gao, C. Wang, Y. Tang, J. Wu, and H. Guo,
553
+ Quantum Information Processing 19, 1 (2020).
554
+
IdE4T4oBgHgl3EQfhA2z/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,375 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf,len=374
2
+ page_content='Quantum algorithm for finding minimum values in a Quantum Random Access Memory Anton S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
3
+ page_content=' Albino,1, ∗ Lucas Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
4
+ page_content=' Galvão,1, † Ethan Hansen,2, ‡ Mauro Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
5
+ page_content=' Nooblath Neto,1, § and Clebson Cruz3, ¶ 1Latin American Quantum Computing Center, SENAI CIMATEC, Salvador, Brazil.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
6
+ page_content=' 2Zapata Computing, Canada 3Grupo de Informação Quântica, Centro de Ciências Exatas e das Tecnologias, Universidade Federal do Oeste da Bahia - Campus Reitor Edgard Santos.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
7
+ page_content=' Rua Bertioga, 892, Morada Nobre I, 47810-059 Barreiras, Bahia, Brasil.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
8
+ page_content=' Finding the minimum value in an unordered database is a common and fundamental task in computer science.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
9
+ page_content=' However, the optimal classical deterministic algorithm can find the minimum value with a time complexity that grows linearly with the number of elements in the database.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
10
+ page_content=' In this paper, we present the proposal of a quantum algorithm for finding the minimum value of a database, which is quadratically faster than its best classical analogs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
11
+ page_content=' We assume a Quantum Random Access Memory (QRAM) that stores values from a database and perform an iterative search based on an oracle whose role is to limit the searched values by controlling the states of the most significant qubits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
12
+ page_content=' A complexity analysis was performed in order to demonstrate the advantage of this quantum algorithm over its classical counterparts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
13
+ page_content=' Furthermore, we demonstrate how the proposed algorithm would be used in an unsupervised machine learning task through a quantum version of the K-means algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
14
+ page_content=' Keywords: Quantum RAM, Minimum search, Grover’s Algorithm I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
+ I. INTRODUCTION
+ Random Access Memory (RAM) is a versatile, short-term memory used in computing for storing and retrieving information via bits [1]. Similarly, the concept of Quantum RAM (QRAM) emerges with the same goal but employs qubits in a superposition of states to achieve faster results for computational applications, whether quantum or classical [2–11]. Several works discuss the potential of its applications to optimize the execution of quantum algorithms, including quantum searching on a classical database [10, 12–15], collision finding [12, 16–18], and algorithms for solving linear systems [19–22], for instance.
+ These results have attracted the attention of the scientific community in the past few years, leading to the development of QRAM architectures that demonstrate the potential for producing efficient results in quantum computing [3, 4, 6, 7, 9, 23–25]. Some models, such as Fanout quantum RAM [3, 7, 9] and Bucket-Brigade quantum RAM [3, 23, 25], illustrate potential future implementations of a QRAM in practical scenarios. In addition, recent experiments have revealed a designed architecture for hybrid quantum computers that use superconducting qubits and spin-qubit memory crystals capable, in theory, of implementing a QRAM in real systems [11]. Furthermore, other efforts have been made to construct quantum algorithms that are able to optimally access a QRAM in the process of searching for certain values stored in its cells [10].
+ In general, search-based problems use the famous Grover's algorithm to search quantum states in an unstructured list [16, 26–29]. On the other hand, a well-known example of determining the minimal value in a list is the so-called Dürr-Hoyer minimum finding algorithm [30], which employs Grover's Algorithm as a fundamental subroutine to find the greatest or smallest entry in a list [31].
+ In this scenario, based on the core concept of Durr-Hoyer's algorithm, we apply Grover's Algorithm as a subroutine to develop a quantum algorithm for identifying the smallest value in a classical data set stored in a QRAM. The proposed Quantum Minimum Search (QMS) algorithm is based on the iterative change of the oracle function, which limits the searched values by controlling the states of the most significant qubits.
+ First, we present the description of the QMS algorithm, describing the concept of QRAM and approaching an example to find the minimum in a list of four real values using the proposed algorithm. In sequence, we analyze the complexity of the QMS algorithm compared with classical algorithms. The results show that, whereas the complexity of the classical algorithm grows linearly with the number of elements in the database, O(N), since the classical algorithms go through all the N items in the list, the presented QMS algorithm has a complexity of O(√(N/t)), with t being the number of marked states. Finally, we present an application of the proposed algorithm in the K-means problem of determining the optimal location of K centroids in order to minimize the sum of all distances between the points and their respective centroids.
+ II. QUANTUM MINIMUM SEARCH (QMS) ALGORITHM
+ The search problem is a ubiquitous subject of discussion in classical computer science [26]. The problem consists of identifying the index of the database item (x) that fulfills some predetermined search criterion x = y, where y is the sought element, given an unstructured database with N elements. In this context, it is possible to prepare the so-called response function R(x) that maps database entries to True if the entry x matches the search criterion (x = y) or False if x ≠ y. This is possible by using the so-called Oracle subroutine, which queries the database until the desired item is located. Consequently, the further back the requested element's position in the list, the greater the number of queries required to locate it. Therefore, the complexity of this task is directly proportional to the number of items on the list [26, 27]. On average, N/2 queries are required, and the complexity of the classical search problem is thus defined as being of order O(N) [29].
+ The renowned quantum search algorithm developed by Grover searches unstructured datasets and comprises an application that demonstrates the advantages of quantum computing over classical analogs [26]. The introduction of the quantum superposition concept gives the algorithm the ability to map all the database items simultaneously, which allows for a reduction in the total number of queries and thus an improvement in the efficiency of the search process [27]. In this regard, Grover's Algorithm presents a complexity that grows in order of O(√N), being quadratically faster than its classical counterpart [29]. Therefore, many algorithms use Grover's method as a subroutine in order to optimize some quantum processes. A famous example is the so-called Dürr-Hoyer's Algorithm for finding a minimum value in an unstructured database [32]. Based on Grover's algorithm, it achieves a quadratic speed-up in the minimum search problem, whose complexity can be expressed as O(√(N/t)), where t is the number of marked states [30].
+ In this context, this study investigates the quantum minimum search (QMS) problem, proposing a quantum algorithm quadratically faster than any classical analogs for finding minimal values in a quantum random access memory (QRAM). We propose the algorithm's description considering a data set represented by the vector y⃗, which can be rewritten in the computational basis as quantum states. The problem is to find the minimum value of the list, ymin, using Grover's Algorithm as a subroutine based on Durr-Hoyer's approach.
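+ For concreteness, the classical picture described above (the response function R(x), the query-by-query Oracle scan, and the O(N) minimum search) can be written in a few lines of plain Python; the helper names below are illustrative, not part of the paper:
+ # Classical baseline: oracle-style search and minimum finding, O(N) queries.
+ def make_response(database, y):
+     """R(x): True iff database[x] matches the sought element y."""
+     def R(x):
+         return database[x] == y
+     return R
+ 
+ def linear_search(database, y):
+     R = make_response(database, y)
+     for x in range(len(database)):   # one oracle query per index
+         if R(x):
+             return x
+     return None
+ 
+ def linear_minimum(database):
+     best = 0
+     for x in range(1, len(database)):  # N - 1 comparisons in total
+         if database[x] < database[best]:
+             best = x
+     return best
+ 
+ y_vec = [5, 4, 12, 10, 8]            # the example database used later in the text
+ print(linear_search(y_vec, 12), linear_minimum(y_vec))  # -> 2 1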
+ A. Quantum Random Access Memory (QRAM)
+ In order to use Grover's Algorithm to find a minimum in a classical data set, one proposal is to use a QRAM, typically meaning a large classical memory which can be queried in a quantum superposition. It can be built using an equivalent quantum circuit in which classical data are stored in a quantum register in binary form. It can be done by creating two quantum registers (with n and m qubits, respectively) whose initialization should be
+ |ψ0⟩ = (1/√(2^n)) Σ_{x=0}^{2^n − 1} |x⟩ ⊗ |0⟩^{⊗m},   (1)
+ which can be implemented by the operation H^{⊗n} ⊗ I^{⊗m} that creates an equal superposition on the first register and keeps the second register in the state |0⟩^{⊗m}. The quantum RAM is implemented in the second register by applying an operator UX given by multicontrolled-NOT operations. The goal is to store the classical values y⃗ = {y0, y1, y2, ..., yk} into quantum states in order to obtain
+ |ψ1⟩ = (1/√(2^n)) Σ_{x=0}^{2^n − 1} |x⟩ ⊗ |yx⟩.   (2)
+ Thus, a quantum RAM can store 2^n data values. In this scenario, we need to choose the number m of qubits used in the second register. Since we are searching for the minimum value of the whole dataset, a random index can serve as our first iteration. Thus, it is possible to specify the number m such that the number associated with such an index can be expressed in the computational binary basis.
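+ A minimal circuit-level sketch of the storage operator UX, assuming Qiskit is available; the register names and the per-address encoding loop are illustrative choices, not a prescribed implementation:
+ # Sketch: write y = [5, 4, 12, 10, 8] into a QRAM-style register pair (Eq. 2).
+ from qiskit import QuantumCircuit, QuantumRegister
+ 
+ y_vec = [5, 4, 12, 10, 8]
+ n, m = 3, 4                       # 2^3 = 8 addresses; 4 bits per stored value
+ addr = QuantumRegister(n, "addr")
+ data = QuantumRegister(m, "data")
+ qc = QuantumCircuit(addr, data)
+ qc.h(addr)                        # equal superposition over addresses (Eq. 1)
+ 
+ for x, yx in enumerate(y_vec):    # U_X: one multicontrolled block per address
+     # Surround the controls with X gates so the block fires only on |x>.
+     for j in range(n):
+         if not (x >> j) & 1:
+             qc.x(addr[j])
+     for j in range(m):            # write the bits of y_x into the data register
+         if (yx >> j) & 1:
+             qc.mcx(list(addr), data[j])
+     for j in range(n):
+         if not (x >> j) & 1:
+             qc.x(addr[j])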
+ B. Finding the minimum in a QRAM
+ In order to perform the task of finding the smallest value stored in a QRAM, the adopted strategy is to perform a search analyzing the most (or least, if we want the maximum value) significant bits from a single measurement. The full quantum circuit can be seen in Fig. 1, where the special subroutine responsible for searching according to the most significant qubits is the iterative phase flip, given by an operator P.
+ Figure 1. Full quantum circuit for minimum search. UX is the representation of a QRAM; the operator P is changed iteratively by analysing the most significant qubits in the last measurement; and W is the diffuser operator. It is important to emphasize that the last qubit has its state initialized in |−⟩.
+ The key idea of the algorithm is in the dynamics of the P operator. The additional register (qubits further down in Fig. 1) is used to represent the storage of classical values in the QRAM and is also where the search is done. It is known that if the most significant qubits are in the 0 state, then, in the decimal base, this number is smaller than if the most significant qubits were in the 1 state. Based on this logic, the P operator can be constructed through multicontrolled-NOT gates having qubits either with control at 0 or with control at 1. Therefore, the algorithm that governs the dynamics of P is described in the box Algorithm 1.
+ Algorithm 1 Finding the minimum in a QRAM
+ Input: a classical database y⃗
+ 1. Take a random value yi = f(xi), whose binary representation demands m bits.
+ 2. Initialize a quantum computer in the state |ψ0⟩ = (1/√(2^n)) Σ_{x=0}^{2^n − 1} |x⟩|0⟩^{⊗m}|−⟩.
+ 3. Store the classical values in the QRAM in order to get the state |ψ1⟩ = (1/√(2^n)) Σ_{x=0}^{2^n − 1} |x⟩|yx⟩|−⟩.
+ 4. Apply the oracle operator P in order to guarantee that the most significant qubit is 0, that is, the marked states are less than yi.
+ 5. Apply the diffuser operator, W, to amplify the marked states.
+ 6. Perform a measurement in the computational basis to obtain yi+1 < yi.
+ 7. If all qubits have been analyzed, end and return yi; else, repeat the steps.
+ Thus, Grover's Algorithm can be used iteratively in order to amplify the states (indices) that correspond to smaller values than the last one, quadratically faster than their classical counterparts.
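+ The control flow of Algorithm 1 can be simulated classically; in the sketch below (plain Python, with names of our own), the Grover subroutine of steps 4–6 is abstracted as a prefix-membership test that returns a marked value whenever one exists:
+ # Classical mock-up of Algorithm 1's control flow: fix the most significant
+ # bits one at a time; a Grover iteration is abstracted as a prefix test.
+ import random
+ 
+ def grover_prefix_test(values, prefix, k, m):
+     """Stand-in for oracle P + diffuser W: return a value whose k most
+     significant bits (out of m) equal `prefix`, if any such value exists."""
+     hits = [v for v in values if v >> (m - k) == prefix]
+     return random.choice(hits) if hits else None
+ 
+ def qms(values, m):
+     yi = random.choice(values)        # step 1: random first guess
+     prefix = 0
+     for k in range(1, m + 1):         # step 7: loop over all m qubits
+         hit = grover_prefix_test(values, prefix << 1, k, m)   # try bit 0
+         if hit is not None:           # steps 4-6: a smaller value was measured
+             prefix, yi = prefix << 1, hit
+         else:                         # no state marked: the minimum has bit 1 here
+             prefix = (prefix << 1) | 1
+     return yi
+ 
+ print(qms([5, 4, 12, 10, 8], m=4))    # -> 4, the minimum of the example list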
+ For instance, suppose the following dataset: y⃗ = {5, 4, 12, 10, 8}. The list entries can be represented in the computational basis (with four qubits) as y⃗ = {|0101⟩, |0100⟩, |1100⟩, |1010⟩, |1000⟩}.
+ Figure 2. Implementation of a quantum RAM, given by the operator UX, as a quantum circuit. Each gray block stores a classical value from the database.
+ If the first guess (purely classical) is, for instance, 10 → 1010, it is very unlikely that this number is the lowest. This can be confirmed by looking for a number whose most significant qubit is |0⟩.
+ Figure 3. Quantum operator P for searching all states whose most significant qubit is in the state |0⟩.
+ Thus, a Grover iteration with this oracle marks all states whose most significant qubit is in the state |0⟩. A diagram representation of the state before the measurement can be seen in Fig. 4.
+ Figure 4. A diagrammatic representation of the quantum state probabilities. The states |x0⟩ = |000⟩ and |x1⟩ = |001⟩ are amplified because f(x0) = |0101⟩ and f(x1) = |0100⟩, whose most significant qubits are |0⟩ for both.
+ After that, by performing Grover's search on that most significant qubit, the states |0100⟩ and |0101⟩ will have equal probability of being measured. If we get the state |0101⟩ after the measurement, the next step is to search for values whose first two binary digits are |00⟩.
+ Figure 5. Quantum operator P for searching all states whose two most significant qubits are in the state |00⟩.
+ If there are one or more values satisfying this, a number less than |0101⟩ will be measured (yi > yi+1); if not, a greater number will likely be measured (yi < yi+1), because no rotation is performed on the initial state.
+ In the case where yi < yi+1, the process shows that the minimum is |0101⟩ or a smaller number whose first two most significant qubits are also |01⟩, so it is necessary to search for values whose three most significant qubits are |010⟩. In this particular case, the only remaining task is to verify whether |0100⟩ is in the QRAM, since it is the smallest possible number whose three most significant qubits are in the state |010⟩.
+ Figure 6. Quantum operator P for checking if the value |4⟩ ≡ |0100⟩ is in the QRAM.
+ In this example, the state |0100⟩ will be measured with approximately 100% probability (see Fig. 7). The process is done iteratively with the rest of the qubits in order to surely find the minimum ymin = |0100⟩.
+ Figure 7. Final distribution for the last iteration. In this case, f(001) = ymin = |0100⟩.
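+ At the circuit level, the successive oracles of Figs. 3, 5, and 6 are multicontrolled-NOTs with controls on zero; a minimal sketch, assuming Qiskit, of the operators P of Figs. 3 and 5 acting on the four data qubits and the |−⟩ ancilla of Fig. 1 (register names are ours):
+ # Sketch of the oracle P of Fig. 3: flip the |-> ancilla (phase kickback)
+ # whenever the most significant data qubit is |0>.
+ from qiskit import QuantumCircuit, QuantumRegister
+ 
+ data = QuantumRegister(4, "data")      # data[3] is the most significant qubit
+ anc = QuantumRegister(1, "anc")        # prepared in |-> elsewhere in Fig. 1
+ p_fig3 = QuantumCircuit(data, anc)
+ p_fig3.x(data[3])                      # control-on-zero = X, control, X
+ p_fig3.cx(data[3], anc[0])
+ p_fig3.x(data[3])
+ 
+ # Fig. 5's variant marks states whose two most significant qubits are |00>:
+ p_fig5 = QuantumCircuit(data, anc)
+ p_fig5.x([data[3], data[2]])
+ p_fig5.mcx([data[3], data[2]], anc[0])
+ p_fig5.x([data[3], data[2]])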
+ The task of finding a minimum in a vector can be performed classically using an optimal algorithm with time complexity O(|y⃗|). The quantum algorithm proposed in this work solves the same problem on a quantum computer by performing O(c·√(|y⃗|/t)) queries to Grover's oracle, where c is a constant whose value is the number of digits in the binary representation of the initial value and t is the number of marked states. According to complexity theory, a constant does not affect time complexity, so it is valid to rewrite the time complexity of this algorithm as O(√(|y⃗|/t)).
+ C. Complexity analysis
+ In order to analyze and compare the time complexity of classical and quantum algorithms in different scenarios, we take two classical algorithms with different complexities. For the proposed quantum algorithm, the same was done, but with the complexity increased by increasing the number of bits of the database values (see Fig. 8). We know that the classical and quantum algorithms have complexities O(cc·N) and O(cq·√N), respectively. The constants cc and cq indicate, respectively, the constant inherent complexity factor of each classical algorithm and the number of bits of the quantum algorithm's initial guess, as explained in the procedure.
+ Figure 8. Complexity analysis among algorithms. The shade between lines represents the complexity range among classical and quantum algorithms. The upper and lower bounds of the classical algorithms (blue) have time complexities O((3/2)N − 2) and O(N − 1). For the case of the quantum algorithm (orange), the upper and lower limits were drawn for the cases where cq = 14 and cq = 6, respectively.
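+ The bounds quoted in the caption of Fig. 8 can be tabulated directly; a small Python sketch (the sampled values of N are our own choice):
+ # Query-count comparison behind Fig. 8: classical bounds vs quantum bounds.
+ import math
+ 
+ def classical_hi(N): return 1.5 * N - 2      # O((3/2)N - 2) upper bound
+ def classical_lo(N): return N - 1            # O(N - 1) lower bound
+ def quantum(N, cq): return cq * math.sqrt(N) # O(cq * sqrt(N)), cq = bit count
+ 
+ for N in (100, 200, 300, 400, 500):
+     print(N, classical_lo(N), classical_hi(N),
+           round(quantum(N, 6)), round(quantum(N, 14)))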
+ III. K-MEANS CLUSTERING
+ In order to demonstrate the application of minimum search in important computer science tasks, we implemented the clustering algorithm called K-means, well known in statistics and unsupervised machine learning. Given a set of points in Euclidean space, the algorithm aims to determine the optimal location of K centroids in order to minimize the sum of all distances between the points and their respective centroids. The objective function can be given by
+ f(x, y) = Σ_{j=0}^{K} Σ_{i=0}^{|S|} ∥p_i^{(j)} − c_j∥²,   (3)
+ where p_i is an observed point in Euclidean space, |S| is the total number of observed points, c_j is the position of a centroid, and K is the predetermined number of centroids. Fig. 9 shows a distribution of points in the Cartesian plane and the randomly initialized centroids before starting the optimization process. Although a simplified example, it is useful to visually demonstrate how the algorithm works.
+ Figure 9. Distribution of |S| = 12 points (red) in two-dimensional Euclidean space. In this example, four centroids (K = 4), represented by stars (black), were randomly initialized.
+ We assume that in the quantum version of K-means, the distances between each observed point, pi, and all centroids, S = {c0, c1, c2, c3}, are stored in the QRAM in constant time, that is, O(1). Clusters are formed at each iteration by the proximity between each point and its closest centroid. The average of the coordinates of each new cluster is calculated and becomes the new centroid. This process is carried out until a certain stopping criterion is satisfied. Fig. 10 shows the best clustering found by the algorithm.
+ Figure 10. Optimal solution found by the quantum version of K-means. Each of the four clusters found by the algorithm is represented by a color.
+ Note that the only difference between this procedure and its classical analog is that the distances between each point and all centroids are stored in a QRAM, and the QMS is used to find the smallest one. Although we are using QMS for a specific example, it can be useful for a huge number of computational tasks, such as unsupervised machine learning problems.
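+ A compact classical sketch of this loop in Python/NumPy, with the nearest-centroid argmin isolated as the step that QMS would perform on the QRAM-stored distance table (data, seed, and iteration budget are illustrative):
+ # K-means with the nearest-centroid step isolated: in the quantum version,
+ # the argmin over the distance table stored in QRAM is done by QMS.
+ import numpy as np
+ 
+ rng = np.random.default_rng(0)
+ points = rng.random((12, 2))           # |S| = 12 points in the plane
+ centroids = rng.random((4, 2))         # K = 4 random initial centroids
+ 
+ for _ in range(20):                    # fixed iteration budget as stop rule
+     dists = np.linalg.norm(points[:, None, :] - centroids[None, :, :], axis=2)
+     labels = dists.argmin(axis=1)      # <- the minimum search QMS replaces
+     for j in range(len(centroids)):    # recompute each centroid as a mean
+         if np.any(labels == j):
+             centroids[j] = points[labels == j].mean(axis=0)
+ print(labels)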
+ IV. CONCLUSIONS
+ Classical computing can be outperformed by quantum computing in a wide range of problems, from those with low to those with high levels of computational complexity. The classical minimum search problem is characterized by linear complexity and is connected to an extensive variety of applications in the domain of computer science. In this scenario, this work proposed a quantum algorithm for finding the minimum value of a database that is quadratically faster than its best classical analogs. The algorithm is based on Dürr-Hoyer's approach for finding a minimum value in an unstructured list through the use of Grover's algorithm as a subroutine applied to a QRAM that stores values from a defined database. Although it is not considered a complex task, our results show that the suggested QMS algorithm has the potential to significantly reduce the execution time of minimum search algorithms for cases where the database is very large. Moreover, an examination of the complexity of the studied problem was performed in order to highlight the advantages of this quantum algorithm over its classical analogs. Furthermore, we show how the suggested approach can be used in an unsupervised machine learning task by performing a quantum adaptation of the K-means algorithm. In conclusion, our results demonstrate that it is possible to search for minima in a classical database by utilizing information stored in a QRAM, which represents a significant contribution to the development of fault-tolerant quantum algorithms.
+ ACKNOWLEDGMENTS
+ We thank the Latin American Quantum Computing Center and the High-Performance Computing Center, both from SENAI CIMATEC, for supporting this research. We also thank the Quantum Open Source Foundation (QOSF) mentoring program, whose developed project originated this article.
+ [1] A. Sedra and K. Smith, Microelectronic Circuits (Oxford University Press, 2004).
+ [2] V. Giovannetti, S. Lloyd, and L. Maccone, Physical Review Letters 100, 160501 (2008).
+ [3] V. Giovannetti, S. Lloyd, and L. Maccone, Physical Review A 78, 052310 (2008).
+ [4] D. K. Park, F. Petruccione, and J.-K. K. Rhee, Scientific Reports 9, 1 (2019).
+ [5] P. Yuan and S. Zhang, arXiv preprint arXiv:2202.11302 (2022).
+ [6] K. Phalak, J. Li, and S. Ghosh, arXiv preprint arXiv:2210.14804 (2022).
+ [7] R. Asaka, K. Sakai, and R. Yahagi, Quantum Science and Technology 6, 035004 (2021).
+ [8] T. M. De Veras, I. C. De Araujo, D. K. Park, and A. J. Da Silva, IEEE Transactions on Computers 70, 2125 (2020).
+ [9] C. T. Hann, G. Lee, S. Girvin, and L. Jiang, PRX Quantum 2, 020311 (2021).
+ [10] O. Di Matteo, V. Gheorghiu, and M. Mosca, IEEE Transactions on Quantum Engineering 1, 1 (2020).
+ [11] M. Blencowe, Nature 468, 44 (2010).
+ [12] T. Hur, L. Kim, and D. K. Park, Quantum Machine Intelligence 4, 1 (2022).
+ [13] B. Broda, The European Physical Journal Plus 131, 1 (2016).
+ [14] V. Giovannetti, S. Lloyd, and L. Maccone, Physical Review Letters 100, 230502 (2008).
+ [15] S. Lu, Y. Zhang, and F. Liu, Quantum Information Processing 12, 3265 (2013).
+ [16] A. Hosoyamada, Y. Sasaki, S. Tani, and K. Xagawa, Theoretical Computer Science 842, 100 (2020).
+ [17] M. Naya-Plasencia, A. Schrottenloher, A. Chailloux, and L. Grassi, in QuAC: Quantum Algorithms for Cryptanalysis (2019).
+ [18] X. Bonnetain, A. Chailloux, A. Schrottenloher, and Y. Shen, arXiv preprint arXiv:2205.14023 (2022).
+ [19] B. Duan, J. Yuan, C.-H. Yu, J.
300
+ page_content=' Huang, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
301
+ page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
302
+ page_content=' Hsieh, Physics Letters A 384, 126595 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
303
+ page_content=' [20] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
304
+ page_content=' Wossnig, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
305
+ page_content=' Zhao, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
306
+ page_content=' Prakash, Physical review letters 120, 050502 (2018).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
307
+ page_content=' [21] I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
308
+ page_content=' Kerenidis and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
309
+ page_content=' Prakash, Physical Review A 101, 022316 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
310
+ page_content=' [22] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
311
+ page_content=' Shao and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
312
+ page_content=' Xiang, Physical Review A 101, 022322 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
313
+ page_content=' [23] S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
314
+ page_content=' Arunachalam, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
315
+ page_content=' Gheorghiu, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
316
+ page_content=' Jochym-O’Connor, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
317
+ page_content=' Mosca, and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
318
+ page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
319
+ page_content=' Srinivasan, New Journal of Physics 17, 123010 (2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
320
+ page_content=' [24] L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
321
+ page_content=' Bugalho, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
322
+ page_content=' Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
323
+ page_content=' Cruzeiro, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
324
+ page_content=' C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
325
+ page_content=' Chen, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
326
+ page_content=' Dai, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
327
+ page_content=' Englund, and Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
328
+ page_content=' Omar, arXiv preprint arXiv:2210.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
329
+ page_content='13494 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
330
+ page_content=' [25] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
331
+ page_content=' Paler, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
332
+ page_content=' Oumarou, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
333
+ page_content=' Basmadjian, Physical Review A 102, 032608 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
334
+ page_content=' [26] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
335
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
336
+ page_content=' Nielsen and I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
337
+ page_content=' Chuang, Quantum computation and quan- tum information (American Association of Physics Teachers, 2002).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
338
+ page_content=' [27] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
339
+ page_content=' Figgatt, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
340
+ page_content=' Maslov, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
341
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
342
+ page_content=' Landsman, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
343
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
344
+ page_content=' Linke, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
345
+ page_content=' Deb- nath, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
346
+ page_content=' Monroe, Nature communications 8, 1 (2017).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
347
+ page_content=' [28] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
348
+ page_content=' Seidel, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
349
+ page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
350
+ page_content='-U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
351
+ page_content=' Becker, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
352
+ page_content=' Bock, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
353
+ page_content=' Tcholtchev, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
354
+ page_content='-D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
355
+ page_content=' Gheorge-Pop, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
356
+ page_content=' Hauswirth, Quantum Science and Tech- nology (2023).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
357
+ page_content=' [29] P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
358
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
359
+ page_content=' Szabłowski, Quantum Information Processing 20, 1 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
360
+ page_content=' [30] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
361
+ page_content=' Durr and P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
362
+ page_content=' Hoyer, arXiv preprint quant-ph/9607014 (1996).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
363
+ page_content=' [31] N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
364
+ page_content=' Wiebe, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
365
+ page_content=' Kapoor, and K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
366
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
367
+ page_content=' Svore, Quantum information and computation 15, 318 (2015).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
368
+ page_content=' [32] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
369
+ page_content=' Chen, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
370
+ page_content=' Wei, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
371
+ page_content=' Gao, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
372
+ page_content=' Wang, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
373
+ page_content=' Tang, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
374
+ page_content=' Wu, and H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
375
+ page_content=' Guo, Quantum Information Processing 19, 1 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/IdE4T4oBgHgl3EQfhA2z/content/2301.05122v1.pdf'}
KtFJT4oBgHgl3EQfxS3S/content/tmp_files/2301.11634v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
KtFJT4oBgHgl3EQfxS3S/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
MtAyT4oBgHgl3EQfs_ll/content/tmp_files/2301.00586v1.pdf.txt ADDED
@@ -0,0 +1,1576 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.00586v1 [math.FA] 2 Jan 2023
2
+ Indeterminate Jacobi operators
3
+ Christian Berg and Ryszard Szwarc
4
+ January 3, 2023
5
+ Abstract
6
+ We consider the Jacobi operator (T, D(T)) associated with an in-
7
+ determinate Hamburger moment problem, i.e., the operator in ℓ2 de-
8
+ fined as the closure of the Jacobi matrix acting on the subspace of
9
+ complex sequences with only finitely many non-zero terms. It is well-
10
+ known that it is symmmetric with deficiency indices (1, 1).
11
+ For a
12
+ complex number z let pz, qz denote the square summable sequences
13
+ (pn(z)) and (qn(z)) corresponding to the orthonormal polynomials pn
14
+ and polynomials qn of the second kind. We determine whether linear
15
+ combinations of pu, pv, qu, qv for u, v ∈ C belong to D(T) or to the
16
+ domain of the self-adjoint extensions of T in ℓ2. The results depend
17
+ on the four Nevanlinna functions of two variables associated with the
18
+ moment problem. We also show that D(T) is the common range of
19
+ an explicitly constructed family of bounded operators on ℓ2.
20
+ Mathematics Subject Classification: Primary 47B25, 47B36, 44A60
21
+ Keywords. Jacobi matrices and operators, indeterminate moment prob-
22
+ lems.
23
+ 1
24
+ Introduction
25
+ We shall consider the Jacobi matrix J associated with a moment sequence
26
+ s = (sn)n≥0 of the form
27
+ sn =
28
+
29
+ xn dµ(x),
30
+ n = 0, 1, . . . ,
31
+ (1)
32
+ 1
33
+
34
+ where µ is a positive measure on R with infinite support and moments of
35
+ every order. It is a tridiagonal matrix of the form
36
+ J =
37
+
38
+
39
+
40
+
41
+
42
+ b0
43
+ a0
44
+ 0
45
+ . . .
46
+ a0
47
+ b1
48
+ a1
49
+ . . .
50
+ 0
51
+ a1
52
+ b2
53
+ . . .
54
+ ...
55
+ ...
56
+ ...
57
+ ...
58
+
59
+
60
+
61
+
62
+  ,
63
+ (2)
64
+ where an > 0, bn ∈ R, n ≥ 0 are given by the three term recurrence relation
65
+ xpn(x) = anpn+1(x) + bnpn(x) + an−1pn−1(x), n ≥ 0,
66
+ a−1 := 0.
67
+ Here (pn)n≥0 is the sequence of orthonormal polynomials associated with µ,
68
+ hence satisfying
69
+
70
+ pn(x)pm(x) dµ(x) = δn,m,
71
+ and pn is a real polynomial of degree n with positive leading coefficient. In
72
+ this paper we follow the terminology of [17]. Basic results about the classical
73
+ moment problem can also be found in [1] and [16]. Recent results about
74
+ indeterminate moment problems can be found in [5], [6] [7], [8].
75
+ It is clear that the proportional measures λµ, λ > 0 lead to the same
76
+ Jacobi matrix J, and the well-known Theorem of Favard (see [17, Theorem
77
+ 5.14]) states that any matrix of the form (2) with an > 0, bn ∈ R comes
78
+ from a unique moment sequence (sn) as above, normalized such that s0 = 1.
79
+ In the following we shall always assume that this normalization holds, and
80
+ consequently the solutions µ of (1) are probability measures and p0 = 1.
81
+ The Jacobi matrix acts as a symmetric operator in the Hilbert space ℓ2 of
82
+ square summable complex sequences. Its domain F consists of the complex
83
+ sequences (cn)n≥0 with only finitely many non-zero terms, and the action is
84
+ multiplication of the matrix J by c ∈ F considered as a column, i.e.,
85
+ (Jc)n := an−1cn−1 + bncn + ancn+1,
86
+ n ≥ 0.
87
+ (3)
88
+ Denoting (en)n≥0 the standard orthonormal basis of ℓ2, we have
89
+ F = span{en|n ≥ 0}.
90
+ Definition 1.1. The Jacobi operator associated with J is by definition the
91
+ closure (T, D(T)) of the symmetric operator (J, F).
92
+ 2
93
+
94
+ It is a classical fact that (T, D(T)) is a closed symmetric operator, and its
95
+ deficiency indices are either (0, 0) or (1, 1). These cases occur precisely if the
96
+ moment sequence (1) is determinate or indeterminate, i.e., there is exactly
97
+ one or several solutions µ satisfying (1).
98
+ By definition D(T) consists of those c ∈ ℓ2 for which there exists a se-
99
+ quence (c(k)) ∈ F such that limk→∞ c(k) = c and (Jc(k)) is a convergent
100
+ sequence in ℓ2. For such c we have Tc = limk→∞ Jc(k), and this limit is
101
+ independent of the choice of approximating sequence (c(k)).
102
+ Clearly, D(T) is closed under complex conjugation and
103
+ Tc = Tc,
104
+ c ∈ D(T).
105
+ The purpose of the present paper is to study the Jacobi operator (T, D(T))
106
+ as well as its self-adjoint extensions (Tt, D(Tt)), t ∈ R∗ := R ∪ {∞} in the
107
+ indeterminate case. We shall in particular give some families of sequences
108
+ c ∈ ℓ2 which belong to D(T), see Theorem 1.2–Theorem 1.4.
109
+ Section 2 is devoted to the proof of Theorem 1.2 after a presentation of
110
+ the deficiency spaces of (T, D(T)). The self-adjoint extensions of (T, D(T))
111
+ as well as their corresponding N-extremal solutions to (1), cf.
112
+ (26), are
113
+ introduced in Section 3.
114
+ In Theorem 3.2, Theorem 3.4 and Theorem 3.7 we describe vectors be-
115
+ longing to D(Tt) \ D(T). Like the results in Theorem 1.2–Theorem 1.4, they
116
+ depend on the Nevanlinna functions of two variables defined in (6), (7), (8),
117
+ (9).
118
+ In Section 4 we construct for each z0 ∈ C a bounded operator Ξz0 in ℓ2
119
+ with range D(T). The restriction of Ξz0 to (T − z0I)(D(T)) is a bijection
120
+ onto D(T) equal to (T − z0I)−1, see Theorem 4.3. It is based on a study of
121
+ the function space E defined in (28), and known to be a de Branges space of
122
+ entire functions by [10, Theorem 23]. We prove in particular Theorem 4.2,
123
+ showing that E is stable under the formation of difference quotients.
124
+ Various technical results about the Nevanlinna functions are given in
125
+ Section 5.
126
+ After this summary of the content of the present paper, we recall that
127
+ the adjoint operator (T ∗, D(T ∗)) is the maximal operator associated with J,
128
+ cf. [17, Proposition 6.5]. In fact, the matrix product of J and any column
129
+ vector c makes sense, cf. (3), and D(T ∗) consists of those c ∈ ℓ2 for which
130
+ the product Jc belongs to ℓ2. For c ∈ D(T ∗) we have T ∗c = Jc.
131
+ In the determinate case with a unique solution µ of (1), the Jacobi opera-
132
+ tor is self-adjoint and (pn) is an orthonormal basis of L2(µ). The self-adjoint
133
+ 3
134
+
135
+ operator of multiplication Mµ in L2(µ) given by
136
+ D(Mµ) = {f ∈ L2(µ) | xf(x) ∈ L2(µ)},
137
+ Mµf(x) = xf(x)
138
+ is unitarily equivalent with (T, D(T)) via the unitary operator U : ℓ2 → L2(µ)
139
+ given by U(en) = pn, n ≥ 0. We shall not study the determinate case in this
140
+ paper, but concentrate on the indeterminate case, where it is known that the
141
+ set of solutions µ to (1) is an infinite convex set V . The polynomials of the
142
+ second kind (qn) are given as
143
+ qn(z) =
144
+ � pn(z) − pn(x)
145
+ z − x
146
+ dµ(x),
147
+ z ∈ C,
148
+ where µ ∈ V is arbitrary.
149
+ We define and recall
150
+ pz := (pn(z)), qz := (qn(z)) ∈ ℓ2,
151
+ z ∈ C,
152
+ (4)
153
+ where we have followed the terminology of [17]. It is known that ||pz|| and
154
+ ||qz|| are positive continuous functions on C. It is therefore possible for c ∈ ℓ2
155
+ to define entire functions Fc, Gc as
156
+ Fc(z) =
157
+
158
+
159
+ n=0
160
+ cnpn(z),
161
+ Gc(z) =
162
+
163
+
164
+ n=0
165
+ cnqn(z),
166
+ z ∈ C.
167
+ (5)
168
+ We also have the following four entire functions of two complex variables,
169
+ called the Nevanlinna functions of the indeterminate moment problem:
170
+ A(u, v)
171
+ =
172
+ (u − v)
173
+
174
+
175
+ k=0
176
+ qk(u)qk(v)
177
+ (6)
178
+ B(u, v)
179
+ =
180
+ −1 + (u − v)
181
+
182
+
183
+ k=0
184
+ pk(u)qk(v)
185
+ (7)
186
+ C(u, v)
187
+ =
188
+ 1 + (u − v)
189
+
190
+
191
+ k=0
192
+ qk(u)pk(v)
193
+ (8)
194
+ D(u, v)
195
+ =
196
+ (u − v)
197
+
198
+
199
+ k=0
200
+ pk(u)pk(v),
201
+ (9)
202
+ see Section 7.1 in [17]. The two-variable functions were introduced in [11]
203
+ in a slightly different form, which was subsequently used in [3],[14].
204
+ An
205
+ 4
206
+
207
+ approximation to the two-variable functions was already considered in [1, p.
208
+ 123]. If the functions of [11] are marked with a ∗, we have
209
+ A∗(u, v)
210
+ =
211
+ −A(u, v), B∗(u, v) = −C(u, v),
212
+ C∗(u, v)
213
+ =
214
+ −B(u, v), D∗(u, v) = −D(u, v).
215
+ In the following we need several formulas about these functions, see Theo-
216
+ rem 5.1 and Corollary 5.2 in the Appendix, but at this point we just recall
217
+ that
218
+ A(u, v)D(u, v) − B(u, v)C(u, v) = 1,
219
+ u, v ∈ C.
220
+ (10)
221
+ We define entire functions of one variable by setting the second variable to
222
+ 0, i.e.,
223
+ A(u) = A(u, 0), B(u) = B(u, 0), C(u) = C(u, 0), D(u) = D(u, 0),
224
+ (11)
225
+ and by specialization of (10) we get
226
+ A(u)D(u) − B(u)C(u) = 1,
227
+ u ∈ C.
228
+ (12)
229
+ By Section 6.5 in [17] we have
230
+ pz, qz ∈ D(T ∗),
231
+ T ∗pz = zpz, T ∗qz = e0 + zqz,
232
+ z ∈ C.
233
+ (13)
234
+ Our first main result is the following:
235
+ Theorem 1.2. For all z ∈ C we have pz, qz /∈ D(T).
236
+ Let u, v ∈ C be given.
237
+ (i) There exists α ∈ C such that pu+αpv ∈ D(T) if and only if D(u, v) = 0.
238
+ In the affirmative case α is uniquely determined as α = B(u, v).
239
+ (ii) There exists β ∈ C such that qu+βqv ∈ D(T) if and only if A(u, v) = 0.
240
+ In the affirmative case β is uniquely determined as β = −C(u, v).
241
+ (iii) There exists γ ∈ C such that pu+γqv ∈ D(T) if and only if B(u, v) = 0.
242
+ In the affirmative case γ is uniquely determined as γ = −D(u, v). In
243
+ particular pu + γqu /∈ D(T) for all u, γ ∈ C.
244
+ The proof will be given in Section 2.
245
+ We shall next give results about the zero-sets of the entire functions
246
+ A, . . . , D of two variables.
247
+ 5
248
+
249
+ Theorem 1.3. Let F(u, v) denote any of the four functions A, B, C, D on
250
+ C2. For v ∈ C define
251
+ Z(F)v := {u ∈ C | F(u, v) = 0}.
252
+ (14)
253
+ Then Z(F)v is countably infinite. If v ∈ R then Z(F)v ⊂ R, and if v is in
254
+ either the upper or lower half-plane, then Z(F)v belongs to the same half-
255
+ plane.
256
+ As a follow up on the two previous theorems we have the following:
257
+ Theorem 1.4. Let v ∈ R be given and consider the set of real zeros Z(F)v
258
+ from Theorem 1.3.
259
+ (i) Let u ∈ Z(D)v be such that u < v and such that ]u, v[∩Z(D)v = ∅.
260
+ Then B(u, v) > 0, where pu + B(u, v)pv ∈ D(T) according to Theo-
261
+ rem 1.2.
262
+ (ii) Let u ∈ Z(A)v be such that u < v and such that ]u, v[∩Z(A)v = ∅. Then
263
+ C(u, v) < 0, where qu − C(u, v)qv ∈ D(T) according to Theorem 1.2.
264
+ The proofs of Theorem 1.3 and Theorem 1.4 will be given in Section 5.
265
+ 2
266
+ Preliminaires and proof of Theorem 1.2
267
+ Fix z0 ∈ C in the open upper half-plane and consider the deficiency spaces
268
+ ∆+(z0)
269
+ =
270
+ ker(T ∗ − z0I) = Cpz0
271
+ ∆−(z0)
272
+ =
273
+ ker(T ∗ − z0I) = Cpz0,
274
+ cf. (13).
275
+ We know from [2, section 80] that
276
+ D(T ∗) = D(T) ⊕ ∆+(z0) ⊕ ∆−(z0),
277
+ (15)
278
+ and the sum is direct as indicated by the ⊕ signs.
279
+ Proposition 2.1. For any λ ∈ C we have the decomposition from (15)
280
+ pλ = sλ + s+
281
+ λ pz0 + s−
282
+ λ pz0,
283
+ (16)
284
+ 6
285
+
286
+ where sλ ∈ D(T) and
287
+ s+
288
+ λ =
289
+ D(λ, z0)
290
+ 2iIm (z0)||pz0||2,
291
+ s−
292
+ �� = −
293
+ D(λ, z0)
294
+ 2iIm (z0)||pz0||2.
295
+ (17)
296
+ Similarly, we have the decomposition
297
+ qλ = rλ + r+
298
+ λ pz0 + r−
299
+ λ pz0,
300
+ (18)
301
+ where rλ ∈ D(T) and
302
+ r+
303
+ λ =
304
+ C(λ, z0)
305
+ 2iIm (z0)||pz0||2,
306
+ r−
307
+ λ = −
308
+ C(λ, z0)
309
+ 2iIm (z0)||pz0||2.
310
+ (19)
311
+ Proof. Applying the operator T ∗ − z0I to the Equation (16) gives
312
+ (T ∗ − z0I)pλ = (T − z0I)sλ + s+
313
+ λ (z0 − z0)pz0,
314
+ which is the splitting of the left-hand side according to the orthogonal de-
315
+ composition
316
+ ℓ2 = (T − z0I)(D(T)) ⊕ ∆+(z0).
317
+ (20)
318
+ Therefore, s+
319
+ λ (z0 − z0)pz0 is the orthogonal projection of (T ∗ − z0I)pλ onto
320
+ ∆+(z0), and hence
321
+ 2iIm (z0)s+
322
+ λ pz0 = ⟨(T ∗ − z0I)pλ, pz0⟩
323
+ pz0
324
+ ||pz0||2,
325
+ which gives the first formula in (17). The second formula is obtained similarly
326
+ by applying the operator (T ∗−z0I) to the Equation (16). Notice that ||pz0|| =
327
+ ||pz0||.
328
+ Applying the operator T ∗ − z0I to the Equation (18) gives
329
+ (T ∗ − z0I)qλ = (T − z0I)rλ + r+
330
+ λ (z0 − z0)pz0,
331
+ which is the splitting of the left-hand side according to the orthogonal de-
332
+ composition (20). Therefore, r+
333
+ λ (z0 − z0)pz0 is the orthogonal projection of
334
+ (T ∗ − z0I)qλ onto ∆+(z0), and hence
335
+ 2iIm (z0)r+
336
+ λ pz0 = ⟨(T ∗ − z0I)qλ, pz0⟩
337
+ pz0
338
+ ||pz0||2,
339
+ which gives the first formula in (19) because T ∗(qλ) = λqλ + e0 by (13). The
340
+ second formula is obtained similarly by applying the operator (T ∗ − z0I) to
341
+ the Equation (18).
342
+ 7
343
+
344
+ Corollary 2.2. For all λ ∈ C we have pλ, qλ /∈ D(T).
345
+ Proof. For λ = z0 we get from (16) and (17)
346
+ sz0 = 0,
347
+ s+
348
+ z0 = 1,
349
+ s−
350
+ z0 = 0,
351
+ showing that pz0 /∈ D(T). The case λ = z0 follows because D(T) is closed
352
+ under complex conjugation. Since z0 in the upper half-plane is arbitrary, the
353
+ assertion about pλ follows for λ ∈ C \ R.
354
+ For λ ∈ R we note that s−
355
+ λ = s+
356
+ λ ̸= 0 because z �→ D(λ, z) has only real
357
+ zeros, cf. [4, Theorem 3] or Theorem 1.3.
358
+ For λ = z0 we get from (19) that
359
+ r−
360
+ z0 =
361
+ −1
362
+ 2iIm (z0)||pz0||2 ̸= 0,
363
+ showing that qz0 /∈ D(T) and hence also qz0 /∈ D(T). Since z0 in the upper
364
+ half-plane is arbitrary, the assertion about qλ follows for λ ∈ C \ R.
365
+ For λ ∈ R we note that r−
366
+ λ = r+
367
+ λ , and by (57) we have
368
+ C(λ, z0) = D(z0)[A(λ) − ρC(λ)] ̸= 0,
369
+ ρ := B(z0)/D(z0).
370
+ because D has only real zeros. Furthermore, Im ρ > 0 because B/D is a Pick
371
+ function, cf. Proposition 5.8, so also the second factor is non-zero.
372
+ Remark 2.3. Concerning Corollary 2.2, it is clear that pλ /∈ D(T) for λ /∈ R
373
+ because otherwise pλ would be an eigenvector for T with eigenvalue λ, and
374
+ as T is symmetric, the eigenvalues are real.
375
+ A small modification yields also that qλ /∈ D(T) for λ /∈ R.
376
+ In fact,
377
+ otherwise by symmetry of T
378
+ ⟨Tqλ, qλ⟩ = ⟨qλ, Tqλ⟩.
379
+ (21)
380
+ The left-hand side of (21) equals ⟨λqλ+e0, qλ⟩ = λ||qλ||2 because ⟨e0, qλ⟩ = 0.
381
+ Similarly, the right-hand side of (21) equals λ||qλ||2, and finally λ must
382
+ be real. We show later that (T, D(T)) has no eigenvalues at all, cf. (37).
383
+ Proof of Theorem 1.2.
384
+ Corollary 2.2 proves the first assertion, and from this assertion it is clear
385
+ that there exists at most one number α satisfying pu + αpv ∈ D(T), and
386
+ similarly with β, γ.
387
+ 8
388
+
389
+ Let us now prove assertion (i) of the theorem.
390
+ By (16) we get
391
+ pu + αpv = (su + αsv) + (s+
392
+ u + αs+
393
+ v )pz0 + (s−
394
+ u + αs−
395
+ v )pz0,
396
+ so pu + αpv ∈ D(T) if and only if
397
+ s+
398
+ u + αs+
399
+ v = s−
400
+ u + αs−
401
+ v = 0,
402
+ which by (17) is equivalent to
403
+ D(u, z0) + αD(v, z0) = D(u, z0) + αD(v, z0) = 0.
404
+ (22)
405
+ The determinant of this linear system is
406
+ D := D(u, z0)D(v, z0) − D(u, z0)D(v, z0)
407
+ and using Lemma 5.7 with
408
+ x =
409
+ �B(u)
410
+ D(u)
411
+
412
+ , y =
413
+ �B(z0)
414
+ D(z0)
415
+
416
+ , z = y, w =
417
+ �B(v)
418
+ D(v)
419
+
420
+ ,
421
+ we get from Corollary 5.2 and (58)
422
+ D =
423
+ ����
424
+ B(u)
425
+ B(v)
426
+ D(u)
427
+ D(v)
428
+ ����
429
+ ����
430
+ B(z0)
431
+ B(z0)
432
+ D(z0)
433
+ D(z0)
434
+ ���� = D(u, v)D(z0, z0).
435
+ However, D(z0, z0) = −2Im (z0)||pz0||2 ̸= 0 so D = 0 iff D(u, v) = 0. There-
436
+ fore, if α is a solution to (22) we have D(u, v) = 0.
437
+ Suppose next that
438
+ D(u, v) = 0. To see that (22) has a solution α, we notice that D(v, z0) and
439
+ D(v, z0) cannot both be zero. In fact, defining ρ := B(z0)/D(z0) we have
440
+ Im (ρ) > 0 because B/D is a Pick function, cf. Proposition 5.8, and by (58)
441
+ D(v, z0) = 0 iff B(v) = ρD(v) while D(v, z0) = 0 iff B(v) = ρD(v), so both
442
+ equations cannot hold. Here we use that B(v) = D(v) = 0 is impossible
443
+ because of (12).
444
+ If D(v, z0) ̸= 0, then α := −D(u, z0)/D(v, z0) satisfies (22) because D =
445
+ 0. Similarly α := −D(u, z0)/D(v, z0) satisfies (22) if D(v, z0) ̸= 0.
446
+ Furthermore, in the case D(v, z0) ̸= 0 we get using (54) and D(u, v) = 0
447
+ that
448
+ D(u, z0) = D(u, v)C(v, z0) − B(u, v)D(v, z0) = −B(u, v)D(v, z0),
449
+ 9
450
+
451
+ so finally α = B(u, v). The case D(v, z0) ̸= 0 is similar.
452
+ Proof of (ii):
453
+ By (18) we get
454
+ qu + βqv = (ru + βrv) + (r+
455
+ u + βr+
456
+ v )pz0 + (r−
457
+ u + βr−
458
+ v )pz0,
459
+ so qu + βqv ∈ D(T) if and only if
460
+ r+
461
+ u + βr+
462
+ v = r−
463
+ u + βr−
464
+ v = 0,
465
+ which by (19) is equivalent to
466
+ C(u, z0) + βC(v, z0) = C(u, z0) + βC(v, z0) = 0.
467
+ (23)
468
+ The determinant of this linear system is
469
+ D1 := C(u, z0)C(v, z0) − C(u, z0)C(v, z0)
470
+ and using Lemma 5.7 with
471
+ x =
472
+ �A(u)
473
+ C(u)
474
+
475
+ , y =
476
+ �B(z0)
477
+ D(z0)
478
+
479
+ , z = y, w =
480
+ �A(v)
481
+ C(v)
482
+
483
+ we get from Corollary 5.2 combined with (55), (57), (58)
484
+ D1 =
485
+ ����
486
+ A(u)
487
+ A(v)
488
+ C(u)
489
+ C(v)
490
+ ����
491
+ ����
492
+ B(z0)
493
+ B(z0)
494
+ D(z0)
495
+ D(z0)
496
+ ���� = A(u, v)D(z0, z0).
497
+ As in case (i) we see that D1 = 0 iff A(u, v) = 0. Therefore, if β is a solution
498
+ to (23), we have A(u, v) = 0. Suppose next that A(u, v) = 0. To see that
499
+ (23) has a solution β, we notice as in (i) that C(v, z0) and C(v, z0) cannot
500
+ both be zero. For this we use that A/C is a Pick function by Proposition 5.8.
501
+ If C(v, z0) ̸= 0, then β := −C(u, z0)/C(v, z0) satisfies (23). Similarly
502
+ β := −C(u, z0)/C(v, z0) satisfies (23) if C(v, z0) ̸= 0.
503
+ Furthermore, in the case C(v, z0) ̸= 0 we get using (53) and A(u, v) = 0
504
+ that
505
+ C(u, z0) = C(u, v)C(v, z0) − A(u, v)D(v, z0) = C(u, v)C(v, z0),
506
+ so finally β = −C(u, v). The case C(v, z0) ̸= 0 is similar.
507
+ 10
508
+
509
+ Proof of (iii): By (16) and (18) we get
510
+ pu + γqv = (su + γrv) + (s+
511
+ u + γr+
512
+ v )pz0 + (s−
513
+ u + γr−
514
+ v )pz0,
515
+ so pu + γqv ∈ D(T) if and only if
516
+ s+
517
+ u + γr+
518
+ v = s−
519
+ u + γr−
520
+ v = 0,
521
+ which by (17) and (19) is equivalent to
522
+ D(u, z0) + γC(v, z0) = D(u, z0) + γC(v, z0) = 0.
523
+ (24)
524
+ The determinant of this linear system is
525
+ D2 := D(u, z0)C(v, z0) − D(u, z0)C(v, z0)
526
+ and using Lemma 5.7 with
527
+ x =
528
+
529
+ B(u)
530
+ D(u)
531
+
532
+ , y =
533
+
534
+ B(z0)
535
+ D(z0)
536
+
537
+ , z = y, w =
538
+
539
+ A(v)
540
+ C(v)
541
+
542
+ we get from Corollary 5.2 combined with (56), (57), (58)
543
+ D2 =
544
+ ����
545
+ B(u)
546
+ A(v)
547
+ D(u)
548
+ C(v)
549
+ ����
550
+ ����
551
+ B(z0)
552
+ B(z0)
553
+ D(z0)
554
+ D(z0)
555
+ ���� = B(u, v)D(z0, z0).
556
+ As in case (i) we see that D2 = 0 iff B(u, v) = 0. Therefore, if γ is a solution
557
+ to (24), we have B(u, v) = 0. Suppose next that B(u, v) = 0. We see like in
558
+ (ii) that if C(v, z0) ̸= 0, then γ := −D(u, z0)/C(v, z0) satisfies (24), and if
559
+ C(v, z0) ̸= 0, then γ := −D(u, z0)/C(v, z0) satisfies (24).
560
+ We finally see that in both cases γ = −D(u, v) because of (54).
561
+
562
+ Remark 2.4. The case (ii) can be deduced from case (i) by using the obser-
563
+ vation that the polynomials (qn+1(x)/q1(x))n≥0 are the orthonormal polyno-
564
+ mials associated with the truncated Jacobi matrix J(1) obtained from J by
565
+ removing the first row and column. See [1, p. 28]. We have
566
+ J(1)(Sc) = S(Jc) − a0⟨c, e0⟩,
567
+ c ∈ F,
568
+ where S is the bounded shift operator in ℓ2 given by (Sc)n := cn+1, n ≥ 0.
569
+ If we let (T (1), D(T (1))) denote the Jacobi operator associated with J(1),
570
+ one can prove that
571
+ v ∈ D(T (1)) ⇐⇒ v = Su, u ∈ D(T)
572
+ and
573
+ T (1)(Su) = S(Tu) − a0⟨u, e0⟩,
574
+ u ∈ D(T).
575
+ 11
576
+
577
+ 3
578
+ Self-adjoint extensions of the Jacobi oper-
579
+ ator
580
+ As before (sn) is an indeterminate moment sequence with s0 = 1.
581
+ The
582
+ corresponding Jacobi operator (T, D(T)) has deficiency indices (1, 1) and
583
+ the self-adjoint extensions in ℓ2 can be parametrized as the operators Tt, t ∈
584
+ R∗ = R ∪ {∞} with domain
585
+ D(Tt) = D(T) ⊕ C(q0 + tp0) for t ∈ R,
586
+ D(T∞) = D(T) ⊕ Cp0
587
+ (25)
588
+ and defined by the restriction of T ∗ to the domain, cf. [17, Theorem 6.23].
589
+ We recall that p0, q0 are defined in (4).
590
+ The purpose of this section is to give some results about the domains
591
+ D(Tt) of the self-adjoint operators Tt, t ∈ R∗.
592
+ For t ∈ R∗ we define the solutions to the moment problem
593
+ µt(·) := ⟨Et(·)e0, e0⟩,
594
+ (26)
595
+ where Et(·) is the spectral measure of the self-adjoint operator Tt.
596
+ The measures µt, t ∈ R∗ are precisely those measures µ ∈ V for which
597
+ the polynomials C[x] are dense in L2(µ) according to a famous theorem of
598
+ M. Riesz, cf. [15], and they are called N-extremal in [1] and von Neumann
599
+ solutions in [16]. They form a closed subset of ext(V ), the set of extreme
600
+ points of the convex set V . However, ext(V ) is known to be a dense subset
601
+ of V . They are characterized by the formula
602
+ � dµt(x)
603
+ x − z = − A(z) + tC(z)
604
+ B(z) + tD(z),
605
+ z ∈ C \ R, t ∈ R∗,
606
+ (27)
607
+ where A, . . . , D are the entire functions given in (11), cf. [17, Theorem 7.6].
608
+ Recall that (12) holds.
609
+ We summarize some of the properties of µt, which can be found in [1] and
610
+ [17].
611
+ Proposition 3.1.
612
+ (i) The solution µt is a discrete measure with support
613
+ equal to the countable zero set Λt of the entire function B(z) + tD(z),
614
+ with the convention that Λ∞ is the zero set of D. In particular Λt ⊂ R
615
+ for t ∈ R∗.
616
+ 12
617
+
618
+ (ii) The support of two different N-extremal solutions are disjoint, and each
619
+ point x0 ∈ R belongs to the support of a unique N-extremal measure µt,
620
+ where t ∈ R∗ is given as t = −B(x0)/D(x0) if D(x0) ̸= 0 and t = ∞ if
621
+ D(x0) = 0.
622
+ Let us consider the vector space
623
+ E := {Fc(z) =
624
+
625
+
626
+ n=0
627
+ cnpn(z) | c ∈ ℓ2}
628
+ (28)
629
+ of entire functions, cf. (5). It is a Hilbert space under the norm
630
+ ||Fc||2 =
631
+
632
+
633
+ n=0
634
+ |cn|2 =
635
+
636
+ |Fc(x)|2 dµ(x),
637
+ where µ ∈ V can be arbitrary. It is a reproducing kernel Hilbert space of
638
+ functions with the reproducing kernel
639
+ K(u, v) :=
640
+
641
+
642
+ n=0
643
+ pn(u)pn(v),
644
+ u, v ∈ C,
645
+ in the sense that
646
+
647
+ K(u, x)Fc(x) dµ(x) = Fc(u),
648
+ µ ∈ V, u ∈ C.
649
+ Note that (pn) is an orthonormal basis of E, and the mapping c �→ Fc is a
650
+ unitary operator of the Hilbert space ℓ2 onto E.
651
+ For each N-extremal measure µt the mapping c �→ Fc|supp(µt) is a unitary
652
+ operator of ℓ2 onto L2(µt). The inverse mapping is given by
653
+ f �→
654
+
655
+ ⟨f, pn⟩L2(µt)
656
+
657
+ n≥0 ,
658
+ f ∈ L2(µt),
659
+ and
660
+ f(x) =
661
+
662
+
663
+ n=0
664
+ ⟨f, pn⟩L2(µt)pn(x),
665
+ x ∈ supp(µt).
666
+ (29)
667
+ The series in (29) converges to f in L2(µt) and converges also locally uni-
668
+ formly for x ∈ C, but f is apriori only defined on supp(µt), so the equality
669
+ holds pointwise for x ∈ supp(µt). The series represents a holomorphic exten-
670
+ sion of f to all of C.
671
+ 13
672
+
673
+ The self-adjoint operator Tt from (25) is unitarily equivalent with the
674
+ multiplication operator Mµt on L2(µt) given by
675
+ Mµtf(x) = xf(x),
676
+ f ∈ L2(µt), x ∈ supp(µt).
677
+ Theorem 3.2. Let µt be an N-extremal measure and let λ ∈ C \ supp µt.
678
+ Then
679
+ wµt(λ)pλ + qλ ∈ D(Tt),
680
+ (30)
681
+ where
682
+ wµt(λ) :=
683
+
684
+ 1
685
+ x − λ dµt(x) = −C(λ, x)
686
+ D(λ, x),
687
+ x ∈ supp µt.
688
+ (31)
689
+ In particular, the ratio C(λ, x)/D(λ, x) does not depend on x ∈ supp µt.
690
+ Proof. Since λ /∈ supp µt the functions (x−λ)−1 and x(x−λ)−1 are bounded
691
+ on supp µt and in particular they belong to L2(µt). Thus (x−λ)−1 ∈ D(Mµt)
692
+ and we find
693
+ � pn(x)
694
+ x − λ dµt(x) = wµt(λ)pn(λ) + qn(λ),
695
+ where wµt(λ) is given by the first equality of (31). Moreover, by (29)
696
+ (x − λ)−1 =
697
+
698
+
699
+ n=0
700
+ [wµt(λ)pn(λ) + qn(λ)]pn(x),
701
+ x ∈ supp µt.
702
+ (32)
703
+ In view of the unitary equivalence of Tt and Mµt we get (30). Multiplying
704
+ (32) sidewise by x − λ gives
705
+ 1 = wµt(λ)(x − λ)
706
+
707
+
708
+ n=0
709
+ pn(λ)pn(x) + (x − λ)
710
+
711
+
712
+ n=0
713
+ qn(λ)pn(x),
714
+ x ∈ supp(µt).
715
+ By (8) and (9) we therefore get
716
+ wµt(λ)D(λ, x) + C(λ, x) = 0,
717
+ x ∈ supp µt.
718
+ Assume D(λ, x) = 0. Then C(λ, x) = 0, but this gives a contradiction to
719
+ (10). Hence D(λ, x) ̸= 0 and
720
+ wµt(λ) = −C(λ, x)
721
+ D(λ, x),
722
+ x ∈ supp µt,
723
+ which gives the second part of (31).
724
+ 14
725
+
726
+ Remark 3.3. Using the formulas (57) and (58) in the last expression in (31),
727
+ we get formula (27) with t = −B(x)/D(x) independent of x ∈ supp(µt) if
728
+ D(x) ̸= 0, and t = ∞ if D(x) = 0.
729
+ Note also that wµt(λ)pλ + qλ /∈ D(T) by Theorem 1.2 (iii) because
730
+ B(λ, λ) = −1.
731
+ We know from Theorem 1.2 that pλ, qλ /∈ D(T) for every λ ∈ C. We shall
732
+ now clarify when pλ, qλ belong to the domain of the self-adjoint extension Tt
733
+ associated with the N-extremal measure µt.
734
+ Theorem 3.4. For λ ∈ C and t ∈ R∗ we have
735
+ pλ ∈ D(Tt) ⇐⇒ D(λ, x) = 0 ∀x ∈ supp(µt) ⇐⇒ λ ∈ supp(µt).
736
+ (33)
737
+ qλ ∈ D(Tt) ⇐⇒ C(λ, x) = 0 ∀x ∈ supp(µt).
738
+ (34)
739
+ Proof. We define the entire functions gλ, hλ ∈ L2(µt) by
740
+ gλ(x) :=
741
+
742
+
743
+ k=0
744
+ pk(λ)pk(x),
745
+ hλ(x) :=
746
+
747
+
748
+ k=0
749
+ qk(λ)pk(x),
750
+ x ∈ C.
751
+ If pλ ∈ D(Tt) then (Tt − λI)pλ = 0 by (13), because Tt is a restriction
752
+ of T ∗. Furthermore, by the unitary equivalence between Tt and the multi-
753
+ plication operator Mµt on L2(µt) we have xgλ(x) ∈ L2(µt) and D(λ, x) =
754
+ (λ − x)gλ(x) = 0 in L2(µt). By discreteness of µt we get D(λ, x) = 0 for all
755
+ x ∈ supp(µt). The last equivalence of (33) follows from Remark 5.10. On
756
+ the other hand, it is easy to see that the last two equivalent conditions of
757
+ (33) imply pλ ∈ D(Tt), because the zero-function x �→ (λ − x)gλ(x) as well
758
+ as λgλ(x) are in L2(µt), hence also xgλ(x) ∈ L2(µt). Therefore gλ ∈ D(Mµt)
759
+ and finally pλ ∈ D(Tt).
760
+ If qλ ∈ D(Tt) then (Tt − λI)qλ = e0 by (13), because Tt is a restriction
761
+ of T ∗. This shows that (x − λ)hλ(x) = 1 in L2(µt) and hence C(λ, x) = 0
762
+ for all x ∈ supp(µt). On the other hand, if C(λ, x) = 0 for x ∈ supp(µt), we
763
+ conclude that xhλ(x) ∈ L2(µt), hence qλ ∈ D(Tt). This establishes (34).
764
+ We know from Proposition 3.1 that supp(µt) is the the zero set of the
765
+ entire function B(z) + tD(z) understood as D(z) if t = ∞. Using this we
766
+ get the following Corollary about pλ. We get a similar result about qλ from
767
+ (57).
768
+ 15
769
+
770
+ Corollary 3.5. For t ∈ R and t = ∞ we have
771
+ pλ ∈ D(Tt)
772
+ ⇐⇒
773
+ B(λ) + tD(λ) = 0,
774
+ qλ ∈ D(Tt)
775
+ ⇐⇒
776
+ A(λ) + tC(λ) = 0.
777
+ pλ ∈ D(T∞)
778
+ ⇐⇒
779
+ D(λ) = 0,
780
+ qλ ∈ D(T∞)
781
+ ⇐⇒
782
+ C(λ) = 0.
783
+ In particular pλ and qλ only belong to D(Tt) if λ ∈ R, and for λ ∈ R they
784
+ belong to a unique D(Tt). Furthermore, they never belong to the same domain
785
+ D(Tt).
786
+ Remark 3.6. Since D(T) ⊂ D(Tt) for all t ∈ R∗, it is clear that Corollary 3.5
787
+ implies that pλ, qλ /∈ D(T) as stated in Corollary 2.2.
788
+ We also have a kind of converse to Theorem 3.2.
789
+ Theorem 3.7. Assume that λ, τ ∈ C are such that τpλ + qλ ∈ D(Tt) for
790
+ some t ∈ R∗. Then λ /∈ supp(µt) and τ = wµt(λ) given by (31).
791
+ Proof. Assume that λ ∈ supp(µt). By Theorem 3.4 we know that pλ ∈ D(Tt)
792
+ and hence qλ ∈ D(Tt), contradicting Corollary 3.5.
793
+ Having established λ /∈ supp(µt), we get by Theorem 3.2 that (τ −
794
+ wµt(λ))pλ ∈ D(Tt), but since pλ /∈ D(Tt), we get τ − wµt(λ) = 0.
795
+ Theorem 3.8. Let t ∈ R∗ and λ ∈ C \ supp(µt) be given. Then there exists
796
+ a unique pair (s, c) ∈ D(T) × C depending on t, λ such that
797
+ wµt(λ)pλ + qλ =
798
+
799
+ s + c(q0 + tp0),
800
+ t ∈ R
801
+ s + cp0,
802
+ t = ∞.
803
+ We have
804
+ c =
805
+ � −1/(B(λ) + tD(λ)),
806
+ t ∈ R
807
+ −1/D(λ),
808
+ t = ∞,
809
+ and s is given by inserting the value of c.
810
+ Proof. Recall that λ ∈ C \ supp(µt) if and only if B(λ) + tD(λ) ̸= 0 when
811
+ t ∈ R, and that λ ∈ C \ supp(µ∞) if and only if D(λ) ̸= 0. The existence
812
+ and uniqueness of (s, c) follow from Theorem 3.2 and formula (25).
813
+ In case t ∈ R we have
814
+ wµt(λ)pλ + qλ − c(q0 + tp0) ∈ D(T),
815
+ 16
816
+
817
+ and fixing z0 in the open upper half-plane we have by Proposition 2.1
818
+ wµt(λ)s+
819
+ λ + r+
820
+ λ − c(r+
821
+ 0 + ts+
822
+ 0 ) = wµt(λ)s−
823
+ λ + r−
824
+ λ − c(r−
825
+ 0 + ts−
826
+ 0 ) = 0,
827
+ or equivalently by (17) and (19)
828
+ wµt(λ)D(λ, z0) + C(λ, z0) − c(C(0, z0) + tD(0, z0)) = 0,
829
+ (35)
830
+ and
831
+ wµt(λ)D(λ, z0) + C(λ, z0) − c(C(0, z0) + tD(0, z0)) = 0.
832
+ (36)
833
+ From (36) and (31) we get for x ∈ supp(µt)
834
+ c
835
+ =
836
+ −wµt(λ)D(λ, z0) + C(λ, z0)
837
+ B(z0) + tD(z0)
838
+ =
839
+ C(λ, x)D(λ, z0) − D(λ, x)C(λ, z0)
840
+ (B(z0) + tD(z0))D(λ, x)
841
+ =
842
+ −D(z0, λ)C(λ, x) − B(z0, λ)D(λ, x)
843
+ (B(z0) + tD(z0))D(λ, x)
844
+ =
845
+
846
+ D(z0, x)
847
+ (B(z0) + tD(z0))D(λ, x) = −
848
+ 1
849
+ B(λ) + tD(λ).
850
+ Here we have first used (54) and next used (58) twice. Finally we recall that
851
+ t = −B(x)/D(x) for x ∈ supp(µt), cf. Proposition 3.1.
852
+ Note that (35) leads to the same expression for c.
853
+ The case t = ∞ is treated in the same way.
854
+ 4
855
+ Parametrizations of the domain of the Ja-
856
+ cobi operator
857
+ The Jacobi operator (T, D(T)) in the indeterminate case is regular in the
858
+ sense of [13, p. 20], i.e., for any z ∈ C there exists d(z) > 0 such that
859
+ ||(T − zI)c|| ≥ d(z)||c||,
860
+ c ∈ D(T).
861
+ (37)
862
+ For z ∈ C \ R this is true with d(z) = |Im (z)|, and for z ∈ R let t0 ∈ R∗ be
863
+ such that z ∈ supp(µt0). For t ∈ R∗ \ {t0} the distance
864
+ dt(z) := min{|z − x| | x ∈ supp(µt)} > 0,
865
+ 17
866
+
867
+ can be used in (37), since we have
868
+ ||(T − zI)c||2 =
869
+
870
+ |(x − z)Fc(x)|2 dµt(x) ≥ dt(z)2||c||2,
871
+ where Fc is given in (5).
872
+ We have the orthogonal decomposition in closed subspaces
873
+ ℓ2 = (T − zI)(D(T)) ⊕ Cpz,
874
+ z ∈ C.
875
+ (38)
876
+ The operator (T, D(T)) has no eigenvalues, has empty continuous spec-
877
+ trum, and the spectrum σ(T) = C is equal to the residual spectrum, cf. [18,
878
+ p.209].
879
+ For z0 ∈ C we have the orthogonal expansion
880
+ pn(z) − pn(z0)
881
+ z − z0
882
+ =
883
+ n−1
884
+
885
+ k=0
886
+ an,k(z0)pk(z),
887
+ z ∈ C
888
+ (39)
889
+ of the polynomial (pn(z) − pn(z0)/(z − z0) of degree n − 1, and it is easy to
890
+ see that
891
+ an,k(z0) =
892
+ � pn(x) − pn(z0)
893
+ x − z0
894
+ pk(x) dµ(x) = qn(z0)pk(z0) − pn(z0)qk(z0), (40)
895
+ where µ ∈ V is an arbitrary solution to (1), cf. [1, p. 18].
896
+ Lemma 4.1. The coefficients an,k(z0) from (40) satisfy
897
+ |an,k(z0)|2 ≤
898
+
899
+ |pn(z0)|2 + |qn(z0)|2� �
900
+ |pk(z0)|2 + |qk(z0)|2�
901
+ .
902
+ (41)
903
+ Therefore
904
+
905
+
906
+ n=k+1
907
+ |an,k(z0)|2 ≤ (||pz0||2 + ||qz0||2)(|pk(z0)|2 + |qk(z0)|2).
908
+ (42)
909
+ Furthermore,
910
+
911
+
912
+ n=0
913
+ ����
914
+ pn(z) − pn(z0)
915
+ z − z0
916
+ ����
917
+ 2
918
+ ≤ ||pz||2 �
919
+ ||pz0||2 + ||qz0||2�2 .
920
+ (43)
921
+ In particular, for z → z0
922
+
923
+
924
+ n=0
925
+ |p′
926
+ n(z0)|2 ≤ ||pz0||2 �
927
+ ||pz0||2 + ||qz0||2�2 .
928
+ 18
929
+
930
+ Proof. Formula (41) is a consequence of the Cauchy-Schwarz inequality.
931
+ From (39) and (41) we get
932
+ ����
933
+ pn(z) − pn(z0)
934
+ z − z0
935
+ ����
936
+ 2
937
+
938
+ n−1
939
+
940
+ k=0
941
+ |an,k(z0)|2
942
+ n−1
943
+
944
+ k=0
945
+ |pk(z)|2
946
+
947
+ ||pz||2
948
+ n−1
949
+
950
+ k=0
951
+
952
+ |pn(z0)|2 + |qn(z0)|2� �
953
+ |pk(z0)|2 + |qk(z0)|2�
954
+ ,
955
+ and finally
956
+
957
+
958
+ n=0
959
+ ����
960
+ pn(z) − pn(z0)
961
+ z − z0
962
+ ����
963
+ 2
964
+
965
+ ||pz||2
966
+
967
+
968
+ k=0
969
+
970
+
971
+ |pk(z0)|2 + |qk(z0)|2�
972
+
973
+
974
+ n=k+1
975
+
976
+ |pn(z0)|2 + |qn(z0)|2�
977
+
978
+
979
+ ||pz||2 �
980
+ ||pz0||2 + pz0||2�2 ,
981
+ which yields (43).
982
+ We shall now show that the Hilbert space E = {Fc(z)} defined in (28) is
983
+ stable under difference quotients:
984
+ Theorem 4.2. For c ∈ ℓ2 and z0 ∈ C there exists ξ(c, z0) ∈ ℓ2 such that
985
+ Fc(z) − Fc(z0)
986
+ z − z0
987
+ = Fξ(c,z0)(z) ∈ E,
988
+ (44)
989
+ and the coordinates of ξ(c, z0) are defined by
990
+ ξk(c, z0) =
991
+
992
+
993
+ n=k+1
994
+ cnan,k(z0),
995
+ k ≥ 0.
996
+ (45)
997
+ Furthermore,
998
+ ||ξ(c, z0)|| ≤ ||c||
999
+
1000
+ ||pz0||2 + ||qz0||2�
1001
+ .
1002
+ (46)
1003
+ Proof. The series in (45) is absolutely convergent being the product of two
1004
+ ℓ2 sequences. Furthermore, by the Cauchy-Schwarz inequality and (41) we
1005
+ get
1006
+ |ξk(c, z0)|2 ≤ ||c||2(||pz0||2 + ||qz0||2)
1007
+
1008
+ |pk(z0)|2 + |qk(z0)|2�
1009
+ ,
1010
+ 19
1011
+
1012
+ and therefore (ξk(c, z0)) ∈ ℓ2 and (46) holds.
1013
+ We next find
1014
+ Fc(z) − Fc(z0)
1015
+ z − z0
1016
+ =
1017
+
1018
+
1019
+ n=0
1020
+ cn
1021
+ pn(z) − pn(z0)
1022
+ z − z0
1023
+ ,
1024
+ z ̸= z0.
1025
+ Inserting the expression (39) on the right-hand side, we get for z ̸= z0
1026
+ Fc(z) − Fc(z0)
1027
+ z − z0
1028
+ =
1029
+
1030
+
1031
+ n=1
1032
+ cn
1033
+ n−1
1034
+
1035
+ k=0
1036
+ an,k(z0)pk(z)
1037
+ =
1038
+
1039
+
1040
+ k=0
1041
+ pk(z)
1042
+
1043
+
1044
+ n=k+1
1045
+ cnan,k(z0)
1046
+ =
1047
+
1048
+
1049
+ k=0
1050
+ ξk(c, z0)pk(z),
1051
+ where the rearrangement is possible due to absolute convergence:
1052
+ ����
1053
+ Fc(z) − Fc(z0)
1054
+ z − z0
1055
+ ����
1056
+
1057
+
1058
+
1059
+ n=1
1060
+ |cn|
1061
+ n−1
1062
+
1063
+ k=0
1064
+ |an,k(z0)| |pk(z)|
1065
+ =
1066
+
1067
+
1068
+ k=0
1069
+ |pk(z)|
1070
+
1071
+
1072
+ n=k+1
1073
+ |cn| |an,k(z0)|
1074
+
1075
+ ||c||
1076
+
1077
+
1078
+ k=0
1079
+ |pk(z)|
1080
+
1081
+
1082
+
1083
+ n=k+1
1084
+ |an,k(z0)|2
1085
+ �1/2
1086
+
1087
+ ||c||
1088
+
1089
+
1090
+ k=0
1091
+ |pk(z)|
1092
+ ��
1093
+ ||pz0||2 + ||qz0||2� �
1094
+ |pk(z0)|2 + |qk(z0)|2��1/2
1095
+
1096
+ ||c||
1097
+
1098
+ ||pz0||2 + ||qz0||2�1/2 ||pz||
1099
+ � ∞
1100
+
1101
+ k=0
1102
+
1103
+ |pk(z0)|2 + |qk(z0)|2�
1104
+ �1/2
1105
+ =
1106
+ ||c|| ||pz||
1107
+
1108
+ ||pz0||2 + ||qz0||2�
1109
+ ,
1110
+ where we have used (42).
1111
+ It is now clear that the entire functions z �→ (Fc(z) − Fc(z0))/(z − z0),
1112
+ with value F ′
1113
+ c(z0) for z = z0, and Fξ(c,z0)(z) agree.
1114
+ 20
1115
+
1116
+ Theorem 4.3. Let Ξz0 denote the bounded operator in ℓ2 defined by
1117
+ Ξz0(c) := ξ(c, z0),
1118
+ z0 ∈ C, c ∈ ℓ2,
1119
+ (47)
1120
+ where ξ(c, z0) is defined in Theorem 4.2.
1121
+ We have Ξz0(ℓ2) = D(T) and
1122
+ ker(Ξz0) = Ce0 for each z0 ∈ C.
1123
+ Furthermore, for z0 ∈ C
1124
+ (T − z0I)Ξz0(c) + Fc(z0)e0 = c,
1125
+ c ∈ ℓ2.
1126
+ (48)
1127
+ The restriction of Ξz0 to (T − z0I)(D(T)) is a bijection onto D(T) equal to
1128
+ (T − z0I)−1.
1129
+ Proof. Let U : ℓ2 → E denote the unitary mapping given by U(c) = Fc.
1130
+ Then
1131
+ U(Jc)(z) = z
1132
+
1133
+
1134
+ k=0
1135
+ ckpk(z),
1136
+ c ∈ F, z ∈ C,
1137
+ i.e., U is the intertwining operator between J and the densely defined oper-
1138
+ ator of multiplication with z on C[z] ⊂ E. Therefore
1139
+ U(Tc)(z) = zFc(z),
1140
+ c ∈ D(T), z ∈ C.
1141
+ (49)
1142
+ For c ∈ ℓ2 and z0 ∈ C we have
1143
+ c − Fc(z0)e0 ⊥ pz0,
1144
+ so by (38) c − Fc(z0)e0 belongs to (T − z0I)(D(T)). Therefore, there exists
1145
+ a unique vector v ∈ D(T) such that
1146
+ c − Fc(z0)e0 = (T − z0I)(v),
1147
+ (50)
1148
+ and applying U to (50) we get by (49)
1149
+ Fc(z) − Fc(z0) = (z − z0)Fv(z),
1150
+ z ∈ C.
1151
+ Now (44) shows that Fv(z) = Fξ(c,z0)(z) for z ̸= z0, hence for all z, and finally
1152
+ v = ξ(c, z0), showing that Ξz0(c) ∈ D(T). Inserting v = ξ(c, z0) in (50) yields
1153
+ (48).
1154
+ For v ∈ D(T) we define c = (T − z0I)(v). Then Fc(z0) = 0 as c ⊥ pz0 by
1155
+ (38), and then (48) gives (T − z0I)Ξz0(c) = c. By injectivity of T − z0I we
1156
+ get v = Ξz0(c) = (T − z0I)−1(c).
1157
+ It is easy to see that ξ(e0, z0) = 0, hence Ce0 ⊆ ker(Ξz0), and from (48)
1158
+ the converse inclusion follows.
1159
+ 21
1160
+
1161
+ Remark 4.4. The operator Ξz0 defined in (47) is seen to satisfy
1162
+ Ξz0(en) =
1163
+ n−1
1164
+
1165
+ k=0
1166
+ an,k(z0)ek,
1167
+ n ≥ 1,
1168
+ and it follows easily that Ξz0(F) = F.
1169
+ Moreover, since (Ce0)⊥ = {c ∈ ℓ2 | c0 = 0}, we have the following
1170
+ parametrizations of D(T)
1171
+ D(T) = {Ξz0(c) | c ∈ ℓ2, c0 = 0},
1172
+ z0 ∈ C.
1173
+ 5
1174
+ Appendix
1175
+ We need the following result about the Nevanlinna functions defined in the
1176
+ Introduction.
1177
+ Theorem 5.1. For u, v, w ∈ C we have
1178
+ A(u, v)
1179
+ =
1180
+ C(u, w)A(w, v) − A(u, w)B(w, v)
1181
+ (51)
1182
+ B(u, v)
1183
+ =
1184
+ D(u, w)A(w, v) − B(u, w)B(w, v)
1185
+ (52)
1186
+ C(u, v)
1187
+ =
1188
+ C(u, w)C(w, v) − A(u, w)D(w, v)
1189
+ (53)
1190
+ D(u, v)
1191
+ =
1192
+ D(u, w)C(w, v) − B(u, w)D(w, v).
1193
+ (54)
1194
+ From the obvious relations
1195
+ A(u, v) = −A(v, u), B(u, v) = −C(v, u), D(u, v) = −D(v, u)
1196
+ and putting w = 0 in the formulas of Theorem 5.1, we get the following
1197
+ formulas in terms of the one variable functions (11):
1198
+ Corollary 5.2. For u, v ∈ C we have
1199
+ A(u, v)
1200
+ =
1201
+ ����
1202
+ A(u)
1203
+ A(v)
1204
+ C(u)
1205
+ C(v)
1206
+ ����
1207
+ (55)
1208
+ B(u, v)
1209
+ =
1210
+ ����
1211
+ B(u)
1212
+ A(v)
1213
+ D(u)
1214
+ C(v)
1215
+ ����
1216
+ (56)
1217
+ C(u, v)
1218
+ =
1219
+ ����
1220
+ A(u)
1221
+ B(v)
1222
+ C(u)
1223
+ D(v)
1224
+ ����
1225
+ (57)
1226
+ D(u, v)
1227
+ =
1228
+ ����
1229
+ B(u)
1230
+ B(v)
1231
+ D(u)
1232
+ D(v)
1233
+ ���� .
1234
+ (58)
1235
+ 22
1236
+
1237
+ We have not been able to find the formulas of Theorem 5.1 in the litera-
1238
+ ture, so we indicate a proof. The formulas of Corollary 5.2 expressing the two
1239
+ variable functions in terms of the one variable functions were, as far as we
1240
+ know, first given in [11] and included in [17, exercise 7.8 (3)]. (Unfortunately
1241
+ there is a misprint in the exercise: B and C are interchanged.)
1242
+ We begin by introducing polynomial approximations to the Nevanlinna
1243
+ functions.
1244
+ Proposition 5.3. [17, Proposition 5.24] For u, v ∈ C and n ≥ 0 we have
1245
+ An(u, v)
1246
+ :=
1247
+ (u − v)
1248
+ n
1249
+
1250
+ k=0
1251
+ qk(u)qk(v) = an
1252
+ ����
1253
+ qn+1(u)
1254
+ qn+1(v)
1255
+ qn(u)
1256
+ qn(v)
1257
+ ����
1258
+ Bn(u, v)
1259
+ :=
1260
+ −1 + (u − v)
1261
+ n
1262
+
1263
+ k=0
1264
+ pk(u)qk(v) = an
1265
+ ����
1266
+ pn+1(u)
1267
+ qn+1(v)
1268
+ pn(u)
1269
+ qn(v)
1270
+ ����
1271
+ Cn(u, v)
1272
+ :=
1273
+ 1 + (u − v)
1274
+ n
1275
+
1276
+ k=0
1277
+ qk(u)pk(v) = an
1278
+ ����
1279
+ qn+1(u)
1280
+ pn+1(v)
1281
+ qn(u)
1282
+ pn(v)
1283
+ ����
1284
+ Dn(u, v)
1285
+ :=
1286
+ (u − v)
1287
+ n
1288
+
1289
+ k=0
1290
+ pk(u)pk(v) = an
1291
+ ����
1292
+ pn+1(u)
1293
+ pn+1(v)
1294
+ pn(u)
1295
+ pn(v)
1296
+ ���� .
1297
+ It is important to notice that
1298
+ ����
1299
+ An(u, v)
1300
+ Bn(u, v)
1301
+ Cn(u, v)
1302
+ Dn(u, v)
1303
+ ���� = 1 for (u, v) ∈ C2,
1304
+ cf. [17, Equation(5.57)].
1305
+ For later use we introduce the transfer matrix with determinant 1
1306
+ hn(u, v) =
1307
+
1308
+ Cn(u, v)
1309
+ An(u, v)
1310
+ −Dn(u, v)
1311
+ −Bn(u, v)
1312
+
1313
+ ,
1314
+ u, v ∈ C, n ≥ 0.
1315
+ (59)
1316
+ The name transfer matrix is motivated by
1317
+ Proposition 5.4. For u, v ∈ C, n ≥ 0 we have
1318
+
1319
+ pn(u)
1320
+ qn(u)
1321
+ pn+1(u)
1322
+ qn+1(u)
1323
+
1324
+ hn(u, v) =
1325
+
1326
+ pn(v)
1327
+ qn(v)
1328
+ pn+1(v)
1329
+ qn+1(v)
1330
+
1331
+ (60)
1332
+ 23
1333
+
1334
+ Proof. The four formulas of Proposition 5.3 can be expressed as the matrix
1335
+ equation
1336
+ hn(u, v) = an
1337
+
1338
+ qn+1(u)
1339
+ −qn(u)
1340
+ −pn+1(u)
1341
+ pn(u)
1342
+ � �
1343
+ pn(v)
1344
+ qn(v)
1345
+ pn+1(v)
1346
+ qn+1(v)
1347
+
1348
+ .
1349
+ However, by [17, Equation (5.52)]
1350
+
1351
+ qn+1(u)
1352
+ −qn(u)
1353
+ −pn+1(u)
1354
+ pn(u)
1355
+ �−1
1356
+ = an
1357
+
1358
+ pn(u)
1359
+ qn(u)
1360
+ pn+1(u)
1361
+ qn+1(u)
1362
+
1363
+ ,
1364
+ and (60) follows.
1365
+ By the uniqueness of a matrix hn(u, v) satisfying (60) we get:
1366
+ Corollary 5.5. For u, v, w ∈ C, n ≥ 0 we have
1367
+ hn(u, w)hn(w, v) = hn(u, v),
1368
+ hn(u, v) = hn(v, u)−1.
1369
+ Proof of Theorem 5.1. Letting n tend to infinity in (59) we obtain the
1370
+ entire matrix function
1371
+ h(u, v) =
1372
+
1373
+ C(u, v)
1374
+ A(u, v)
1375
+ −D(u, v)
1376
+ −B(u, v)
1377
+
1378
+ ,
1379
+ u, v ∈ C,
1380
+ with determinant 1 satisfying h(u, w)h(w, v) = h(u, v), which is equivalent
1381
+ to the formulas (51),(52),(53) and (54) of the theorem.
1382
+
1383
+ Remark 5.6. The M¨obius transformation M(u, v) : C∗ → C∗ defined by
1384
+ M(u, v)(z) :=
1385
+ C(u, v)z + A(u, v)
1386
+ −D(u, v)z − B(u, v),
1387
+ z ∈ C∗,
1388
+ maps the Weyl circle Kv onto the Weyl circle Ku, where Ku = R∗ if u ∈ R.
1389
+ For u ∈ C \ R the Weyl circle is defined in [17, Section 7.3].
1390
+ The following Lemma unifies some calculations:
1391
+ 24
1392
+
1393
+ Lemma 5.7. For vectors x, y, z, w ∈ C2 we have the following determinant
1394
+ equation
1395
+ ����
1396
+ |x y|
1397
+ |x z|
1398
+ |w y|
1399
+ |w z|
1400
+ ���� = |x w||y z|,
1401
+ where
1402
+ |x y| =
1403
+ ����
1404
+ x1
1405
+ y1
1406
+ x2
1407
+ y2
1408
+ ���� etc.
1409
+ Proof. First method: Direct computation.
1410
+ Second method: Define
1411
+ M(x, y, z, w) = |x y||z w| + |x z||w y| + |x w||y z|,
1412
+ where it should be noticed that y, z, w appear in its cyclic permutations.
1413
+ Clearly M is a 4-linear form on C2, and it is alternating, i.e., is zero, if
1414
+ any two arguments agree. An alternating 4-linear form on a vector space of
1415
+ dimension ≤ 3 is identically zero, hence M ≡ 0.
1416
+ In various proofs we need that certain functions are Pick function, i.e.,
1417
+ holomorphic functions in the cut plane C\R with certain properties, see [12].
1418
+ Proposition 5.8. The meromorphic functions B/D and A/C are Pick func-
1419
+ tions, i.e., they map the upper (resp. lower) open half-plane into itself.
1420
+ Proof. The result about B/D is in [3, Proposition 1.3]. The result about
1421
+ A/C can be deduced from the previous result by considering the indeter-
1422
+ minate moment problem corresponding to the truncated Jacobi matrix J(1)
1423
+ considered in Remark 2.4. There are simple relations between the Nevan-
1424
+ linna functions �A, . . . , �D of the truncated problem and those of the original
1425
+ moment problem, see [14]:
1426
+ A(z) = a−2
1427
+ 0 �D(z),
1428
+ C(z) = −b0a−2
1429
+ 0 �D(z) − �B(z),
1430
+ z ∈ C.
1431
+ Therefore −C/A = b0+a2
1432
+ 0( �B/ �D), which shows that −C/A is a Pick function,
1433
+ and so is A/C.
1434
+ By a famous Theorem of M. Riesz each of the functions Fc defined in (5)
1435
+ are of minimal exponential type meaning that for each ε > 0 there exists a
1436
+ constant Cε > 0 such that
1437
+ |Fc(z)| ≤ Cεeε|z|,
1438
+ z ∈ C.
1439
+ 25
1440
+
1441
+ This follows from the Cauchy-Schwarz inequality because the norm ||pz|| sat-
1442
+ isfies the same inequality by [1, Theorem 2.4.3]. Using that the polynomials
1443
+ qn+1/q1 are the orthonormal polynomials for the indeterminate truncated Ja-
1444
+ cobi matrix J(1), cf. Remark 2.4, we also get that the functions Gc from (5)
1445
+ are of minimal exponential type.
1446
+ We next recall an important property of these functions in case they are
1447
+ not polynomials.
1448
+ Proposition 5.9. For each c ∈ ℓ2\F the functions Fc, Gc are transcendental
1449
+ and have a countably infinite set of zeros.
1450
+ In particular, for each v ∈ C the functions of the variable u, A(u, v), B(u, v),
1451
+ C(u, v), D(u, v) have a countably infinite set of zeros.
1452
+ Proof. An entire transcendental function f of minimal exponential type has
1453
+ a countably infinite set of zeros. In fact, the order ρ of f is either strictly
1454
+ less than 1 or equal to 1, and in the latter case the type of f is zero. In the
1455
+ first case the result follows from the Hadamard factorization Theorem, cf.
1456
+ [9, p.22]. In the second case the result follows from a Theorem of Lindel¨of,
1457
+ see [9, Theorem 2.20.3].
1458
+ For v ∈ C and F being one of the functions A, . . . , D, we see that u �→
1459
+ F(u, v) is an entire transcendental function of minimal exponential type.
1460
+ Proof of Theorem 1.3: Case 1: Let us first consider the case of D with
1461
+ Z(D)v = {u ∈ C | D(u, v) = 0} for given v ∈ C, cf. (14).
1462
+ If v ∈ R then Z(D)v ⊂ R by [4, Theorem 3], and furthermore Z(D)v
1463
+ equals the support of the unique N-extremal measure which contains v in
1464
+ the support.
1465
+ If v ∈ C \ R, then D(v) ̸= 0, and using (58) we get
1466
+ D(u, v) = D(v)[B(u) − ρD(u)], ρ := B(v)/D(v),
1467
+ so u ∈ Z(D)v iff B(u) = ρD(u). For such u we must have D(u) ̸= 0 for
1468
+ otherwise B(u) = D(u) = 0 contradicting (12). This gives u ∈ Z(D)v iff
1469
+ B(u)/D(u) = ρ. Using that B/D is a Pick function by Proposition 5.8, we
1470
+ see that u, v belong to the same half-plane.
1471
+ Case 2: We consider Z(B)v and use (56), viz.
1472
+ B(u, v) = B(u)C(v) − D(u)A(v).
1473
+ 26
1474
+
1475
+ Let first v ∈ R. If C(v) = 0 then A(v) ̸= 0 by (12), so B(u, v) = 0 iff
1476
+ D(u) = 0, hence Z(B)v ⊂ R. If C(v) ̸= 0 then
1477
+ B(u, v) = C(v)[B(u) − τD(u)], τ := A(v)/C(v) ∈ R,
1478
+ so Z(B)v is the zero set of B − τD, hence real by Proposition 3.1.
1479
+ Let next v ∈ C \ R. Then C(v) ̸= 0, so u ∈ Z(B)v iff B(u) = τD(u) with
1480
+ τ as above, but this is only possible if D(u) ̸= 0 and hence B(u)/D(u) = τ.
1481
+ Using that both A/C and B/D are Pick functions, cf. Proposition 5.8, we
1482
+ see that u, v belong to the same half-plane.
1483
+ Case 3: We consider Z(C)v and use (57), viz.
1484
+ C(u, v) = A(u)D(v) − C(u)B(v).
1485
+ By considering the cases v ∈ R and v ∈ C\R separately and factor out D(v)
1486
+ in case it is non-zero, we may proceed as in case 2.
1487
+ Finally, the case of Z(A)v follows from the case 1 by considering the
1488
+ truncated case as in Remark 2.4. □
1489
+ Remark 5.10. As noticed in the proof above one has for v ∈ R:
1490
+ D(u, v) = 0 ⇐⇒ u ∈ supp(µ),
1491
+ where µ is the N-extremal measure such that v ∈ supp(µ).
1492
+ Compare also with Remark 3.3.
1493
+ Proof of Proposition 1.4:
1494
+ Case (i): From the proof of Theorem 1.2 (i) we know that B(u, v) =
1495
+ −D(u, z)/D(v, z) for all z ∈ C \ R since D(v, z) ̸= 0 for these z. By assump-
1496
+ tion D(x, v) ̸= 0 for u < x < v, so by continuity B(u, v) = −D(u, x)/D(v, x)
1497
+ for these x. We next observe that
1498
+ B(u, v)v − x
1499
+ x − u = v − x
1500
+ D(v, x)
1501
+ D(u, x)
1502
+ u − x ̸= 0,
1503
+ u < x < v,
1504
+ (61)
1505
+ and
1506
+ lim
1507
+ x→u+
1508
+ D(u, x)
1509
+ u − x = ||pu||2,
1510
+ lim
1511
+ x→v−
1512
+ D(v, x)
1513
+ v − x = ||pv||2,
1514
+ so the function in (61) is positive for u < x < v, hence B(u, v) > 0.
1515
+ Case (ii): This case is reduced to case (i) for the truncated Jacobi matrix
1516
+ from Remark 2.4. If �A, . . . , �D denote the Nevalinna functions of two variables
1517
+ 27
1518
+
1519
+ for the truncated case, the following formulas can be found in [14]. (The
1520
+ reader is warned that this reference follows the normalization of [11].)
1521
+ �B(u, v) = (v − b0)A(u, v) − C(u, v),
1522
+ �D(u, v) = a2
1523
+ 0A(u, v),
1524
+ and since we assume that A(u, v) = 0, we have −C(u, v) = �B(u, v) > 0.
1525
+
1526
+ References
1527
+ [1] N. I. Akhiezer, The Classical Moment Problem and Some Related Ques-
1528
+ tions in Analysis. English translation, Oliver and Boyd, Edinburgh,
1529
+ 1965.
1530
+ [2] N. I. Akhiezer, I.M. Glazman, Theory of linear operators in Hilbert space.
1531
+ Two volumes bound as one. Dover 1993.
1532
+ [3] C. Berg, Indeterminate moment problems and the theory of entire func-
1533
+ tions, J. Comput. Appl. Math. 65 (1995), 27–55.
1534
+ [4] C. Berg and J. P. R. Christensen, Density questions in the classical
1535
+ theory of moments, Ann. Inst. Fourier 31, no. 3 (1981), 99–114.
1536
+ [5] C. Berg and R. Szwarc, The Smallest Eigenvalue of Hankel Matrices,
1537
+ Constr. Approx. 34 (2011), 107–133.
1538
+ [6] C. Berg and R. Szwarc, Inverse of infinite Hankel moment matrices,
1539
+ SIGMA 14 (2018), 109, 48 pages.
1540
+ [7] C. Berg and R. Szwarc, Closable Hankel Operators and Moment Prob-
1541
+ lems, Integr. Equ. Oper. Theory 92(1) (2020), 1–9.
1542
+ [8] C. Berg and R. Szwarc, Self-adjoint operators associated with Hankel
1543
+ moment matrices, Journal of Functional Analysis, 283 (2022), 109674.
1544
+ [9] R. P. Boas, Entire Functions. Acdemic Press Inc., Publishers, New York
1545
+ 1954.
1546
+ [10] L. de Branges, Hilbert Spaces of Entire Functions. Prentice-Hall, Inc.
1547
+ Englewood Cliffs, N. J. 1968.
1548
+ 28
1549
+
1550
+ [11] H. Buchwalter and G. Cassier, La param´etrisation de Nevanlinna dans
1551
+ le probl`eme des moments de Hamburger, Expo Math. 2 (1984), 155–178.
1552
+ [12] W. F. Donoghue, Jr., Monotone Matrix Functions and Analytic Contin-
1553
+ uation. Springer-Verlag, Berlin, Heidelberg, New York, 1974.
1554
+ [13] M. L. Gorbachuk and V. I. Gorbachuk, M. G. Krein’s Lectures on Entire
1555
+ Operators. Birkh¨auser Verlag, Basel, Boston, Berlin, 1997.
1556
+ [14] H. L. Pedersen, The Nevanlinna matrix of entire functions associated
1557
+ with a shifted indeterminate Hamburger moment problem, Math. Scand.
1558
+ 74 (1994), 152–160.
1559
+ [15] M. Riesz, Sur le probl`eme des moments et le th´eor`eme de Parseval cor-
1560
+ respondant, Acta Litt. Ac. Sci. Szeged 1 (1923), 209–225.
1561
+ [16] B. Simon, The classical moment problem as a self-adjoint finite difference
1562
+ operator, Adv. Math. 137 (1998), 82–203.
1563
+ [17] K. Schm¨udgen, The Moment Problem, Graduate Texts in Mathematics
1564
+ Vol. 277. Springer International Publishing AG 2017.
1565
+ [18] K. Yosida, Functional Analysis, Third Edition, Springer Verlag, Berlin,
1566
+ Heidelberg, New York, 1971.
1567
+ Christian Berg
1568
+ Department of Mathematical Sciences, University of Copenhagen
1569
+ Universitetsparken 5, DK-2100 Copenhagen, Denmark
1570
+ e-mail: berg@math.ku.dk
1571
+ Ryszard Szwarc
1572
+ Institute of Mathematics, University of Wroc�law
1573
+ pl. Grunwaldzki 2/4, 50-384 Wroc�law, Poland
1574
+ e-mail: szwarc2@gmail.com
1575
+ 29
1576
+
MtAyT4oBgHgl3EQfs_ll/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
N9FIT4oBgHgl3EQfdisk/content/tmp_files/2301.11270v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
N9FIT4oBgHgl3EQfdisk/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
OdE3T4oBgHgl3EQfxQsz/content/tmp_files/2301.04709v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
OdE3T4oBgHgl3EQfxQsz/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
OdFKT4oBgHgl3EQffi6d/content/tmp_files/2301.11830v1.pdf.txt ADDED
@@ -0,0 +1,855 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ arXiv:2301.11830v1 [astro-ph.IM] 27 Jan 2023
2
+ Detection of Ultra High Energy Cosmic Rays and Neutrinos with
3
+ Lunar Orbital Radio Telescope
4
+ Linjie Chen,1,2∗ Marc Klein Wolt,3 Amin Aminaei,4 Stijn Buitink,5 Heino Falcke3
5
+ 1National Space Science Center, Chinese Academy of Sciences, Beijing, 100190, China
6
+ 2National Astronomical Observatories, Chinese Academy of Sciences, Beijing, 100101, China
7
+ 3Astronomical Institute, Radboud University Nijmegen, Heijendaalseweg 135, 6525 AJ Nijmegen, The Netherlands
8
+ 4UC Davis, Dept. of Physics and Astronomy, Physics Bldg, 1 Shields Ave, Davis, USA
9
+ 5Astrophysical Institute, Vrije Universiteit Brussel, Pleinlaan 2, Brussels, 1050, Belgium
10
+ ∗Corresponding author; E-mail: ljchen@nao.cas.cn.
11
+ Jan. 25, 2023
12
+ Particle cascades induced by ultra-high-energy (UHE) cosmic rays and neutrinos impacting on the lu-
13
+ nar regolith usually radiate Cherenkov radio emissions due to the presence of excess negative charge,
14
+ which is known as Askaryan effect. Several experiments have been carried out to detect the Cherenkov
15
+ radio emissions in the lunar regolith. To prepare for future lunar Ultra-Long Wavelength (ULW, fre-
16
+ quencies below 30 MHz) radio astronomy missions, we study the detection of the Cherenkov radio
17
+ emissions with the ULW radio telescope that are operating at the lunar orbit. We have carried out in-
18
+ strument modelling and analytic calculations for the analysis of aperture, flux and event rate, and the
19
+ analyses show the detectability of the Cherenkov radiation. Based on the properties of the Cherenkov
20
+ radiation, we have demonstrated that the cosmic ray and neutrino events could be reconstructed with
21
+ the three ULW vector antennas onboard the lunar satellites via measurements of the Askaryan radio
22
+ pulse intensity, polarizations, etc. The results obtained by this study would be useful for future lunar
23
+ radio explorer mission, where the detections of UHE cosmic rays and neutrinos could be successfully
24
+ attempted.
25
+ 1
26
+ Introduction
27
+ Studies on the behaviour of ultra-high energy (UHE) particles and their source of origins have been a major topic of
28
+ interest in modern high energy astrophysics and particle physics. However, the origin of ultra-high-energy (> 1018
29
+ eV) cosmic rays (UHECR) still remains unknown to date. It is mainly because of their deflected trajectories caused
30
+ by cosmic magnetic fields. Above a threshold energy of ∼ 1019.6 eV, cosmic rays usually interact with cosmic
31
+ microwave background to produce pions and lose a substantial amount of energy within a distance of tens of mega
32
+ parsec (Mpc). This is well known as Greisen-Zatsepin-Kuzmin (GZK) effect ([28], [54]). The few cosmic rays
33
+ that observed above this threshold energy must originate from the distant sources. In order to determine the source
34
+ origin of such UHE cosmic rays, the detection of UHE neutrinos, those produced together with the cosmic rays
35
+ 1
36
+
37
+ or during the GZK interaction, has usually been employed as one of the method. Since neutrinos are chargeless
38
+ and weakly interacting particles, the propagating UHE neutrinos (UHECν) usually remain unaffected over large
39
+ cosmic distances, and therefore, their arrival directions carry direct information of their source of origins.
40
+ The chief problem of both UHECR and UHECν event detection come from their extreme rarity. The flux at
41
+ GZK energy is around one particle per square kilometer per century, which calls for huge detectors (thousands km2)
42
+ to collect a significant amount of events at ultra-high energies. For instance, even the Pierre Auger Observatory
43
+ (PAO) with a collecting area of 3000 km2 can only detect of order 30 particles above the GZK energy per year [37].
44
+ However, for the detection of particles above 1021 eV, the detectors are needed to have a thousand-fold increase in
45
+ their collecting area. Thus, space-based detection systems are, therefore, appeared to be a feasible approach that is
46
+ currently being attempted and implemented.
47
+ The Moon acts as a huge natural detector with large collecting area and is first proposed to be a target to detect
48
+ cosmic ray and neutrino showers in [20]. The detection is based on the Askaryan effect, which is pointed out by
49
+ G.A. Askaryan from the Lebedev Physical Institute in 1961 [5]. When high-energy particle interaction occurs in
50
+ a denser medium like ice, rock salt, lunar regolith, or the atmosphere, it attracts electrons from the surrounding
51
+ medium and transfer them into the shower disk. With the annihilation of showering positrons in flight, a net
52
+ excess of electrons are produced. The excess electrons rapidly appear and disappear to produce a radio pulse that
53
+ could be observed by a sensitive telescope. Using the above technique several experiments have been proposed
54
+ and performed to detect high-energy cosmic particles, such as Parkes ([32], [33]), GLUE ([26],[25]), LUNASKA
55
+ [38],[39], NuMoon [13], RESUN [36], LaLuna [52], LOFAR [50], FAST [37], all of which employ the terrestrial
56
+ telescopes to carry out the observations. In the literatures ([31], [53], [4]), the scheme of a lunar orbital satellite
57
+ system was presented and analyzed to detect the UHE cosmic rays and neutrinos.
58
+ The ultra-long wavelength (ULW) (≥ 10 m) range, the last unexplored region of the electromagnetic (EM)
59
+ spectrum, is presently one of the new promising areas of radio astronomy. Strengthening the science cases for
60
+ extending the radio astronomy into the unexplored ULW window has been significantly increasing. Due to the
61
+ reflection of the ULW signals by the Earth’s ionosphere and the presence of the strong terrestrial anthropogenic
62
+ radio frequency interference (RFI), the far side of Moon can be an ideal site for carying out the ULW radio
63
+ observations. To date, various Moon-based telescopes have been made and launched for exploring the ULW
64
+ region, such as OLFAR [22], FARSIDE [47], DARE [14], LRX [44], DSL ([12], [18]), NCLE [11], LFRS [41],
65
+ etc. The lunar ULW radio telescopes provide ample opportunities to detect the Cherenkov radiation usually caused
66
+ by the UHNCR or UHECν on the Moon.
67
+ In this paper we present the radio detection of the UHNCR or UHECν with the lunar orbital ULW radio
68
+ antennas. The paper is organized as follows. In Section 2 we describe the model for the detection of the lunar
69
+ Cherenkov emission. The aperture and flux limit for the lunar UHECR and UHEC events are estimated and
70
+ analyzed in Section 3. In Section 4 we discuss the characteristic of the UHNCR or UHECν with the multi-satellite
71
+ detections. Finally, summary and conclusions are provided in Section 5.
72
+ 2
73
+
74
+ 2
75
+ Model for radio emission detection
76
+ When energetic particles interact in the denser medium like lunar regolith, intense coherent radio pulses are pro-
77
+ duced in the frequency range from MHz to GHz. The radio emissions that escaped from the lunar surface could
78
+ be detected by a Moon-based or terrestrial radio telescope as shown in Figure 1, which strongly depends on the
79
+ sensitivity of the telescope. Though the terrestrial radio telescope have a huge collecting area and the best sensitiv-
80
+ ity, the intensity of the lunar Askaryan radio emission is weak due to the long-distance attenuation. A lunar radio
81
+ telescope may provide a reasonable detection capability for the UHECR and UHECν events since it is thousand
82
+ times closer to the lunar surface than to the ground-based telescopes. For the detection of the UHE particles, the
83
+ effective aperture is another vital factor which is directly dependent on the physical lunar surface illuminated by
84
+ the antenna. The physical surface area is determined by the antenna beamwidth and antenna altitude from the lunar
85
+ surface, and the altitude affects the detected electric field of the Askaryan radio emission. In order to study the
86
+ lunar UHECR and UHECν detections, the electromagnetic properties of Cherenkov radiation have been modelled
87
+ including the system parameters of the radio detector.
88
+ Figure 1: Schematic view of lunar antenna detections of Askaryan radio emissions produced by the UHECR or
89
+ UHECν in the lunar soil.
90
+ 2.1
91
+ Lunar Cherenkov radio emission
92
+ According to numerous Monte Carlo simulations, the Cherenkov radio emissions from the UHE hadronic showers
93
+ have been parameterized for different dielectric media ([55], [7], [8], [25]). In the literatures ([31], [53], [23], [9], [4])
94
+ the radio detection of lunar Askaryan pulse caused by the UHECR and UHECν interacting with Moon have been
95
+ addressed. For the Cherenkov radiation, we utilized the formula given in Ref. ([39], [23], [9]) to calculate the
96
+ electric field strength ε0 (µV m−1 MHz−1) at the Cherenkov angle θc = cos−1(1/n).
97
+ ε0(E, R, ν) = 8.45 × 106 1
98
+ R( Es
99
+ E0
100
+ )(
101
+ ν
102
+ GHz)(
103
+ 1
104
+ 1 + (ν/ν0)1.23 )
105
+ (1)
106
+ 3
107
+
108
+ R
109
+ 0.
110
+ RM
111
+ 0
112
+ Os
113
+ v or CRwhere, R is the distance from the source point to the detector, Es is the shower energy in eV. E0 = 1020eV, ν0 =
114
+ 2.32GHz ([39], [9]). The angular distribution of the radio emissions around the Cherenkov angle θc is crucial for
115
+ the detection of lunar UHNCR and UHECν events. Due to the loss of coherence, the radio emission decreases in
116
+ the direction away from θc. [48] discussed the angular spread of the Cherenkov radiation around the Cherenkov
117
+ angle. Through comparing several different formulae of intensity distribution, Scholten concluded that the gaussian
118
+ parametrization formula is more accurate since it agrees with both the Monte Carlo simulations at high frequencies
119
+ and the analytic results at low frequencies. In the present study, we use the formula given in [48] for angular spread
120
+ of the Cherenkov radiation as follows,
121
+ Iθ = sin θ
122
+ sin θc
123
+ e− Z2
124
+ 2 ,
125
+ Z = cos θ − cos θc
126
+ ∆c sin θc
127
+ (2)
128
+ The spreading of the radiation intensity is determined by ∆c = 0.0754[L(E0)/(νL(Es)] (in radians), which is
129
+ inversely proportional to the shower length and the radiation frequency. Here L(E is the shower length determined
130
+ by the energy, L(E) = 12.7 + 2/3 log(E/E0) [48].
131
+ In the detection analysis of lunar UHE particle events, a point-source is treated for the coherent Cherenkov
132
+ radiation. When the Cherenkov radio emissions escape from the lunar surface, it will be reflected and transmitted
133
+ at the interface. According the Fresnel law, the transmission coefficient for parallel polarization can be expressed
134
+ as
135
+ ˆt∥(θs) =
136
+ 2 cosr
137
+ n cos r + cos i
138
+ (3)
139
+ where θs is the polar angle which is uniquely related to the angle r and i. r is the angle of refraction relative to the
140
+ normal (outside the Moon) as the rays pass through the lunar surface into free space, i is the angle of incidence as
141
+ shown in Figure 1. r and i have the relation as sin(i) = sin(r)/n, n is the refraction index of lunar regolith, and
142
+ chosen to be 1.73. The detected radiation from a lunar regolith shower, with energy E, at a frequency ν and an
143
+ angle θ, can be expressed as ε = ε0(E, R, ν) · ˆt∥(θs) · I(θ). Here the absorption of the radio waves before exiting
144
+ the lunar surface has not been discussed, and it will be taken into account in the aperture calculations.
145
+ 2.2
146
+ Detection with lunar ULW radio antenna
147
+ The radio emission from the hadronic cascades induced by the UHE cosmic rays or neutrinos in the lunar regolith
148
+ covers a broad frequency band from MHz to GHz, and it peaks in the GHz regime [39]. Therefore, most of the past
149
+ lunar radio experiments to UHECR or UHECν operate at GHz frequency band ([32], [26], [38], [36]). With the
150
+ decreasing frequency the peak intensity of the radiation decreases, however, the increasing angular spread allows
151
+ the radio emission detected by the lunar radio telescope to escape from the lunar surface within a broader solid
152
+ angle, which makes it increasingly efficient to detect the Cherenkov radiation at low frequencies. Furthermore, the
153
+ surface roughness does not play an important role in the detection analysis at lower frequencies since a consider-
154
+ able fraction of the radiation will penetrate the surface. According to these factors, some experiments running at
155
+ low frequencies(∼ 150 MHz) for the lunar particle detection have been carried out or planned with the Westerbork
156
+ Synthesis Radio Telescope (WSRT), NuMoon [13], the LOw Frequency ARray (LOFAR), and the Square Kilo-
157
+ meter Array (SKA-low). Although the ULW band ≤ 30MHz is beyond the optimum frequency window where
158
+ 4
159
+
160
+ the wavelength is comparable with the shower length [48], and the bandwidth is also limited at ULW band, the
161
+ increased particle energy could well compensate the loss of electric field strength at lower frequency. The net effect
162
+ is that the detection probability could increase reasonably at adequately high shower energies.
163
+ In the present study, we discuss the UHECR and UHECν detection with lunar orbital ULW radio telescope.
164
+ For the space ULW radio observations, a tripole antenna has been selected as the receiving element in most of the
165
+ radio missions as DSL ([12], [18]), NCLE [11], LFRS [41], and OLFAR [15], since it has specific advantages of
166
+ measuring the three-dimension electric field, estimating the directions of arrival (DOA) of incident signals with
167
+ single antenna ([17], [16]), and protecting the desired signals from interference signals [19]. In the lunar ULW
168
+ mission, the Dark Ages is undoubtedly one of the greatly interesting sciences, it requires the observation of the
169
+ entire 1 − 30 MHz band, which could be extend up to 50 MHz for cross-referencing with the terrestrial radio
170
+ facilities such as LOFAR. Therefore, the tripole antenna of the lunar ULW radio telescope is supposed to operate
171
+ at the frequency range up to 40 or 50 MHz ([41], [12]).
172
+ For a radio telescope, its sensitivity can be determined by the system equivalent flux density (SEFD) F =
173
+ kTsys/Aeff in Jansky (10−26 Wm−2Hz−1), . Here k is the Boltzmann’s constant, Tsys the system temperature
174
+ and Aeff the effective collecting area. However, a electric field strength (V/m/Hz) of a coherent radio pulse is
175
+ usually utilized to characterize the Cherenkov radiation. In order to analyze the sensitivity of a radio telescope for
176
+ the Askaryan pulse detection, it is required to express the telescope sensitivity in terms of the electric field strength.
177
+ If a radio telescope has a flat system noise spectrum over the observed bandwidth, the equivalent root mean square
178
+ (RMS) spectral electric field [9] can be written as follows,
179
+ εrms = Erms
180
+ ∆ν ,
181
+ Erms =
182
+
183
+ kTsysZ0∆ν
184
+ Aeff
185
+ (4)
186
+ where Erms is the RMS electric field strength over the bandwidth ∆ν in one polarization, and Z0 is the impedance
187
+ of free space. Then the sensitivity of an antenna to detect a coherent radio pulse (the minimum detectable electric
188
+ field) can be defined as εmin = Nσεrms, where Nσ is the minimum number of standard deviations needed to reject
189
+ statistical noise pulses.
190
+ At the ULW band, the galactic synchrotron emission dominates the sky radio foreground, its spectrum varies
191
+ greatly with the frequencies, the brightness temperature rises from about 104 K at 30 MHz to about 2.6 × 107 K at
192
+ around 1 MHz; Furthermore, the antenna effective collecting area changes significantly with the frequencies too.
193
+ Therefore, the Equation (4) cannot be applied directly to the electric field calculation for a ULW radio antenna.
194
+ In order to truly characterize the equivalent spectral electric field of the telescope, the RMS electric field strength
195
+ Erms should be re-computed by integrating the SEFD over the frequency. According to the studies in Ref. [16],
196
+ the sky noise temperature is absolutely dominant in the total system temperature for the ULW radio antennas of
197
+ different lengths, then Erms can be re-written as,
198
+ Erms =
199
+
200
+ kZ0
201
+ � νH
202
+ νL
203
+ Tsky(ν)
204
+ Aeff(ν) dν
205
+ (5)
206
+ Where νL and νH are the lower and upper limit of the frequency respectively. Using a sky temperature model
207
+ [40], the sensitivity of a ULW radio antenna longer than 5 meters [16] to detect the Askaryan radio pulse has been
208
+ calculated with a nominal value Nσ = 3 for different center frequencies and bandwidths, as shown in Table 1. Note
209
+ 5
210
+
211
+ Table 1: Sensitivity parameters of radio UHECR and UHECν observations, Frequencies and bandwidths are in
212
+ MHz.
213
+ Simulations
214
+ νmin
215
+ νmax
216
+ ∆ν
217
+ ν0
218
+ εmin(uV/m/MHz)
219
+ Case I
220
+ 10
221
+ 30
222
+ 20
223
+ 20
224
+ 2.06
225
+ Case II
226
+ 15
227
+ 25
228
+ 10
229
+ 20
230
+ 2.84
231
+ Case III
232
+ 10
233
+ 40
234
+ 30
235
+ 25
236
+ 1.61
237
+ Case IV
238
+ 15
239
+ 35
240
+ 20
241
+ 25
242
+ 1.93
243
+ Case V
244
+ 20
245
+ 30
246
+ 10
247
+ 25
248
+ 2.67
249
+ Case VI
250
+ 10
251
+ 50
252
+ 40
253
+ 30
254
+ 1.34
255
+ Case VII
256
+ 15
257
+ 45
258
+ 30
259
+ 30
260
+ 1.53
261
+ Case VIII
262
+ 20
263
+ 40
264
+ 20
265
+ 30
266
+ 1.83
267
+ Case IX
268
+ 25
269
+ 35
270
+ 10
271
+ 30
272
+ 2.55
273
+ that the antenna length has less influence on the sensitivity since the sky noise temperature is absolutely dominant.
274
+ In order to improve the detection probability of lunar UHECR and UHECν events, we use the central frequency
275
+ of 30 MHz and the bandwidth of 40 MHz to obtain the best sensitivity in our analysis.
276
+ 3
277
+ Detection analysis for lunar UHECR and UHECν events
278
+ We used the method presented in Ref. [31] to analyze the detection of lunar UHECR and UHECν events. In order
279
+ to decide the cascade event rates for the UHECR and UHECν, one way is in term of the aperture size, which is
280
+ multiplied by the isotropic cosmic ray or neutrino flux in the given energy bin to obtain the detection rate. We
281
+ calculate the analytic aperture by integrating the angular aperture for specified energy E over the angles (θs, ϕs) at
282
+ the lunar surface S, and over the angles (θn, ϕn) of the cascade axis relative to the normal to this surface, as shown
283
+ in Figure 1.
284
+ For a volume element near the lunar surface (z ≃ 0) with the polar angle θs, (see Figure 1) radio emissions
285
+ from it received by the detector has a given refraction angle r, sin(r) = sin(θs)(RM + h)/R(θs). Here R(θs) =
286
+ [R2
287
+ M + (RM + h)2 − 2RM(RM + h) cos θs]1/2, is the distance between the surface element and the detector.
288
+ The maximum value of the polar angle θmax
289
+ s
290
+ = arccos[RM/(RM + h)] is associated with the refraction angle
291
+ r −→ π/2.
292
+ At low frequencies, the lunar surface roughness will not have a major influence on the detection sensitivity, and
293
+ thus can be neglected [48], which could be another advantage of UHE particle detection with a lunar ULW radio
294
+ antenna. In our analysis, the Moon is assumed to be an approximately smooth sphere.
295
+ 3.1
296
+ UHE cosmic rays
297
+ For the UHE cosmic rays, they cannot penetrate through the lunar regolith, and cosmic ray events occur in the lunar
298
+ regolith near the surface. Therefore, only the upper hemisphere (cosθn > 0) contributes to the aperture calculation
299
+ for the cosmic ray cascades. The total aperture ACR(E) is defined by Equation (6) and (7) as the integral of the
300
+ angular aperture ∆ΩCR(E, θs) over the surface S [31].
301
+ ACR(E) = A0
302
+
303
+ ∆ΩCR(E, θs)d cos θs
304
+ (6)
305
+ 6
306
+
307
+ ∆ΩCR =
308
+
309
+ cos θnΘ[ε(E, θs, θ) − εrms] × Θ(cos θn)dϕnd cos θn
310
+ (7)
311
+ where, A0 = 2πR2
312
+ M is the physical area of lunar hemisphere, the unit step function Θ(ε − εrms) only selects the
313
+ cascade events that produce radio signals above the threshold value εmin. The radio attenuation length of lunar
314
+ soil depends on the fraction of its composition. For the lunar regolith we use the value ℓ = 60λ (λ, the wavelength
315
+ in the vacuum) given in the literatures ([39], [9]), it is a quite large values at low frequencies. Thus the attenuation
316
+ of radio wave prior to their escape from the lunar surface can be ignored since the cosmic ray cascades exist only
317
+ few meters deep.
318
+ Figure 2: Askaryan radio emissions initiated by UHE cosmic rays propagate between the lunar soil and lunar
319
+ orbital radio antenna. The lunar soil is modelled with two layers, the regolith and the sub-regolith.
320
+ The lunar subsurface soil consists of several different strata according to the detection of Lunar Penetrating
321
+ Radar. Therefore, the reflected radio waves by the strata interfaces that can be detected by the lunar orbital radio
322
+ antenna should also be taken into account besides the direct radio waves emitted by the hadronic shower, as shown
323
+ in Figure 2. Due to the very small contribution, the secondary reflections at the interfaces are neglected. In order to
324
+ simplify the analysis, the lunar soil structure is modelled as only two layers, lunar regolith and lunar sub-regolith.
325
+ The regolith is assumed to be 12 meter deep [45], and the refractive index of lunar regolith (n = 1.73) and sub-
326
+ regolith (n = 2.5) ([39], [9]) have been used in the aperture calculation.
327
+ Figure 3: Total aperture of UHE cosmic rays for a lunar orbital ULW radio antenna at different altitudes of 50 km,
328
+ 100 km, 300 km, and 500 km with a sensitivity of 1.34 uV/m/MHz.
329
+ Using analytic integrations of the angular aperture over the Moon surface, the total aperture are computed
330
+ 7
331
+
332
+ 107
333
+ 106
334
+ ...................
335
+ 10
336
+ 50 km
337
+ 103
338
+ 100 km
339
+ 300 km
340
+ 500 km
341
+ 10
342
+ 1018
343
+ 1020
344
+ 1021
345
+ 1022
346
+ 1022
347
+ E. [eV]To the antennafor the lunar orbital ULW radio antenna with different altitudes. As shown in Figure 3, with the higher altitude
348
+ of lunar ULW radio antenna, the physical aperture illuminated by the antenna increases, however, the detected
349
+ Cherenkov radiation on the antenna becomes weaker due to the distance-decay effect. As a result, the minimum
350
+ (threshold) cascade energy at which the radio emission intensity on the antenna is equal to the threshold intensity
351
+ εmin increases when the altitude rises. For all the altitudes, the aperture reaches around the peak above the energy
352
+ of ∼ 1022 eV.
353
+ Based on the model-independent estimate [46], for the zero detected events, the upper limit for differential (in
354
+ energy) flux J(E) of primary particles in the observation time T, can be obtained by the following relation,
355
+ dJ(E)
356
+ dE
357
+
358
+ Sup
359
+ EA(E)T
360
+ (8)
361
+ Here the Poisson factor Sup = 2.3 for a limit with 90% confidence level.
362
+ Figure 4: UHE cosmic ray flux of a lunar orbital ULW radio antenna at different altitudes of 50 km, 100 km, 300
363
+ km, and 500 km for one-year observation. The results are compared with the flux limits of both SKA Low for a
364
+ 1000-hour observation between 100 MHz and 350 MHz (reproduced from Bray et al., 2014) and LOFAR for a
365
+ 90-day observation between 125 MHz and 175 MHz (reproduced from Singh et al., 2009).
366
+ For one-year observation with the lunar orbital ULW radio antenna, the flux limits are calculated for different
367
+ antenna altitudes. As shown in Figure 4, only the cosmic ray with the energy above a threshold can be detected by
368
+ the lunar orbital antenna, it is 4 × 1019 eV for the antenna at the 50 km high orbit, and 4 × 1020 eV for the 500 km
369
+ high orbit. The results in Figure 4 also show that the flux limit of single lunar orbital antenna will be competitive
370
+ with the estimated limits of large terrestrial radio telescopes such as LOFAR [51] and SKA-Low [10]. Note that
371
+ the flux limit of SKA-Low is calculated according to the SKA array for Phase I.
372
+ For an isotropic cosmic ray or neutrino flux J(E), during the observation time T the number of detected events
373
+ in the given energy interval between E1 and E2 can be obtained by N =
374
+ � E2
375
+ E1 T J(E)A(E)dE, where A(E) is the
376
+ total aperture as a function of energy.
377
+ Using the flux parametrization of the Auger Collaboration in Ref. [1], the expected event rates of UHE cosmic
378
+ rays are calculated in the energy range between 1018 and 1021, as shown in Figure 5, for the lunar ULW radio
379
+ antenna with different altitudes. It can be seen that the total detected cosmic ray events decreases for the antenna
380
+ 8
381
+
382
+ 108
383
+ 50 km
384
+ 100 km
385
+ 109
386
+ 500 km
387
+ LOFAR Core
388
+ SKA LOw
389
+ 10
390
+ 23.
391
+ Flux
392
+ 11
393
+ 10°12
394
+ 101g
395
+ 10
396
+ 1021
397
+ 10
398
+ 1023
399
+ E. [eV]Figure 5: The number of detected (per year) cosmic ray events for the energy E ≥ 1018 eV with the flux
400
+ parametrization of the Auger Collaboration [1]. Top, the total number detected events as a function of lunar
401
+ antenna orbital altitude; Bottom, the detected events per energy interval for the lunar antenna of 50 km-high orbit
402
+ (left) and 200 km-high orbit (right).
403
+ with higher altitudes. The detected event number reaches to about 400 for the 10 km-high orbit, and it drops rapidly
404
+ from tens at the altitude of 200 km to zeros at the attitude above 500 km. The simulation results of detected cosmic
405
+ ray events per energy interval show that the events mostly lie in the higher energy range with the increasing of
406
+ orbital altitude.
407
+ 3.2
408
+ UHE neutrinos
409
+ The cascades initiated by the UHE neutrinos are different from that produced by the cosmic rays. Due to the long
410
+ attenuation length for neutrinos in lunar regolith, most of the UHE neutrinos induce cascades very deep inside the
411
+ Moon. Therefore, the attenuation of radio wave propagation prior to their escape from the lunar surface can not be
412
+ neglected, and the contribution of both the upper and lower lunar hemisphere have to be taken into account while
413
+ calculating the total aperture. Considering the neutrino absorption on the path L(z, θn), we integrate the angular
414
+ aperture over z and obtain the contributions of the upper and lower hemisphere respectively [31] as follows,
415
+ Aν(E) = A0
416
+
417
+ ∆Ων(E, θs)d cos θs
418
+ (9)
419
+ ∆Ω+
420
+ ν =
421
+
422
+ 2ℓ cos(i)
423
+ LνN(Eν) ln(
424
+ ε
425
+ εmin
426
+ ) × Θ[ε(E, θs, θ) − εmin]dϕnd cos θn
427
+ (10)
428
+ ∆Ω−
429
+ ν =
430
+
431
+ 2ℓ cos(i)
432
+ LνN(Eν) ln(
433
+ ε
434
+ εmin
435
+ ) × exp[−2RM| cos θn|
436
+ LνN(Eν)
437
+ ] × Θ[ε(E, θs, θ) − εmin]dϕnd cos θn
438
+ (11)
439
+ here, it is assumed that only 20% of the energy of the original neutrino goes into the hadronic particle cascade. ℓ is
440
+ the radio wave attenuation length. Since the showers produced by the UHE neutrinos occur in the lunar soil over a
441
+ 9
442
+
443
+ 10
444
+ Number of the detected events per year
445
+ 10
446
+ 10
447
+ 10
448
+ 10
449
+ 10°
450
+ 101
451
+ 102
452
+ 103
453
+ Altitude of lunar UL radio antenna, km
454
+ Events per energv interval for 50 km orbital altitude
455
+ Events per energy interval for 200 km orbital altitude
456
+ Number of detected events per year
457
+ 40
458
+ detected events peryear
459
+ 20
460
+ 35
461
+ 30
462
+ 15
463
+ 25
464
+ 20
465
+ 10
466
+ 15
467
+ 10
468
+ of
469
+ 5
470
+ L
471
+ 0
472
+ 18
473
+ 0
474
+ 18.5
475
+ 19
476
+ 19.5
477
+ 20
478
+ 20.5
479
+ 21
480
+ 18
481
+ 18.5
482
+ 19
483
+ 19.5
484
+ 20
485
+ 20.5
486
+ 21
487
+ E. [log(eV]
488
+ E, [log(eV)]large range of depths from the surface, we use the value ℓ = 29λ of lunar sub-regolith ([39], [9]) for the aperture
489
+ calculation. The neutrino-nucleon interaction length was LνN(km) = 122km(E0/Eν)1/3, where E0 = 1020eV
490
+ ([23], [9]). For the upper hemisphere cos θn > 0, we assume l/LνN ≪ 1 [31], which is valid for most of cases.
491
+ Figure 6: Direct and reflected radio emissions initiated by the UHE neutrinos propagate between the lunar soil and
492
+ lunar orbital radio antenna. The lunar soil is modelled with two layers, the regolith and the sub-regolith.
493
+ For the UHE neutrinos, the multi-strata structure of the lunar subsurface soil has also been taken into account in
494
+ the aperture calculation. Since the cascade showers generated by the UHE neutrinos mostly occur deep inside the
495
+ lunar soil, that is in the lunar sub-regolith, we consider the refractions at both the lunar surface and the subsurface
496
+ strata interface, as shown in Figure 6. Here, the reflected signals of the Askaryan radio emissions at the lunar
497
+ surface is ignored to reduce the complexity of aperture analysis.
498
+ Figure 7: Total aperture of the UHE neutrinos for a lunar orbital ULW radio antenna at different altitudes of 50
499
+ km, 100 km, 300 km, and 500 km with sensitivity of 1.34 uV/m/MHz.
500
+ Similar to the cosmic rays, the total aperture of the UHE neutrinos are calculated using analytic methods for
501
+ the lunar orbital ULW radio antenna at different altitudes. As shown in Figure 7, the aperture increases with the
502
+ rising of antenna attitude. However, the increase becomes slower above the attitude of 300 km. Being different
503
+ from the cosmic rays, the aperture of the UHE neutrinos increase over the entire energy range from 1020 eV to
504
+ 1024 eV without reaching peaks for all the four altitudes. For 50 km distance, the energy of detectable events begin
505
+ at 2 × 1020 eV. With the increasing of orbital attitudes, only higher energetic neutrino events become detectable,
506
+ for 500 km attitude the threshold energy of detectable events increases to ∼ 2 × 1021 eV.
507
+ 10
508
+
509
+ 107
510
+ JS
511
+ 103
512
+ 50 km
513
+ 100 km
514
+ 101
515
+ 300 km
516
+ 500 km
517
+ 101
518
+ 1020
519
+ 1021
520
+ 1022
521
+ 1032
522
+ 1024
523
+ E. [ev]To the antennaFigure 8: The UHE neutrino flux of a lunar orbital ULW radio antenna at different altitudes of 50 km, 100 km,
524
+ 300 km, and 500 km for one-year observation. The results are compared with the flux limits of the SKA Low for
525
+ a 1000-hour observation between 100 and 350 MHz (reproduced from Bray et al., 2014), LOFAR for a 90-day
526
+ observation between 125 MHz and 175 MHz (reproduced from Singh et al., 2009). The blue chain-dotted lines
527
+ show the predicted fluxes from the GZK [21], and topological defects (TD) [49]. The green dashed lines show the
528
+ flux limits set by the past experiments of ANITA-II [24], IceCube [3] and Auger [2].
529
+ Figure 8 shows the flux limits for one-year observation with the lunar orbital antennas of different altitudes.
530
+ The flux limits of the SKA-Low for a 1000-hour observation, the LOFAR for a 90-day observation, the past
531
+ experiments of ANITA-II [24], IceCube [3] and Auger [2] are also plotted for comparison, as well as the predicted
532
+ fluxes from the models of the GZK [21] and topological defects (TD) [49]. The results indicate that the lunar
533
+ orbital observations in one year will set the comparable flux limits with the estimated fluxes of the SKA-Low and
534
+ the LOFAR core (even lower than the energy of ∼ 3 × 1022 eV), which signifies the higher detection efficiency of
535
+ the lunar ULW radio antenna for the UHE neutrinos.
536
+ Figure 9: The total number of detected (per year) neutrino events with the flux limits set by the ANITA-II experi-
537
+ ment, and the predicted fluxes for the GZK neutrinos [21] and topological defects (TD) [21], as a function of the
538
+ lunar antenna altitude in the energy E ≥ 1018 eV.
539
+ Based on the predicted fluxes from the models of the GZK neutrinos, topological defects (TD), the neutrino
540
+ 11
541
+
542
+ 104
543
+ 10°
544
+ GZK
545
+ 10g
546
+ TD
547
+ ANITA-II
548
+ 10
549
+ 10"4
550
+ 100
551
+ 10
552
+ 102
553
+ 10°
554
+ Altitude of lunar ULW radio antenna, km10
555
+ 50 km
556
+ 100 km
557
+ ANITA-II
558
+ 300 km
559
+ 2010 :
560
+ 500 km
561
+ 10*5
562
+ . :
563
+ LOFAR Core
564
+ ceoube
565
+ SKALOW
566
+ 20:13
567
+ 10°
568
+ Auger
569
+ xn
570
+ 109
571
+ .: . .
572
+ GZK
573
+ TD
574
+ 10-11
575
+ :i.
576
+ 1018
577
+ 1019
578
+ 1020
579
+ 1031
580
+ 10-2
581
+ 1023
582
+ 1024
583
+ E. [eV]flux limit set by the experiment of ANITA-II, the expected event rates per year are estimated and presented in
584
+ Figure 9 as a function of attitudes for the lunar orbital ULW radio antenna. It is evident from Figure 9 that the
585
+ GZK neutrinos almost cannot be detected for all the altitudes. In order to increase the detection sensitivity of the
586
+ GZK neutrinos, the aperture must be improved by increasing the effective receiving area of the radio telescope,
587
+ for instance, employing a radio array. For the TD neutrino detection, the event rate depends on the altitude, and
588
+ reaches the peak of about 70 per year at the attitude of about 100 km, which could be the optimum attitude for
589
+ a lunar orbital ULW antenna. With the neutrino flux limit of ANITA-II, the expected neutrino events increase
590
+ exponentially at the altitude range from 1 km to 1000 km, and thousands of events per year could be detected at
591
+ most by the lunar ULW antenna.
592
+ 4
593
+ Characteristic analysis of UHE cosmic rays and neutrinos
594
+ For the detection of the UHE cosmic rays and neutrinos, the ultimate goal is to reconstruct the events to obtain all
595
+ the parameters characterizing the cosmic rays or neutrinos, including the source, the primary particle energy, etc.
596
+ In the literatures ([29], [30]), a random search method was employed to determine the UHECR event parameters
597
+ for the LORD experiment under the assumption that the cosmic ray flux is known. Due to the lack of direct infor-
598
+ mation, the reconstructed parameters have large uncertainties even for the detection with two satellites. Compared
599
+ with the LORD experiment, the future lunar ULW radio missions like DSL ([12], [18]), OLFAR [22] have more
600
+ advantages in the UHE particle detection. They consist of many antennas onboard micro-satellites to form a large
601
+ radio array, which make it feasible to detect the UHECR and UHECν with multi satellites simultaneously. Fur-
602
+ thermore, in lunar ULW radio observations, the tripole antenna can measure the three-dimension electric fields,
603
+ which make it possible to measure the three-dimension polarization of the Askaryan radio emission.
604
+ Figure 10: Scheme of Askaryan pulse detections in the lunar soil with three antennas onboard micro-satellites.
605
+ In the detection of the UHECR and UHECν, if the Askaryan radio pulses can be observed by multi antennas on-
606
+ 12
607
+
608
+ Rs1
609
+ V or CR
610
+ 0board lunar satellites, the time delays can be measured between the radio pulses sensed by different antennas, and
611
+ the particle cascade location (P) on the lunar surface can be solely determined with the time delays among at least
612
+ three non-linearly distributed antennas, as shown in Figure 10. We know the Askaryan radio emission is highly
613
+ linearly polarized, which is always in the same plane of the Poynting vector and the shower axis ([37], [34], [25]).
614
+ Therefore, using the three-dimension polarizations of the electric fields (E1, E2, E3) measured by the tripole an-
615
+ tennas onboard three orbital satellites, the show axis can be determined by the intersection line of the three planes
616
+ of E1 −Rs1, E2−Rs2, E3−Rs3 shown in Figure 10, which means the original direction of the UHECR or UHECν
617
+ is known now. For a linear ULW radio array like DSL, it is necessary to note that the cascade location deduced by
618
+ the time delays could have two candidates symmetric with respect to the satellite orbit plane. However, since only
619
+ the right location can account with the fact that the shower axis, the Poynting vectors and the polarizations of the
620
+ Askaryan radio emissions are in the same plane, the measured polarizations of the coherent radio pulses on three
621
+ tripole antennas will definitely exclude one of the candidate locations except for the case that the shower axis is
622
+ parallel to the satellite orbit plane.
623
+ Once the shower axis and the location of the UHE particle cascade are confirmed, the distance Rs between the
624
+ particle cascade and the radio detector, the three angles θn, r, i shown in Figure 1 will also be known according
625
+ to the simple geometrical relationship. Using the formulae (1),(2) and (3), the particle energy E can be calculated
626
+ with the intensities of electric fields induced on the lunar orbital antennas. The results of the three satellites will be
627
+ cross-checked with each other to improve the accuracy and reliability.
628
+ 5
629
+ Summary and conclusions
630
+ In the present study, using the lunar Askaryan technique, the detection of the UHE cosmic rays and neutrinos
631
+ has been analytically analyzed for the future lunar ULW radio missions. Our results indicate that the single lunar
632
+ ULW radio antenna could detect the cosmic ray and neutrino flux as low as that observed by the present or future
633
+ experiments in the energy range E > 1020eV. With the known flux of the UHE cosmic rays, the simulated
634
+ detectable events per year has found to increase when the antenna altitude is lowered, and it reaches the peak at
635
+ the altitude of ∼ 10 km. For the UHE neutrinos, during the observations in one year, it is shown that the single
636
+ lunar ULW radio antenna could detect ∼ 70 topological defect neutrinos at an optimal altitude of 100 km, while
637
+ the GZK neutrinos could not be detected.
638
+ Investigating the properties of the lunar Cherenkov radiation, it has been demonstrated that the UHECR or
639
+ UHECν events could be reconstructed directly using the radio observations with at least three tripole antennas
640
+ onboard the lunar satellites that makes it feasible to detect the UHECR and UHECν with the lunar ULW radio
641
+ mission. The method could also be used to build a dedicated lunar radio telescope for the detections of the UHE
642
+ cosmic ray and the UHE neutrinos.
643
+ Acknowledgements
644
+ This work is funded by the National Natural Science Foundation of China (NSFC) under No.11941003,11790305,
645
+ 11573043, the CE-4 mission of the Chinese Lunar Exploration Program: the Netherlands-China Low Frequency
646
+ 13
647
+
648
+ Explorer (NCLE), and the Chinese Academy of Science Strategic Priority Research Program XDA15020200. The
649
+ authors would like to thank Dr. C.W. James from the University of Adelaide, Australia, and Prof. Dr. Xuelei
650
+ Chen from National Astronomical Observatories, Chinese Academy of Sciences for all the useful discussions. In
651
+ addition the authors thank the referees for their useful comments and suggestions.
652
+ Appendix
653
+ A. Aperture calculation of UHE cosmic rays and neutrinos
654
+ Based on the analytic methods used in [31], the modifications have been made to the aperture analysis for both
655
+ cosmic rays and neutrinos in this paper. The differences include the calculations of electric field intensity and
656
+ angular spread, the attenuation length and neutrino-nucleon interaction length of lunar regolith, the model of
657
+ antenna sensitivity, etc., which have been addressed in this paper.
658
+ For the detection of UHE cosmic rays, the total aperture is calculated by integrating the angular aperture over
659
+ the upper hemisphere surface, where
660
+ ∆ΩCR =
661
+ � 1
662
+ 0
663
+ cos θnd cos θn
664
+ � π
665
+ −π
666
+ Θ[ε(E, θs, θ) − εrms]dϕn
667
+ (A.1)
668
+ In the angular coordinate (θ, ϕ) with a polar axis aligned with the radiation direction (see Figure 1), it can be
669
+ derived as,
670
+ cos θn = sin θ sin i cosϕ − cos θ cos i
671
+ (A.2)
672
+ dΩ = −d cosθndϕn = −d cosθdϕ
673
+ (A.3)
674
+ Then the angular aperture can be re-written as,
675
+ ∆ΩCR = 2
676
+ � cos θmin
677
+ cos θmax
678
+ d cos θ ×
679
+ � ϕm
680
+ 0
681
+ (sin θ sin i cos ϕ − cos θ cos i)dϕ
682
+ (A.4)
683
+ here cos ϕm = (cos θ cos i)/(sin θ sin i). θmin and θmax are the minimum and maximum angels of incidence
684
+ within which the detected radiation intensity ε is stronger than εmin. The angular apertures calculated as functions
685
+ of cos θs are shown in Figure 11 .
686
+ For the detection of UHE neutrinos, the neutrino absorption on the path up to the shower production point has
687
+ to be taken into account in the calculation of angular aperture, as well as the absorption of radio wave in the lunar
688
+ regolith.
689
+ ∆Ων =
690
+
691
+ dz
692
+ LνN
693
+
694
+ Θ{ε(E, θs, θ) × exp[−z/(2ℓ cosi)] − εrms} × exp[−L(z, θn)/LνN(Eν)]
695
+ (A.5)
696
+ here, L(z, θn) ≈ z/ cosθn for the lunar upper hemisphere cos θn > 0, and L(z, θn) ≈ 2RM | cos θn | for the lunar
697
+ lower hemisphere cos θn < 0. By integrating over the cascade depth z, we obtain, respectively, the contributions
698
+ of the upper and lower hemispheres to the neutrino angular aperture.
699
+ ∆Ω+
700
+ ν ≃
701
+ � zmax
702
+ 0
703
+ 1
704
+ LνN
705
+ exp(−
706
+ z
707
+ LνN cos θn
708
+ )dz ×
709
+
710
+ Θ[ε(E, θs, θ) − εmin]dϕnd cos θn
711
+
712
+
713
+ cos θn{1 − exp[
714
+ 2ℓ cosi
715
+ LνN cos θn
716
+ ln(
717
+ ε
718
+ εmin
719
+ )]} × Θ[ε(E, θs, θ) − εmin]dϕnd cos θn
720
+ (A.6)
721
+ 14
722
+
723
+ Figure 11: Angular aperture ∆ΩCR for the cosmic ray detection with a lunar orbital ULW radio antenna at different
724
+ altitudes of 50km, 100km and 200km, and for the initial cosmic ray energy W = 1021eV .
725
+ ∆Ω−
726
+ ν ≃
727
+ � zmax
728
+ 0
729
+ 1
730
+ LνN
731
+ exp(−2RM cos θn
732
+ LνN
733
+ )dz ×
734
+
735
+ Θ[ε(E, θs, θ) − εmin]dϕnd cos θn
736
+
737
+ � 2ℓ cosi
738
+ LνN
739
+ ln(
740
+ ε
741
+ εmin
742
+ ) exp(−2RM cos θn
743
+ LνN
744
+ ) × Θ[ε(E, θs, θ) − εmin]dϕnd cos θn
745
+ (A.7)
746
+ where, zmax = 2ℓ cosi ln(ε/εmin). For the ultra-high energy neutrino, when ℓ/LνN ≪ 1, formula A.6 will be
747
+ simplified to the formula (10). The angular aperture is then calculated numerically.
748
+ References and Notes
749
+ [1] Aab, A., et al., 2020, Physical Review Letters. 2020 Sep 18;125(12):121106. doi:
750
+ 10.1103/Phys-
751
+ RevLett.125.121106. PMID: 33016715.
752
+ [2] Aab, A., et al., 2015, Physical Review D, 91, 092008.
753
+ [3] Aartsen, M. G., et al., (IceCube Collaboration), 2013, Physics Review D, 88, 112008.
754
+ [4] Aminaei, A., Chen, L., Pourshaghaghi, H., Buitink, S., Klein-Wolt, M., Koopmans, L.V.E., Falcke, H., 2018,
755
+ Advances in Space Research, Vol. 62, Iss. 9, pp. 2708-2728.
756
+ [5] Askaryan, G., 1962, Soviet Physics JETP, 14, 441.
757
+ [6] Askaryan, G., 1962, Soviet Physics JETP, 48, 988.
758
+ [7] Alvarez-Muniz, J., Zas, E., Physics Letter, B 411 (1997) 218.
759
+ [8] Alvarez-Muniz, J., Vazquez, R.A., Zas, E., 2001, Physics Review D 61 (99) 23001.
760
+ [9] Bray, J.D., 2016, AstroparticlePhysics, 77 (2016) 1¨C20.
761
+ 15
762
+
763
+ 10°
764
+ Angular aperture,
765
+ 10
766
+ 100 km
767
+ 200 km
768
+ 10
769
+ 0.94
770
+ 0.95
771
+ 0.96
772
+ 0.97
773
+ 0.98
774
+ 0.99
775
+ cos (8s)[10] Bray, J.D., Alvarez-Muniz, J., Buitink, S., RDagkesamanskii, D., Ekers, R.D., Falcke, H., Gayley, K.G.,
776
+ Huege, T., James, C.W., Mevius, M., Mutel, R.L., Protheroe, R.J., Scholten, O., Spencer, R.E., ter Veen, S.,
777
+ 2014, Presented in Advancing Astrophysics with the SKA, Italy, p.144.
778
+ [11] Boonstra, A. J., et al., Porceeding of 32nd URSI GASS. Montreal, 19-26 August 2017.
779
+ [12] Boonstra, A. J., et al., 2016 IEEE Aerospace Conference. Yellowstone Conference Center, Big Sky, Montana,
780
+ Mar 5-12, 2016.
781
+ [13] Buitink, S., Scholten, O., Bacelar, J., Braun, R., deBruyn, A.G., Falcke, H., Singh, K., Stappers, B., Strom,
782
+ R.G., Yahyaoui, R.A., 2010, Astronomy and Astrophysics, 521 (2010) 1¨C12 ArticleID: A47.
783
+ [14] Burns, J. O., Lazio, J., Bale, S., Bowman, J., Bradley, R., Carilli, C., Furlanetto, S., Harker, G., Loeb, A.,
784
+ Pritchard, J., Advances in Space Research. Vol. 49, Iss. 3, 433-450.
785
+ [15] Bentum, M. J. and Chris, V. and Boonstra, A. J., 20th Annual Workshop on Circuits, Systems and Signal
786
+ Processing, ProRISC , Nov. 26, 2009.
787
+ [16] Chen, L., Aminaei, A., Gurvits, L. I., Wolt, M. K., Pourshaghaghi, H. Reza., Yan, Y., Falcke, H., Experimental
788
+ Astronomy, 2018, Vol. 45, Issue 2, pp.231-253.
789
+ [17] Chen, L., Aminaei, A., Falcke, H., Gurvits, L., 2010, Publication of Loughborough Antenna and Propagation
790
+ Conference (LAPC), UK, pp. 93¨C96.
791
+ [18] Chen, X., Yan, J., Deng, L., Wu, F., Wu, L., Xu, Y., Zhou, L., 2021, Philos Trans A Math Phys Eng Sci. 2021
792
+ Jan 11;379(2188):20190566. doi: 10.1098/rsta.2019.0566. Epub 2020 Nov 23. PMID: 33222649; PMCID:
793
+ PMC7739906.
794
+ [19] Compton, Jr. R. T, 1981, IEEE Trans. Antennas and Propagation. Vol. AP-29, No.6, 944-952.
795
+ [20] Dagkesamanskii, R. D., Zheleznykh, I. M., 1989, Pisma v Zhurnal Eksperimentalnoi i Teoreticheskoi Fiziki,
796
+ 50, 233.
797
+ [21] Engel, R., Seckel, D., Stanev, T., 2001, Physics Review D, 64, 093010.
798
+ [22] Engelen, S., Verhoeven, C. J. M., Bentum, M. J., 24th Anuual AIAA/USU Conference on Small Satellites,
799
+ Logan, UT, 2010.
800
+ [23] Gayley, K. G., Mutel, R. L., Jaeger, T.R., 2009, Astrophysical Journal, 706 (2009) 1556¨C1570.
801
+ [24] Gorham, P.W., Allison, P., Baughman, B., et al., 2010, Physics Review Letters, 82, 022004.
802
+ [25] Gorham P.W., Hebert C.L., Liewer K.M., Naudet C.J., Saltzberg D., Williams D., 2004, Physics Review
803
+ Letter, 93(4) 041101-1¨C041101-4.
804
+ [26] Gorham P.W., Liewer K.M., Naudet C.J., Saltzberg D., Williams D., 2000, Proceedings of the RAD-HEP
805
+ 2000,in: AIP ConferenceSeries, vol.579, 2001, pp.177¨C188.
806
+ 16
807
+
808
+ [27] Gopalswamy, N.; Makela, P.; Yashiro, S., 2014 United States National Committee of URSI National, 2014
809
+ vol., no., pp.1-1, 8-11 Jan.
810
+ [28] Greisen K., 1966, Physical Review Letter. 16, 748.
811
+ [29] Gusev, G. A., Maung, K., 2015, Bulletin of the Lebedev Physics Institute, Vol. 42, No. 6, pp. 180¨C186.
812
+ [30] Gusev, G. A., Maung, K., 2017, Bulletin of the Lebedev Physics Institute, 2017, Vol. 44, No. 6, pp. 177¨C181.
813
+ [31] Gusev, G. A., Lomonosov, B. N., Pichkhadze, K. M., Polukhina, N. G., Ryabov, V. A., Saito, T., Sysoev, V.
814
+ K., Feinberg, E. L., Tsarev, V. A., Chechin, V. A., 2006, Cosmic Research, Vol. 44, No. 1, pp. 19¨C38.
815
+ [32] Hankins T.H., Ekers R.D., O’Sullivan J.D., 1996, Monthly Notices of the Royal Astronomical Society, 283,
816
+ 1027¨C1030.
817
+ [33] Hankins T.H., Ekers R.D., O’Sullivan J.D., Proceedings of the RADHEP 2000, in: AIP Conference Series,
818
+ vol.579, 2001, pp.168¨C176.
819
+ [34] Falcke, H., Gorham, P., Protheroe, R.J., 2004, New Astronomy Reviews 48 (2004) 1487¨C1510.
820
+ [35] Herman, J. R., Caruso, J. A., Planet. Space Sci., 1973, Vol. 21, 443-461.
821
+ [36] Jaeger, T.R., Mutel, R.L., Gayley, K.G., 2010, Astroparticle Physics, 34, 293¨C303.
822
+ [37] James C. W., Bray J. D., Ekers R. D., 2019, Research Astronomy and Astrophysics, Vol.19, No, 2, 19.
823
+ [38] James C. W., Ekers R. D., Alvarez-Muniz J., Bray J.D., McFadden R.A., Phillips C.J., Protheroe R.J., Roberts
824
+ P.,2010, Physics Review D, 81(4)(2010)042003-1¨C042003-24.
825
+ [39] James, C. W., Protheroe, R.J., Astropartical Physics, 30(2009)318¨C332.
826
+ [40] Jester, S., Fackle, H., New Astronomy Review, 2009, 53, 1.
827
+ [41] Ji Y.-C, Zhao B.,Fang G.-Y, et al., Journal of Deep Space Exploration, 2017, 4 (2): 150
828
+ [42] Kaiser, M. L., Desch, M. D., Bougeret, J. L., Manning, R., Meetre, C. A., Geophysical Research Letters,
829
+ 1996, Vol. 23, Issue 10, 1287
830
+ [43] Konovalenko, A., Sodin, L., Zakharenko, V. et al., Exp Astron, 2016, 42: 11.
831
+ [44] Klein-Wolt, M., Aminaei A., Zarka, P., Schrader, J. R., Boonstra, A. J., Falcke, H., 2012, Planetary and Space
832
+ Science, 74, 167-178.
833
+ [45] Li C., Su Y., Pettinelli E., Xing S., Ding C., Liu J., Ren X., Lauro S. E., Soldovieri F., Zeng X., Gao X., Chen
834
+ W., Dai S., Liu D., Zhang G., Zuo W., Wen W., Zhang Z., Zhang X., Zhang H., The Moon¡¯s farside shallow
835
+ subsurface structure unveiled by Chang¡¯E-4 Lunar Penetrating Radar. Sci. Adv. 6, eaay6898 (2020).
836
+ [46] Lehtinen, N.G., Gorham, P.W., Jacobson, A.R., and Roussel-Dupre, R.A., Phys. Rev. D: Part. Fields, 2004,
837
+ vol. 69, p. 013008 (astroph/0309656).
838
+ 17
839
+
840
+ [47] Mimoun, D., et al.,2011, Experimental Astronomy, 33: 529-585.
841
+ [48] Scholten, O., Bacelar, J., Braun, R., de Bruyn, A. G., Falcke, H., Stappers, B., Strom, R. G., 2006, Astropar-
842
+ tical Physics, 26, 219.
843
+ [49] Semikoz, D.V., Sigl, G., 2004, Journal of Cosmology and Astroparticle Physics, 04, 003.
844
+ [50] Singh, K., Mevius, M., Scholten, O., Anderson, J.M., vanArdenne, A., Arts, M., Avruch, M., Asgekar, A.,
845
+ Bell, M., Bennema, P., 2012, Nuclear Instruments and Methods, A 664 (2012) 171¨C185.
846
+ [51] Singh, K., Bahren, L., Buitink, S., Falcke, H., Horandel, J., Horneffer, A.,Scholten, O., 2009, Proceedings of
847
+ the 31st ICRC, LODZ 2009.
848
+ [52] Spencer, R.E., Macfarlane, A., Mills, O., Piccirillo, L., Proceedings of Science on 10th EVN Symposium,
849
+ vol.125,2010, number97.
850
+ [53] Stal, O., Bergman, J. E. S., Thide, B., Daldorff, L. K. S., Ingelman, G., 2007, Physics Review Letter,98 (7),
851
+ 071103.
852
+ [54] Zatsepin G. T. and Kuzmin V. A., 1966, JETP LETTERS. 4, 78.
853
+ [55] Zas, E., Halzen, F., Stanev, T., Physics Review D 45 (1992) 362.
854
+ 18
855
+
OdFKT4oBgHgl3EQffi6d/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
PNA0T4oBgHgl3EQfC__J/content/tmp_files/2301.01998v1.pdf.txt ADDED
@@ -0,0 +1,1198 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ DP-SIPS: A simpler, more scalable mechanism for differentially
2
+ private partition selection
3
+ Marika Swanberg∗
4
+ Boston University
5
+ Department of Computer Science
6
+ marikas@bu.edu
7
+ Damien Desfontaines
8
+ Tumult Labs
9
+ damien@desfontain.es
10
+ Samuel Haney
11
+ Tumult Labs
12
+ sam.haney@tmlt.io
13
+ ABSTRACT
14
+ Partition selection, or set union, is an important primitive in differ-
15
+ entially private mechanism design: in a database where each user
16
+ contributes a list of items, the goal is to publish as many of these
17
+ items as possible under differential privacy.
18
+ In this work, we present a novel mechanism for differentially
19
+ private partition selection. This mechanism, which we call DP-SIPS,
20
+ is very simple: it consists of iterating the naive algorithm over
21
+ the data set multiple times, removing the released partitions from
22
+ the data set while increasing the privacy budget at each step. This
23
+ approach preserves the scalability benefits of the naive mechanism,
24
+ yet its utility compares favorably to more complex approaches
25
+ developed in prior work.
26
+ Along the way, this work also gives an alternate definition of
27
+ approximate zero-concentrated DP, and reports some empirical
28
+ observations on the utility of other partition selection mechanisms.
29
+ KEYWORDS
30
+ Differential privacy
31
+ 1
32
+ INTRODUCTION
33
+ Given a set of users, where each is associated to certain items, how
34
+ can we publish as many of these items as possible in a differentially
35
+ private manner? This problem, simple in its formulation, is an im-
36
+ portant primitive for building differentially private systems: it is
37
+ central to privately releasing the result of group-by aggregations
38
+ where the group-by keys are a priori unknown, or impractical to
39
+ enumerate. This is useful in applications like vocabulary extrac-
40
+ tion [10], the private release of search queries [12], or in building
41
+ general-purpose differentially private tooling [1, 6, 18]. In these
42
+ last two use cases, scalability is a primary concern: the input of
43
+ this operation might be too large to fit in the memory of a single
44
+ machine, and the computation must be parallelized across multiple
45
+ machines to avoid unacceptably long running times.
46
+ Differentially private partition selection mechanisms have been
47
+ proposed as early as 2009 [12], but recent work has shed a new light
48
+ on this problem, and proposed alternative approaches that bring
49
+ significant utility gains [5, 10]. To obtain these utility improvements,
50
+ these newer mechanisms use a greedy approach: each user considers
51
+ what items have been contributed by previous users so far, and
52
+ “chooses” which items to contribute according to a policy, chosen
53
+ carefully to maintain a sensitivity bound. Doing so, however, limits
54
+ the scalability of the mechanism obtained in this way: each user
55
+ chooses their contribution based on the contribution of all previous
56
+ ∗Work done while at Tumult Labs
57
+ users, so the data has to be processed one user after another, and
58
+ the overall algorithm cannot be parallelized.
59
+ This raises a natural question: can we achieve the utility benefits
60
+ of policy-based approaches, while preserving the scalability of more
61
+ naive approaches? In this work, we show that both benefits can be
62
+ combined. We introduce a new approach, which we call DP-SIPS,
63
+ short for scalable, iterative partition selection. DP-SIPS relies a simple
64
+ idea: rather than having to process the data of one user at a time,
65
+ we run the naive, massively-parallelizable algorithm multiple times,
66
+ splitting the privacy budget between each step. Surprisingly, this
67
+ mechanism turns out to have a similar utility to greedy approaches,
68
+ but scales horizontally: increasing the number of cluster nodes
69
+ significantly reduces runtime.
70
+ The rest of this paper is organized as follows:
71
+ • In Section 2, we formally define the problem and the building
72
+ blocks we use for DP-SIPS and its privacy accounting.
73
+ • In Section 3, we give an alternate definition of approxi-
74
+ mate zCDP and prove that it satisfies composition, post-
75
+ processing, and other useful properties.
76
+ • In Section 4, we detail existing approaches to differentially
77
+ private partition selection.
78
+ • In Section 5, we introduce our algorithm and the proof of its
79
+ privacy guarantees.
80
+ • In Section 6, we report on the experimental evaluation of
81
+ DP-SIPS.
82
+ • Finally, in Section 7, we discuss our results, report on some
83
+ unsuccessful approaches that we tried, and outline directions
84
+ for future work.
85
+ 2
86
+ PRELIMINARIES
87
+ A data set 𝑥 = (𝑊1, . . . ,𝑊𝑁 ) contains a set of user lists 𝑊𝑖 ∈ U∗.
88
+ We refer to elements in U as items or partitions, and define partition
89
+ selection (also called key selection or set union) as follows:
90
+ Definition 1 (Private Partition Selection Problem). Given
91
+ a (possibly unbounded) universe U of items and a data set 𝑥 =
92
+ (𝑊1, . . . ,𝑊𝑁 ) of user lists 𝑊𝑖 ∈ U∗, an algorithm M solves the
93
+ private partition selection problem if it is differentially private, and
94
+ M outputs a set 𝑆 ⊆ ∪𝑖𝑊𝑖.
95
+ We begin by presenting the standard notion of differential pri-
96
+ vacy. Two data sets 𝑥,𝑥 ′ are neighbors if they differ on one user’s list:
97
+ 𝑥 = 𝑥 ′ ∪𝑊𝑖∗. Informally, differential privacy requires that an algo-
98
+ rithm’s output is distributed similarly on every pair of neighboring
99
+ data sets.
100
+ Definition 2 (Differential Privacy [7, 8]). A randomized
101
+ algorithm M : U∗ → Y is (𝜀,𝛿)-differentially private if for every
102
+ arXiv:2301.01998v1 [cs.CR] 5 Jan 2023
103
+
104
+ Marika Swanberg, Damien Desfontaines, and Samuel Haney
105
+ pair of neighboring datasets 𝑥,𝑥 ′ ∈ U∗ and for all subsets 𝑌 ⊆ Y,
106
+ Pr[M(𝑥) ∈ 𝑌] ≤ 𝑒𝜀 · Pr[M(𝑥 ′) ∈ 𝑌] + 𝛿.
107
+ A common variant of differential privacy, called zCDP, is use-
108
+ ful for analyzing algorithms that sample noise from a Gaussian
109
+ distribution (as ours will). The definition of zCDP uses the Rényi
110
+ Divergence:
111
+ Definition 3 (Rényi Divergence). Fix two probability distribu-
112
+ tions 𝑃 and 𝑄 over a discrete domain 𝑆. Given a positive 𝛼 ≠ 1, Rényi
113
+ divergence of order 𝛼 of distributions 𝑃 and 𝑄 is
114
+ 𝐷𝛼 (𝑃||𝑄) =
115
+ 1
116
+ 1 − 𝛼 log
117
+ �∑︁
118
+ 𝑥 ∈𝑆
119
+ 𝑃(𝑥)𝛼𝑄(𝑥)1−𝛼
120
+
121
+ .
122
+ Definition 4 (𝜌-zCDP [3]). A randomized mechanism M : X∗ →
123
+ Y satisfies 𝜌-zCDP if, for all 𝑥,𝑥 ′ ∈ X∗ differing on a single entry,
124
+ 𝐷𝛼 (𝑀(𝑥)||𝑀(𝑥 ′)) ≤ 𝜌 · 𝛼
125
+ ∀𝛼 ∈ (1, ∞).
126
+ (1)
127
+ We will relax this definition to a definition approximate zCDP
128
+ in the following section. A common primitive in building private
129
+ algorithms, the Gaussian Mechanism, satisfies 𝜌-zCDP.
130
+ Definition 5 (Gaussian Distribution). The Gaussian distribu-
131
+ tion with parameter 𝜎 and mean 0, denoted N (0, 𝜎2) is defined for
132
+ all ℓ ∈ R and has probability density
133
+ ℎ(ℓ) =
134
+ 1
135
+ 𝜎
136
+
137
+ 2𝜋
138
+ 𝑒− ℓ2
139
+ 2𝜎2 .
140
+ Definition 6 (ℓ2-Sensitivity). Let 𝑓 : U𝑛 → R𝑑 be a function.
141
+ Its ℓ2-sensitivity is
142
+ Δ𝑓 =
143
+ max
144
+ 𝑥,𝑥′∈U
145
+ 𝑥,𝑥′𝑛𝑒𝑖𝑔ℎ𝑏𝑜𝑟𝑠
146
+ ∥𝑓 (𝑥) − 𝑓 (𝑥 ′)∥2.
147
+ Definition 7 (Gaussian Mechanism). Let 𝑓 : U𝑛 → R𝑑 be a
148
+ function with ℓ2-sensitivity Δ𝑓 . Then the Gaussian mechanism is the
149
+ algorithm
150
+ M𝑓 (𝑥) = 𝑓 (𝑥) + (𝑍1, . . . ,𝑍𝑑),
151
+ where 𝑍𝑖 ∼ N
152
+
153
+ 0,
154
+ Δ2
155
+ 𝑓
156
+ 2𝜌 · I
157
+
158
+ . Algorithm M𝑓 satisfies 𝜌-zCDP.
159
+ 3
160
+ ALTERNATE DEFINITION OF
161
+ APPROXIMATE ZCDP
162
+ In this section, we give an alternate definition of the definition
163
+ of approximate zCDP given in [3]. While Bun et. al. write that
164
+ they intended for their definition to be a generalization of (𝜖,𝛿)-
165
+ approximate differential privacy, the definition seems more similar
166
+ to what is often called probabilistic differential privacy [13], although
167
+ this may not be the interpretation the authors intended 1. As shown
168
+ in [13], (an interpretation of) this definition is not closed under
169
+ post processing, as the authors of [3] claim. To resolve this issue,
170
+ we give a new definition of approximate zCDP.
171
+ 1From an exchange with the authors of [3], we believe the intended definition of
172
+ approximate zCDP is more similar to the definition that we propose in this paper,
173
+ depending on the interpretation of an “event” in their definition. However, the inter-
174
+ pretation used in [13] that leads to a breakdown in closure under postprocesssing is
175
+ likely to be a common one. For that reason, we give an alternate definition in this
176
+ paper.
177
+ Definition 8 (Approximate zCDP). A randomized mechanism
178
+ 𝑀 : X∗ → Y is 𝛿-approximately 𝜌-zCDP if, for all 𝑥,𝑥 ′ ∈ X∗
179
+ differing on a single entry, letting 𝑃 and 𝑄 denote the distributions of
180
+ 𝑀(𝑥) and 𝑀(𝑥 ′) respectively, there exists a distribution 𝑃 ′ over the
181
+ output domain of 𝑀 such that
182
+ 𝑇𝑉𝐷(𝑃, 𝑃 ′) ≤ 𝛿, and
183
+ 𝐷𝛼 (𝑃 ′||𝑄) ≤ 𝜌 · 𝛼,
184
+ for all 𝛼 ∈ (1, ∞). Here, 𝑇𝑉𝐷 is the total variation distance, which is
185
+ defined formally as
186
+ 𝑇𝑉𝐷(𝑃,𝑄) = sup
187
+ 𝑆
188
+ |𝑃(𝑆) − 𝑄(𝑆)|.
189
+ Intuitively, this definition says that the mechanism should be
190
+ "close" to one that satisfies pure zCDP, in terms of the distribution
191
+ on each output.
192
+ Note that we can define approximate differential privacy in sim-
193
+ ilar way. That is, [9] shows the follow equivalence:
194
+ Lemma 9 ([9] (Lemma 3.17)). A randomized mechanism 𝑀 :
195
+ X∗ → Y is (𝜖,𝛿)-approximately DP if and only if, for all 𝑥,𝑥 ′ ∈ X∗
196
+ differing on a single entry, letting 𝑃 and 𝑄 denote the distributions of
197
+ 𝑀(𝑥) and 𝑀(𝑥 ′) respectively, there exists a distribution 𝑃 ′ over the
198
+ output domain of 𝑀 such that
199
+ 𝑇𝑉𝐷(𝑃, 𝑃 ′) ≤ 𝛿, and
200
+ 𝑃 ′(𝑆) ≤ exp(𝜖)𝑄(𝑆)
201
+ for all sets 𝑆 of possible outputs of 𝑀.
202
+ We can use Definition 8 in place of Definition 15 from [3] and
203
+ most results from [3] hold as is, except that our conversion from ap-
204
+ proximate zCDP to approximate DP is slightly worse (see Lemma 12),
205
+ and our composition bound is slightly worse (see Lemma 10). In
206
+ the rest of this section, we state some facts about approximate
207
+ zCDP that we will use in the rest of the paper. Proofs appear in
208
+ Appendix C.
209
+ First, we give composition and postprocessing lemmas.
210
+ Lemma 10 (Composition). Suppose M : X𝑁 → Y and M′ :
211
+ X𝑁 → Z satisfy 𝛿-approximate 𝜌-zCDP and 𝛿′-approximate 𝜌′-
212
+ zCDP, respectively. Then the composition M′′(𝑥) = (M(𝑥), M′(𝑥))
213
+ satisfies (𝛿 + 𝛿′)-approximate (𝜌 + 𝜌′)-zCDP.
214
+ Lemma 11 (Post-Processing). Suppose M : X𝑁 → Y satisfies
215
+ 𝛿-approximate 𝜌-zCDP and let 𝑓 : Y → Z be a function. Then,
216
+ 𝑓 ◦ M satisfies 𝛿-approximate 𝜌-zCDP.
217
+ Next, we can convert approximate zCDP to approximate DP
218
+ using the following lemma.
219
+ Lemma 12. Suppose we can show that every mechanism that sat-
220
+ isfies 𝜌-zCDP must satisfy 𝜖∗(𝜌),𝛿∗(𝜌)-approximate DP. That is,
221
+ (𝜖∗,𝛿∗) is a function converting a (pure) zCDP guarantee to an approx-
222
+ imate DP guarantee. Suppose M : X𝑁 → Y satisfies 𝛿-approximate
223
+ 𝜌-zCDP. Then, M satisfies (𝜖∗(𝜌),𝛿 + 𝛿∗(𝜌))-DP.
224
+ Lemma 12 differs from the conversion lemma in [3] in two ways.
225
+ First, we are only able to show a slightly worse bound, 𝛿 + 𝛿∗(𝜌)
226
+ rather than 𝛿 + (1 − 𝛿)𝛿∗(𝜌). Second, we change the presentation
227
+ of the lemma so we can use any conversion function from zCDP
228
+ to approximate DP. Specifically, [4] give the following conversion,
229
+ improving on the conversion given in [3]:
230
+
231
+ DP-SIPS: A simpler, more scalable mechanism for differentially private partition selection
232
+ Lemma 13 ([4], Proposition 7). Suppose M : X𝑁 → Y satisfies
233
+ 𝜌-zCDP. Then M satisfies (𝜖,𝛿)-approximate DP for any 𝜖 > 0 and
234
+ 𝛿 =
235
+ inf
236
+ 𝛼 ∈(1,∞)
237
+ exp((𝛼 − 1)(𝛼 · 𝜌 − 𝜖))
238
+ 𝛼 − 1
239
+
240
+ 1 − 1
241
+ 𝛼
242
+ �𝛼
243
+ .
244
+ (2)
245
+ Combining Lemma 12 and Lemma 13 gives us the following.
246
+ Corollary 14. Suppose M : X𝑁 → Y satisfies 𝛿-approximate
247
+ 𝜌-zCDP. Then M satisfies (𝜖,𝛿 + 𝛿′)-approximate DP for any 𝜖 > 0
248
+ and
249
+ 𝛿′ =
250
+ inf
251
+ 𝛼 ∈(1,∞)
252
+ exp((𝛼 − 1)(𝛼 · 𝜌 − 𝜖))
253
+ 𝛼 − 1
254
+
255
+ 1 − 1
256
+ 𝛼
257
+ �𝛼
258
+ .
259
+ (3)
260
+ Finally, we define a notion of probabilistic zCDP that is inspired
261
+ by, but not equivalent to, Definition 8.1 in [3]. We show that proba-
262
+ bilistic zCDP implies approximate DP (though the converse is not
263
+ true). This will be useful, e.g., in the proof of Theorem 17 since the
264
+ probabilistic definition can be more natural to use.
265
+ Definition 15 (Probabilistic zCDP). A randomized mechanism
266
+ 𝑀 : X∗ → Y is 𝛿-probabilistically 𝜌-zCDP if, for all 𝑥,𝑥 ′ ∈ X∗
267
+ differing on a single entry, there exists an event 𝐸 ⊆ Y such that, for
268
+ all 𝛼 ∈ (1, ∞),
269
+ 𝐷𝛼 (𝑀(𝑥)|𝐸||𝑀(𝑥 ′)) ≤ 𝜌 · 𝛼.
270
+ and Pr[𝑀(𝑥) ∈ 𝐸] ≥ 1 − 𝛿. Here 𝑀(𝑥)|𝐸 denotes the output distri-
271
+ bution of 𝑀(𝑥) conditioned on 𝐸.
272
+ We next claim that Definition 15 implies Definition 8 (an analagous
273
+ fact is true about probabilistic DP and approximate DP).
274
+ Lemma 16. Any mechanism satisfying 𝛿-probabilistic 𝜌-zCDP also
275
+ satisfies 𝛿-approximate 𝜌-zCDP.
276
+ 4
277
+ PRIOR APPROACHES
278
+ In this section we discuss three existing algorithms for differentially
279
+ private partition selection. We begin with the naive algorithm,
280
+ called Weighted Gaussian, in Section 4.1. In Section 4.2, we present
281
+ Policy Gaussian [10] and Greedy updates Without sampling [5].
282
+ The algorithms all have three main steps: first, they compute a
283
+ weighted histogram, which is simply a mapping from an item𝑢 ∈ U
284
+ to a weight 𝐻 [𝑢] ∈ R; second, they add calibrated noise to each item
285
+ in the histogram; and lastly, they release items that are above some
286
+ appropriately-chosen threshold 𝑇. The primary difference between
287
+ the algorithms is in how they compute the weighted histogram.
288
+ This high-level algorithm for private partition selection is described
289
+ in Algorithm 1, which can be composed with different weighted
290
+ histogram algorithms.
291
+ 4.1
292
+ Baseline: Weighted Gaussian
293
+ The Weighted Gaussian algorithm (Algorithm 2) is one of the sim-
294
+ plest algorithms for private partition selection. To build a weighted
295
+ histogram, the algorithm first pre-processes the data set by remov-
296
+ ing any duplicates within a user’s set and truncating each user’s set
297
+ to have at most Δ0 items. Then, the users compute a histogram with
298
+ bounded ℓ2-sensitivity as follows: each user 𝑖 updates the weight
299
+ 𝐻 [𝑢] for each of the items 𝑢 in their set 𝑊𝑖 with the following rule:
300
+ 𝐻 [𝑢] ← 𝐻 [𝑢] +
301
+ 1
302
+ √︁
303
+ |𝑊𝑖 |
304
+ .
305
+ Algorithm 1 High-Level Partition Selection Algorithm
306
+ Input: Data set of user partitions 𝑥 = (𝑊1, . . . ,𝑊𝑁 )
307
+ Weighted Histogram Algorithm Weighted_Hist
308
+ Threshold 𝑇
309
+ Noise distribution D
310
+ Output: Partitions 𝑆 ⊆ ∪𝑖𝑊𝑖
311
+ 1: Initialize empty set 𝑆 ← {}
312
+ 2: 𝐻 ← Weighted_Hist(𝑥)
313
+ ⊲ Compute weighted histogram
314
+ 3: for 𝑢 ∈ 𝑆𝑢𝑝𝑝(𝐻) do
315
+ 4:
316
+ 𝑍𝑢 ∼ D
317
+ 5:
318
+ ˆ𝐻 [𝑢] ← 𝐻 [𝑢] + 𝑍𝑢
319
+ 6:
320
+ if ˆ𝐻 [𝑢] ≥ 𝑇 then
321
+ 7:
322
+ 𝑆 ← 𝑆 ∪ {𝑢}
323
+ return 𝑆
324
+ The resulting weighted histogram has an ℓ2-sensitivity of 1, so it
325
+ can be composed with the high-level algorithm using calibrated
326
+ Gaussian noise and an appropriate threshold to ensure that the over-
327
+ all algorithm satisfies 𝛿-approximate 𝜌-zCDP. In the statement of
328
+ Algorithm 2, Φ(·) is used to denote the cumulative density function
329
+ of the standard Gaussian distribution and Φ−1(·) is its inverse.
330
+ Algorithm 2 Weighted Gaussian
331
+ Input: Data set 𝑥 = (𝑊1, . . . ,𝑊𝑁 )
332
+ Privacy parameters (𝜌,𝛿)
333
+ Maximum per-user contribution Δ0
334
+ Output: Partitions 𝑆 ⊆ ∪𝑖𝑊𝑖
335
+ 1: Initialize empty histogram 𝐻 ← {}
336
+ 2: Initialize empty set 𝑆 ← {}
337
+ 3: for 𝑖 = 1, . . . , 𝑁 do
338
+ 4:
339
+ 𝑊𝑖 ← get rid of duplicate items from 𝑊𝑖
340
+ 5:
341
+ 𝑊𝑖 ← uniformly sample at most Δ0 items from 𝑊𝑖
342
+ 6:
343
+ for 𝑢 ∈ 𝑊𝑖 do
344
+ 7:
345
+ 𝐻 [𝑢] ← 𝐻 [𝑢] +
346
+ 1
347
+
348
+ |𝑊𝑖 |
349
+ 8: 𝜎 ←
350
+ 1
351
+ √2𝜌
352
+ 9: 𝑇 ← max𝑘 ∈[Δ0]
353
+
354
+ 1
355
+
356
+ 𝑘 + 𝜎 · Φ−1 �
357
+ (1 − 𝛿)1/𝑘��
358
+ 10: for 𝑢 ∈ 𝑆𝑢𝑝𝑝(𝐻) do
359
+ 11:
360
+ ˆ𝐻 [𝑢] ← 𝐻 [𝑢] + N (0, 1
361
+ 2𝜌 )
362
+ 12:
363
+ if ˆ𝐻 [𝑢] ≥ 𝑇 then
364
+ 13:
365
+ 𝑆 ← 𝑆 ∪ {𝑢}
366
+ return 𝑆
367
+ Theorem 17. Fix any 𝜌 > 0, any 𝛿 ∈ (0, 1), and any Δ0 ∈ N. The
368
+ Weighted Gaussian algorithm (Algorithm 2) satisfies 𝛿-approximate
369
+ 𝜌-zCDP.
370
+ See Appendix A for the proof of Theorem 17.
371
+ The Weighted Gaussian algorithm benefits from being highly
372
+ scalable. In particular, it lends itself well to parallel computation
373
+ across several computers within a cluster, because the weights on
374
+ each histogram item can be computed in parallel as well as the noise
375
+ addition and thresholding steps (see Figure 4). Thus, Algorithm 2 is
376
+ the standard approach for doing partition selection on data sets that
377
+ are too large to fit in a single machine’s memory. Unfortunately,
378
+
379
+ Marika Swanberg, Damien Desfontaines, and Samuel Haney
380
+ Weighted Gaussian suffers from poor accuracy compared to the
381
+ greedy approaches we discuss next.
382
+ 4.2
383
+ Greedy Approaches
384
+ One problem with the Weighted Gaussian algorithm is users waste
385
+ their sensitivity budget on histogram items that are already well
386
+ above the threshold. Most real-world data has highly skewed item
387
+ frequencies, but Weighted Gaussian increments all items in a user’s
388
+ set by the same amount.
389
+ The two greedy algorithms we discuss next solve this problem by
390
+ iterating through the users one-by-one and using an update policy
391
+ and the current state of the histogram to decide how to allocate
392
+ weight across the items in their set. That is, each user’s update de-
393
+ pends on previous users’ updates. For example, in both algorithms,
394
+ users do not contribute to items in 𝐻 that have already reached 𝑇 ∗,
395
+ the threshold 𝑇 plus some positive buffer; items that have reached
396
+ the buffered threshold are very likely to be returned after noise
397
+ is added and thus do not need more weight. These adaptive up-
398
+ date rules are carefully chosen so that the overall algorithm has
399
+ bounded global sensitivity. As we will discuss in later sections, the
400
+ main downside of these algorithms is their sequential nature. By
401
+ design, they require iterating over the entire data set, which may
402
+ be prohibitively slow for industrial data sets.
403
+ 4.2.1
404
+ Policy Gaussian [10]. As with the Weighted Gaussian algo-
405
+ rithm, the data set is pre-processed by removing duplicate items
406
+ from each user’s set and truncating the user sets to some fixed
407
+ maximum size Δ0. Then, the algorithm iterates sequentially over
408
+ the users and, for each item 𝑢 in user 𝑖’s set𝑊𝑖, the user increments
409
+ 𝐻 [𝑢] by a weight that is proportional to 𝑇 ∗ − 𝐻 [𝑢], where 𝑇 ∗ is
410
+ equal to 𝑇 plus some positive buffer. Essentially, items that are fur-
411
+ ther from the buffered threshold get more weight added to them, and
412
+ those that have already reached the buffered threshold get none. The
413
+ weight that a user adds to each item is normalized so a single user’s
414
+ update to 𝐻 has an ℓ2-norm of at most 1.
415
+ Gopi et al. prove that the global ℓ2-sensitivity of the entire Policy
416
+ Gaussian algorithm is bounded by 1, so applying the high-level al-
417
+ gorithm (Algorithm 1) with appropriately-scaled Gaussian noise to
418
+ each item and thresholding are sufficient for satisfying differential
419
+ privacy.
420
+ 4.2.2
421
+ Greedy updates Without sampling (GW) [5]. Carvalho et
422
+ al. observe that, rather than removing duplicate items in a user’s
423
+ set, one can use this frequency information to decide where to
424
+ allocate sensitivity budget. GW iterates over the users, and each
425
+ user computes 𝑢∗: the most frequent item in their list such that
426
+ 𝐻 [𝑢∗] is below the buffered threshold 𝑇 ∗. Then, the user incre-
427
+ ments 𝐻 [𝑢∗] by min(1,𝑇 ∗ − 𝐻 [𝑢∗],𝑏𝑢𝑑𝑔𝑒𝑡) where 𝑏𝑢𝑑𝑔𝑒𝑡 is the
428
+ user’s remaining ℓ1 budget. This process is repeated until the user’s
429
+ initial budget of 1 is consumed or the user has no items left that are
430
+ below 𝑇 ∗2. Carvalho et al. prove that their GW algorithm for build-
431
+ ing a weighted histogram has a global ℓ1-sensitivity bound of 1, so
432
+ running the high-level algorithm (Algorithm 1) with Laplace noise
433
+ and thresholding is sufficient for satisfying differential privacy.
434
+ 2As the algorithm is presented in [5], it does not always terminate. In particular, they
435
+ do not consider the case when a user has budget left but all of its items have reached
436
+ 𝑇 ∗, but adding this condition luckily does not affect the privacy proof.
437
+ One notable feature of their algorithm is that it can use item fre-
438
+ quency information from a public data set to increase the accuracy
439
+ of the frequency estimates. We do not use this feature when com-
440
+ paring it to other algorithms, since our goal is to create a general-
441
+ purpose algorithm that could be incorporated into a privacy frame-
442
+ work without requiring a data analyst to input a public data set.
443
+ 5
444
+ DP-SIPS
445
+ In this section we present DP-SIPS: Differentially Private Scalable,
446
+ Iterative Partition Selection, detailed in Algorithm 3. The basic
447
+ structure is quite simple: it runs Weighted Gaussian on the data set
448
+ multiple times with increasing privacy budget (and corresponding
449
+ decreasing thresholds); on each iteration, the partitions returned
450
+ by Weighted Gaussian are removed from each of the users’ sets.
451
+ Because Weighted Gaussian updates the histogram uniformly
452
+ across a user’s items, when returned partitions are removed from
453
+ the user’s set, they can allocate more weight to their remaining
454
+ items (see Figure 1). The first iteration returns the very popular
455
+ items using only a tiny fraction of the overall privacy budget, and
456
+ subsequent iterations yield less frequent items. The action of the
457
+ algorithm is twofold: in each iteration, the threshold is lowered at
458
+ the same time as users allocate more weight to each of the items that
459
+ remain in their sets (since the user sets get smaller when previously-
460
+ returned items are removed). DP-SIPS is parallelizable because
461
+ the steps within each iteration are parallelizable, and only a few
462
+ iterations are required to achieve good accuracy (see Figure 4).
463
+ Furthermore, the user sets are re-truncated on each iteration
464
+ after the previously returned partitions are removed. For data sets
465
+ that are both skewed in the item frequencies and in the sizes of
466
+ users’ sets, this allows additional items to be included on each
467
+ iteration.
468
+ Algorithm 3 DP-SIPS: Scalable, Iterative Partition Selection
469
+ Input: Data set of user partitions 𝑥 = (𝑊1, . . . ,𝑊𝑁 )
470
+ Maximum per-user contribution Δ0
471
+ Privacy parameters 𝜌,𝛿
472
+ Number of iterations 𝐼
473
+ Privacy budget allocation factor 𝑟
474
+ Output: Subset 𝑆 ⊆ ∪𝑖𝑊𝑖
475
+ 1: 𝑆 ← {}
476
+ 2: for 𝑖 = 0, . . . , 𝐼 − 1 do
477
+ 3:
478
+ (𝜌𝑖,𝛿𝑖) ←
479
+
480
+ 𝜌 · 𝑟𝐼−𝑖−1 · 1−𝑟
481
+ 1−𝑟𝐼 ,
482
+ 𝛿 · 𝑟𝐼−𝑖−1 · 1−𝑟
483
+ 1−𝑟𝐼
484
+
485
+ 4:
486
+ 𝑃𝑖 ← Weighted_Gauss (𝑥 = (𝑊1, . . . ,𝑊𝑁 ), 𝜌𝑖,𝛿𝑖, Δ0)
487
+ 5:
488
+ for 𝑗 ∈ 𝑁 do
489
+ 6:
490
+ 𝑊𝑗 ← 𝑊𝑗 \ 𝑃𝑖
491
+ ⊲ Remove already-found paritions
492
+ 7:
493
+ 𝑆 ← 𝑆 ∪ 𝑃𝑖
494
+ return 𝑆
495
+ Theorem 18. For any 𝜌 > 0 and any 𝛿 ∈ (0, 1], Algorithm 3
496
+ satisfies 𝛿-approximate 𝜌-zCDP.
497
+ Proof. By Theorem 17, each call to Weighted_Gauss satisfies𝛿𝑖-
498
+ approximate 𝜌𝑖-zCDP. Applying composition and postprocessing
499
+ (Lemmas 10 and 11), Algorithm 3 satisfies
500
+ ��𝐼−1
501
+ 𝑖=0 𝛿𝑖
502
+
503
+ -approximate
504
+ ��𝐼−1
505
+ 𝑖=0 𝜌𝑖
506
+
507
+ -zCDP.
508
+
509
+ DP-SIPS: A simpler, more scalable mechanism for differentially private partition selection
510
+ A B C D E F G H I J K
511
+ Weighted Gaussian
512
+ DP-SIPS Iteration 1
513
+ DP-SIPS Iteration 2
514
+ DP-SIPS Iteration 3
515
+ A B C D E F G H I J K
516
+ A B C D E F G H I J K
517
+ A B C D E F G H I J K
518
+ Previously returned:
519
+ A, B
520
+ Previously returned:
521
+ A, B, C, D, E
522
+ A B C D E F G H I J K
523
+ Weighted Gaussian
524
+ DP-SIPS Iteration 1
525
+ DP-SIPS Iteration 2
526
+ DP-SIPS Iteration 3
527
+ A B C D E F G H I J K
528
+ A B C D E F G H I J K
529
+ A B C D E F G H I J K
530
+ Previously returned:
531
+ A, B
532
+ Previously returned:
533
+ A, B, C, D, E
534
+ Figure 1: Depiction of Weighted Gaussian noisy histogram (left) compared to intermediate SIPS noisy histograms (right three
535
+ diagrams) on a skewed data set. Solid blue bars represent partitions that will be returned, and yellow outlines represent the
536
+ weight of each item on the previous iteration. Although the threshold for Weighted Gaussian is lower than each of the SIPS
537
+ thresholds, SIPS benefits from less-frequent items getting increased weight as returned partitions are removed from user sets.
538
+ Now, we will solve for these summations using the closed-form
539
+ formula for geometric sums.
540
+ 𝐼−1
541
+ ∑︁
542
+ 𝑖=0
543
+ 𝜌𝑖 =
544
+ 𝐼−1
545
+ ∑︁
546
+ 𝑖=0
547
+ 𝜌 · 𝑟𝐼−𝑖−1 · 1 − 𝑟
548
+ 1 − 𝑟𝐼 = 𝜌 · 1 − 𝑟
549
+ 1 − 𝑟𝐼 ·
550
+ 𝐼−1
551
+ ∑︁
552
+ 𝑖=0
553
+ 𝑟𝐼−𝑖−1
554
+ = 𝜌 · 1 − 𝑟
555
+ 1 − 𝑟𝐼 ·
556
+ 𝐼−1
557
+ ∑︁
558
+ 𝑗=0
559
+ 𝑟 𝑗 = 𝜌 · 1 − ����
560
+ 1 − 𝑟𝐼 · 1 − 𝑟𝐼
561
+ 1 − 𝑟 = 𝜌.
562
+ An identical calculation holds for �𝐼−1
563
+ 𝑖=0 𝛿𝑖. Thus, Algorithm 3 satis-
564
+ fies 𝛿-approximate 𝜌-zCDP.
565
+
566
+ 6
567
+ EXPERIMENTAL RESULTS
568
+ We use data sets of several different sizes from varied text domains
569
+ to validate the empirical performance of the DP-SIPS algorithm. As
570
+ with prior works, we focus on the problem of vocabulary discovery
571
+ under the constraint of user-level privacy. We begin by describing
572
+ the data sets and computing clusters for our experiments in Sec-
573
+ tions 6.1 and 6.2. Then, in Section 6.3 we discuss how the empirical
574
+ accuracy of DP-SIPS compares to that of existing algorithms, and in
575
+ Section 6.4 we discuss the results from our scalability experiments.
576
+ 6.1
577
+ Data sets
578
+ We use six publicly-available data sets to study the accuracy of
579
+ DP-SIPS against existing partition selection algorithms, and we
580
+ use the largest of the six for scalability experiments on Amazon
581
+ Elastic Map Reduce clusters. Reddit [15] is a data set of text posts
582
+ collected from r/AskReddit which appeared as a benchmark in
583
+ [5, 10]. We also use four data sets that appeared in [5]: Twitter [16],
584
+ comprising customer support tweets to and from large corporations;
585
+ Finance [2], financial headlines for stocks; IMDb [14], a set of movie
586
+ reviews scraped from IMDb; Wikipedia [17], a set of wikipedia
587
+ abstracts (where we treat each abstract as a separate user set).
588
+ For the scalability experiments, we use Amazon [11], a publicly-
589
+ available text data set from Kaggle of 4 million Amazon product
590
+ reviews. Using the same methods as [5, 10], we preprocess all data
591
+ sets using tokenization with nltk.word_tokenize, removing URLs
592
+ and symbols, and lower casing all words.
593
+ 6.2
594
+ Cluster settings
595
+ For the two scalability experiments described in Tables 5 and 6, we
596
+ implement all of the algorithms in PySpark to take advantage of
597
+ parallelism in the algorithms. We then run the algorithms on the
598
+ Amazon dataset on Amazon Elastic Map Reduce (EMR) clusters. In
599
+ the first experiment, we use clusters with one master node of type
600
+ m5a.2xlarge and varying numbers of core nodes of type m5a.2xlarge.
601
+ All of the privacy parameters and hyperparameters are identical to
602
+ those listed in Table 1.
603
+ For the second experiment (Table 6), we fix a cluster with one
604
+ master node and two core nodes and vary the machine size of the
605
+ nodes. We modify the PySpark session settings to proportionally
606
+ increase the number of cores and memory allocation for both dri-
607
+ ver and executor nodes to reflect the available resources of the
608
+ machines.
609
+ In both benchmark experiments, we measure the amount of time
610
+ required to run the algorithm (after the dataset has already been
611
+ read in to PySpark) and return the number of partitions that were
612
+ discovered, in order to ensure PySpark’s lazy evaluation executed
613
+ the algorithm during the timing phase.
614
+ 6.3
615
+ Accuracy results
616
+ Because the goal of partition selection is to privately output as
617
+ many partitions as possible, we measure accuracy as the number
618
+ of partitions released. We test the accuracy of the four algorithms
619
+ on the data sets described in Section 6.1: Weighted Gaussian (Al-
620
+ gorithm 2), SIPS (our Algorithm 3), Policy Gaussian (DPSU) from
621
+ [10], and Greedy updates Without sampling (GW) from [5]. Table 1
622
+ shows the following trends for SIPS:
623
+ • In general, the accuracy of our algorithm (SIPS) is on-par
624
+ with or better than that of DPSU on the 6 data sets tested
625
+ • The accuracy of SIPS is consistently approximately double
626
+ that of Weighted Gaussian.
627
+ The accuracy of GW is higher than that of DPSU on 4 of the 6
628
+ data sets; however, surprisingly on the IMDb and Wikipedia data
629
+ sets its accuracy is below that of Weighted Gaussian. To further
630
+ investigate this phenomenon, we modify these two data sets in
631
+
632
+ Marika Swanberg, Damien Desfontaines, and Samuel Haney
633
+ addition to Finance to remove repeated items within each user’s
634
+ set (see the data sets marked as “deduplicated” in Table 1). Without
635
+ any item frequency information, the algorithm adds all of its weight
636
+ to a randomly-selected item, so one would expect the performance
637
+ on all three data sets to be worse than Weighted Gaussian. To the
638
+ contrary, GW’s accuracy on the deduplicated Finance data set is
639
+ still significantly higher than the others, and GW’s accuracy on the
640
+ deduplicated IMDb and Wikipedia is again worse than Weighted
641
+ Gaussian.
642
+ One possible explanation is the relatively small vocabulary size
643
+ compared to the number of users and the small user sets in the
644
+ Finance data set, since stock headlines are often short and for-
645
+ mulaic. We do not attempt to resolve this discrepancy in GW’s
646
+ accuracy; however, we present these results as a caution when se-
647
+ lecting a partition selection algorithm. Note also that the accuracy
648
+ of the other three algorithms is unaffected by deduplication since
649
+ they all perform this preprocessing step to the data sets.
650
+ 6.3.1
651
+ Comparing zCDP to DP. We implement our algorithm to sat-
652
+ isfy approximate zCDP to take advantage of its simpler composition
653
+ properties over standard DP. In [10], the Policy Gaussian algorithm
654
+ satisfies (𝜀,𝛿)-DP, but the threshold and Gaussian noise can easily
655
+ be recalibrated to satisfy approximate zCDP instead. We do so in
656
+ this work to facilitate the comparison to our algorithm. Unfortu-
657
+ nately, the GW algorithm from [5] has a bounded ℓ1-sensitivity
658
+ and uses Laplace noise, so it is not easily converted to satisfy ap-
659
+ proximate zCDP without a significant loss in accuracy. Instead, we
660
+ use Corollary 14 with 𝛼 = 10 to choose (𝜀,𝛿′) parameters implied
661
+ by the settings of 𝜌 and 𝛿 used on the other algorithms. This con-
662
+ version from approximate zCDP to approximate DP is not tight
663
+ (meaning the given (𝜀,𝛿) may be higher than the true privacy guar-
664
+ antee given by the approximate zCDP parameters). Because of the
665
+ looseness of the conversion, the GW algorithm’s accuracy may be
666
+ slightly inflated when compared to the other algorithms.
667
+ 6.3.2
668
+ Selecting hyperparameters. Our algorithm has several hyper-
669
+ parameters (aside from the privacy parameters 𝜌 and 𝛿) that need
670
+ to be set by the data analyst: Δ0 the maximum number of items per
671
+ user, 𝑟 the ratio between the privacy budget for iteration 𝑖 + 1 and
672
+ 𝑖, and 𝐼 the number of iterations in the algorithm. One option is
673
+ to divide the privacy budget and try several hyperparameter set-
674
+ tings and select the setting with the highest accuracy; however, this
675
+ wastes a lot of privacy budget. We find that the accuracy of DP-SIPS
676
+ is largely invariant to reasonable settings of the hyperparameters,
677
+ and we provide general rules of thumb for selecting them.
678
+ Figures 2 and 3 display the accuracy of our algorithm on five data
679
+ sets with varying 𝑟 and Δ0, respectively. For the given setting of 𝛿
680
+ and 𝜌, the accuracy of DP-SIPS is largely unaffected by different
681
+ choices of 𝑟 and Δ0 across 5 data sets, and Table 2 shows that the
682
+ accuracy of DP-SIPS only slightly increases with the number of
683
+ iterations 𝐼 on the Reddit data set (for the given settings of the
684
+ other parameters).
685
+ While it is impossible to test the accuracy of every possible
686
+ combination of parameters, these results suggest that DP-SIPS is
687
+ generally insensitive to its hyperparameters. We recommend using
688
+ the following settings: 𝑟 ∈ [0.2, 0.4], 𝐼 ≥ 3, and Δ0 set to an overes-
689
+ timate of the true maximum number of per-user contributions. If
690
+ 0.1
691
+ 0.2
692
+ 0.3
693
+ 0.4
694
+ 0.5
695
+ 0.6
696
+ 0.7
697
+ 0.8
698
+ 0.9
699
+ Ratio Parameter r
700
+ 0
701
+ 5000
702
+ 10000
703
+ 15000
704
+ 20000
705
+ 25000
706
+ 30000
707
+ Number of Partitions Returned
708
+ Finance
709
+ Twitter
710
+ Wikipedia
711
+ Reddit
712
+ IMDB
713
+ Figure 2: Number of partitions returned versus the choice
714
+ of the privacy ratio parameter, 𝑟. All other hyperparameters
715
+ are identical to those listed in Table 1. For all 5 data sets, the
716
+ accuracy is maximized for 𝑟 ∈ [0.1, 0.4].
717
+ 5
718
+ 50
719
+ 100
720
+ 150
721
+ 200
722
+ 250
723
+ 300
724
+ 350
725
+ 400
726
+ Maximum Number of Contributions per User
727
+ 0
728
+ 5000
729
+ 10000
730
+ 15000
731
+ 20000
732
+ 25000
733
+ 30000
734
+ Number of Partitions Returned
735
+ Finance
736
+ Twitter
737
+ Wikipedia
738
+ Reddit
739
+ IMDB
740
+ Figure 3: Number of partitions returned for varying per-user
741
+ maximum contribution bounds, Δ0. All other hyperparame-
742
+ ters are identical to those listed in Table 1. On all 5 data sets,
743
+ increasing Δ0 past 100 has almost no affect on accuracy, with
744
+ maximum accuracy for Δ0 ∈ [5, 150]. We note that Finance is
745
+ unusual as each financial headline is quite short.
746
+ the true maximum number of contributions per user is public, Δ0
747
+ should be set to that value.
748
+ 6.4
749
+ Scalability results
750
+ We benchmark the algorithms on the Amazon data set consisting
751
+ of product reviews from 4 million users. Figure 5 shows the re-
752
+ sults from the first experiment, in which we measure the runtimes
753
+ of the algorithms on Amazon EMR clusters with a single master
754
+ node and varying numbers of core nodes. All nodes are of type
755
+ m5a.2xlarge, which has 8 processor cores and 32 GB of memory.
756
+ Figure 5 illustrates the following runtime trends:
757
+
758
+ DP-SIPS: A simpler, more scalable mechanism for differentially private partition selection
759
+ Data set
760
+ Wt. Gauss
761
+ SIPS
762
+ DPSU
763
+ GW
764
+ Reddit
765
+ 6,160
766
+ 11,392
767
+ 11,186
768
+ 11,984
769
+ Twitter
770
+ 12,632
771
+ 23,649
772
+ 23,576
773
+ 27,184
774
+ Finance
775
+ 17,350
776
+ 27,559
777
+ 29,005
778
+ 37,503
779
+ IMDb
780
+ 3,728
781
+ 7,759
782
+ 5,845
783
+ 3,133
784
+ Wikipedia
785
+ 11,340
786
+ 21,037
787
+ 18,129
788
+ 11,251
789
+ Amazon
790
+ 67,522
791
+ 144,805
792
+ 143,997
793
+ 185,563
794
+ Finance (deduplicated)
795
+ -
796
+ -
797
+ -
798
+ 37,563
799
+ IMDb (deduplicated)
800
+ -
801
+ -
802
+ -
803
+ 3,005
804
+ Wikipedia (deduplicated)
805
+ -
806
+ -
807
+ -
808
+ 9,802
809
+ Table 1: Number of partitions returned by Wt. Gauss (Algorithm 2), SIPS (Algorithm 3), DPSU (Policy Gaussian from [10], and
810
+ GW (from [5]) on six data sets, and additionally we run GW on three data sets where duplicates within user lists have been
811
+ removed. Note that the other three algorithms already deduplicate user sets. For SIPS we use 3 iterations and 𝑟 = 1/3. For Wt.
812
+ Gaussian, SIPS, and DPSU, the privacy budget is set to 𝜌 = 0.1,𝛿 = 10−5, and the user contributions are truncated to Δ0 = 100.
813
+ For GW, 𝜀 = 1.7 and 𝛿 = 8.1142 × 10−5.
814
+ Start
815
+ End Iteration
816
+ End
817
+ Start
818
+ End
819
+ Start
820
+ End
821
+ Figure 4: Parallelism diagram for Weighted Gaussian (left), SIPS (middle), and greedy algorithms (right). Arrows represent
822
+ computational steps while boxes represent the data set. The steps of Weighted Gaussian can run in parallel on parts of the
823
+ data set while greedy algorithms must run sequentially over the data set. Each iteration of SIPS can run in parallel, but the
824
+ iterations must be done sequentially.
825
+ Iterations 𝐼
826
+ Partitions Returned
827
+ 1
828
+ 5,464
829
+ 2
830
+ 10,041
831
+ 3
832
+ 11,126
833
+ 4
834
+ 11,182
835
+ 5
836
+ 11,541
837
+ 6
838
+ 11,061
839
+ 8
840
+ 11,585
841
+ 10
842
+ 11,637
843
+ Table 2: Accuracy as a function of the number of iterations
844
+ on Reddit for Algorithm 3 with parameters 𝜌 = 0.1,𝛿 = 10−5,
845
+ 𝑟 = 1/3, and Δ0 = 50.
846
+ • The runtimes of Weighted Gaussian and SIPS both decrease
847
+ as the number of core nodes increases.
848
+ • The runtimes of DPSU and GW remains approximately the
849
+ same even as the number of core nodes increases.
850
+ These results confirm the intuition that Weighted Gaussian and
851
+ SIPS are both parallelizable and thus scale well with increased
852
+ cluster sizes, even on large data sets. Additionally, it confirms our
853
+ observation that since DPSU and GW need to iterate sequentially
854
+ over each user, they do not scale well with larger cluster sizes as
855
+ this iteration is the main bottleneck.
856
+ Next, we run a slightly different scalability experiment: instead
857
+ of increasing the number of core nodes, we increase the sizes of the
858
+ core nodes. We use a single master node and two core nodes, and
859
+
860
+ Marika Swanberg, Damien Desfontaines, and Samuel Haney
861
+ 2
862
+ 4
863
+ 8
864
+ 16
865
+ 24
866
+ Number of Core Machines
867
+ 0
868
+ 100
869
+ 200
870
+ 300
871
+ 400
872
+ 500
873
+ 600
874
+ Runtime (sec)
875
+ DPSU
876
+ GW
877
+ SIPS
878
+ Weighted Gaussian
879
+ Figure 5: Algorithm runtimes on Amazon dataset with increas-
880
+ ing number of m5a.2xlarge cores in the cluster. The algo-
881
+ rithms were run with the same hyperparameters as those
882
+ listed in Table 1.
883
+ m5a.xlarge
884
+ m5a.2xlarge
885
+ m5a.4xlarge
886
+ m5a.8xlarge
887
+ Machine Sizes in Cluster
888
+ 0
889
+ 100
890
+ 200
891
+ 300
892
+ 400
893
+ 500
894
+ 600
895
+ 700
896
+ Runtime (sec)
897
+ DPSU
898
+ GW
899
+ SIPS
900
+ Weighted Gaussian
901
+ Figure 6: Algorithm runtimes on Amazon dataset with increas-
902
+ ing sizes of nodes in a cluster with 1 master node and 2 core
903
+ nodes. The algorithms were run with the same hyperparam-
904
+ eters as those listed in Table 1.
905
+ increase the sizes of all three nodes. Table 6 shows similar trends
906
+ as the previous experiment: Weighted Gauss and SIPS scale with
907
+ increased node sizes while DPSU and GW do not.
908
+ Comparing Figures 5 and 6, we see that for DP-SIPS, the com-
909
+ munication overhead between machines is small. Specifically we
910
+ can compare SIPS’s performance on 8 cores in Figure 5, a cluster
911
+ with 1 master node and 8 core nodes each of type m5a.2xlarge (8
912
+ CPU cores, 32 GB memory) to m5a.8xlarge (32 CPU cores, 128 GB
913
+ memory) in Figure 6, a cluster with 1 master node and 2 core nodes.
914
+ In both, the core nodes have in total 8 · 8 = 2 · 32 CPU cores and
915
+ 8 · 32 = 2 · 128 GB of memory among them. The runtimes of SIPS
916
+ in the first case is 58.41 seconds while in the second case, it is 66.28
917
+ seconds, which suggests that the communication overhead is small.
918
+ In fact, SIPS runs faster on a cluster of 9 smaller machines than a
919
+ cluster of 3 larger machines. This discrepancy could be the result
920
+ of any number of factors in the machine specifications.
921
+ While the Amazon data set is not large enough that GW and DPSU
922
+ are infeasible to use on it, the experiments suggest that SIPS could
923
+ better handle industrial scale data sets with hundreds of millions
924
+ of users (rather than just 4 million) where the greedy algorithms
925
+ truly would be infeasible to use–all at little to no loss in accuracy.
926
+ 7
927
+ DISCUSSION
928
+ Differentially private partition selection (or set union) is a fun-
929
+ damental problem for many private data analysis tasks. Prior ap-
930
+ proaches to this problem either suffer from poor accuracy or are
931
+ slow on large data sets because they are designed to sequentially
932
+ iterate over the users. We present a simple algorithm for differ-
933
+ entially private partition selection that achieves accuracy that is
934
+ comparable to DPSU and runtimes that scale well with increased
935
+ computational resources.
936
+ 7.1
937
+ Unsuccessful Attempts
938
+ One natural question we explored is: can we boost the accuracy of
939
+ DPSU using our method of iterating several times and removing
940
+ partitions from the data set that were previously returned? We
941
+ found that empirically this did not boost the accuracy on the data
942
+ sets that we tested. Intuitively, this makes sense because the per-
943
+ user Gaussian update policy in DPSU prevents users from adding
944
+ weight to items that have already reached the buffered threshold.
945
+ Such items will very likely be released after the noise addition and
946
+ thresholding steps. The DPSU approach achieves the same goal as
947
+ iterating, albeit with more precision due to the greedy nature of
948
+ the Policy Gaussian per-user updates.
949
+ 7.2
950
+ Future work
951
+ This work raises several natural questions. From a practical per-
952
+ spective, one may wonder whether there is a scalable algorithm
953
+ with higher accuracy than DP-SIPS. Another interesting line of
954
+ inquiry could consider theoretical guarantees on the number of
955
+ partitions released, possibly under some distributional assumptions
956
+ on the data. We believe these lines of inquiry would give important
957
+ insights into a fundamental problem in private data analysis.
958
+ A PROOF OF THEOREM 17
+ Proof of Theorem 17. Since the algorithm is simply computing a stable histogram, the proof follows from standard arguments. We will denote Algorithm 2 by M. Fix any ρ > 0, any δ ∈ (0, 1), and any Δ0 ∈ N. Fix two neighboring data sets x = (W1, . . . , WN) and x′, where x′ contains one extra user set W∗. Let E be the event that M(x′) ⊆ ∪_{i∈[N]} Wi; that is, the partitions returned for data set x′ are in the support of M(x). We will first argue that, conditioned on event E, the output distributions of M(x) and M(x′) satisfy pure zCDP (Definition 4). Then we will argue that, because of the thresholding step, event E occurs with probability at least 1 − δ.
+ If we condition both M(x) and M(x′) on event E, then by the Gaussian mechanism (Definition 7) and post-processing (Lemma 10), Algorithm 2 satisfies pure zCDP (Definition 4).
+ Now, it remains to show that Pr[E] ≥ 1 − δ. First, note that each user contributes to at most Δ0 items in the histogram, and further note that event E^c occurs when at least one item from W∗ that is not in x is released. Let W′ denote the set of items in W∗ that do not appear in x. Then,
+ Pr[E] = Pr[∀u ∈ W′, u ∉ M(x)]
+       = Pr[∩_{u∈W′} Ĥ[u] ≤ T]
+       = Pr[∩_{u∈W′} H[u] + Z_u ≤ T]                      (for Z_u ∼ N(0, 1/2ρ))
+       = ∏_{u∈W′} Pr[H[u] + Z_u ≤ T]                      (by independence of the Z_u's)
+       = ∏_{u∈W′} Pr[1/√|W′| + Z_u ≤ T]
+       = (Pr[1/√|W′| + Z_u ≤ T])^{|W′|}                   (since the Z_u's are i.i.d.)
+       ≥ min_{k∈[Δ0]} (Pr[1/√k + Z_u ≤ T])^k              (since |W′| ≤ Δ0)
+       ≥ 1 − δ                                            (by definition of T). □
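+ The definition of T used in the last step can be made concrete: the proof goes through for any T with min_{k∈[Δ0]} Pr[1/√k + Z ≤ T]^k ≥ 1 − δ, and the smallest such T has a closed form via the Gaussian quantile. The sketch below is our own code, assuming SciPy is available; it is not from the paper:
+ from math import sqrt
+ from scipy.stats import norm
+
+ def dp_threshold(rho: float, delta: float, delta0: int) -> float:
+     # Smallest T with min_{k <= delta0} Pr[1/sqrt(k) + Z <= T]^k >= 1 - delta,
+     # where Z ~ N(0, 1/(2*rho)), matching the final step of the proof.
+     sigma = sqrt(1.0 / (2.0 * rho))
+     return max(1.0 / sqrt(k) + sigma * norm.ppf((1.0 - delta) ** (1.0 / k))
+                for k in range(1, delta0 + 1))
+
+ # usage: T = dp_threshold(rho=0.5, delta=1e-9, delta0=100)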
+ B AMAZON EMR CLUSTER SPECIFICATIONS
+ Table 3 shows the number of CPU cores and the amount of memory available to each type of machine on Amazon EMR that we use in our experiments.
+ Machine Size    Cores    Memory (GB)
+ m5a.xlarge      4        16
+ m5a.2xlarge     8        32
+ m5a.4xlarge     16       64
+ m5a.8xlarge     32       128
+ Table 3: Number of cores and memory per EMR machine of each size.
+ C OMITTED PROOFS FOR SECTION 3
+ In this section, we give the proofs omitted from Section 3.
+ Proof of Lemma 10. Suppose M : X∗ → Y and M′ : X∗ → Z satisfy δ-approximate ρ-zCDP and δ′-approximate ρ′-zCDP, respectively. Fix any neighboring data sets x, x′, and let P1, Q1, P2, Q2 denote the distributions of M(x), M(x′), M′(x), M′(x′), respectively. Then there exist distributions P′1 and P′2 such that TVD(P1, P′1) ≤ δ, TVD(P2, P′2) ≤ δ′, and
+ D_α(P′1 || Q1) ≤ ρ · α,   D_α(P′2 || Q2) ≤ ρ′ · α,
+ for all α ∈ (1, ∞). The output distribution of M′′(x) is the joint distribution (P1, P2), and similarly (Q1, Q2) for M′′(x′). Note that:
+ (1) The joint distribution (P′1, P′2) satisfies TVD((P′1, P′2), (P1, P2)) ≤ TVD(P1, P′1) + TVD(P2, P′2) ≤ δ + δ′, since the random outputs of the two mechanisms are independent.
+ (2) D_α((P′1, P′2) || (Q1, Q2)) ≤ (ρ + ρ′) · α, by the same argument as composition for pure zCDP.
+ Therefore, M′′(x) satisfies (δ + δ′)-approximate (ρ + ρ′)-zCDP. □
+ Proof of Lemma 11. Suppose M : X^N → Y satisfies δ-approximate ρ-zCDP and let f : Y → Z be a function. Let P and Q denote the distributions of M(x) and M(x′), respectively. Let P′ be the distribution promised by the definition of approximate zCDP such that
+ TVD(P, P′) ≤ δ, and D_α(P′ || Q) ≤ ρ · α
+ for all α ∈ (1, ∞). Let f(P) denote the distribution over Z obtained by setting f(P)(S) = P(f⁻¹(S)) for all S ⊆ Z, where f⁻¹(S) is the preimage of S in Y. Then the following are true:
+ (1) D_α(f(P′) || f(Q)) ≤ ρ · α;
+ (2) TVD(f(P′), f(P)) ≤ δ.
+ (1) follows from the fact that zCDP is closed under post-processing. For (2), note that |f(P)(S) − f(P′)(S)| = |P(f⁻¹(S)) − P′(f⁻¹(S))| for all S ⊆ Z. Therefore, TVD(f(P′), f(P)) ≤ TVD(P, P′) ≤ δ. □
+ Proof of Lemma 12. Suppose M : X^N → Y satisfies δ-approximate ρ-zCDP, and suppose every mechanism that satisfies ρ-zCDP must satisfy (ε∗(ρ), δ∗(ρ))-approximate DP. Let P and Q denote the distributions of M(x) and M(x′), respectively. By the definition of approximate zCDP, there exists P′ such that
+ TVD(P, P′) ≤ δ, and D_α(P′ || Q) ≤ ρ · α
+ for all α ∈ (1, ∞). Next, by the assumption in the lemma, the distributions P′ and Q satisfy
+ P′(S) ≤ exp(ε∗(ρ)) Q(S) + δ∗(ρ).
+ Then by Lemma 9, there exists P′′ with
+ TVD(P′′, P′) ≤ δ∗(ρ), and P′′(S) ≤ exp(ε∗(ρ)) Q(S).
+ Then,
+ TVD(P′′, P) ≤ TVD(P′′, P′) + TVD(P′, P) ≤ δ + δ∗(ρ).
+ Applying Lemma 9 again completes the proof. □
+ Proof of Lemma 16. Suppose M : X^N → Y satisfies (ε, δ)-approximate differential privacy. Let P and Q denote the distributions of M(x) and M(x′), respectively. Let P′ denote M(x)|E. We claim that TVD(P, P′) ≤ δ. Note that for any s ∈ ¬E, 0 = P′(s) ≤ P(s), and for any s ∈ E, P(s) ≤ P′(s). Therefore,
+ sup_S |P(S) − P′(S)| = |P(E) − P′(E)| ≤ δ. □
PNA0T4oBgHgl3EQfC__J/content/tmp_files/load_file.txt ADDED
RdE2T4oBgHgl3EQfWQda/content/tmp_files/2301.03831v1.pdf.txt ADDED
@@ -0,0 +1,1310 @@
+ Dynamic Grained Encoder for Vision Transformers
+ Lin Song1* Songyang Zhang2,4,5* Songtao Liu3 Zeming Li3 Xuming He2 Hongbin Sun1† Jian Sun3 Nanning Zheng1
+ 1 College of Artificial Intelligence, Xi'an Jiaotong University
+ 2 ShanghaiTech University
+ 3 Megvii Inc. (Face++)
+ 4 University of Chinese Academy of Sciences
+ 5 Shanghai Institute of Microsystem and Information Technology, Chinese Academy of Sciences
+ stevengrove@stu.xjtu.edu.cn, sy.zhangbuaa@gmail.com, liusongtao@megvii.com, hexm@shanghaitech.edu.cn, {hsun, nnzheng}@mail.xjtu.edu.cn, {lizeming, sunjian}@megvii.com
+ Abstract
+ Transformers, the de-facto standard for language modeling, have recently been applied to vision tasks. This paper introduces sparse queries for vision transformers to exploit the intrinsic spatial redundancy of natural images and save computational costs. Specifically, we propose a Dynamic Grained Encoder for vision transformers, which can adaptively assign a suitable number of queries to each spatial region. Thus it achieves a fine-grained representation in discriminative regions while keeping high efficiency. Besides, the dynamic grained encoder is compatible with most vision transformer frameworks. Without bells and whistles, our encoder allows the state-of-the-art vision transformers to reduce computational complexity by 40%-60% while maintaining comparable performance on image classification. Extensive experiments on object detection and segmentation further demonstrate the generalizability of our approach. Code is available at https://github.com/StevenGrove/vtpack.
+ 1 Introduction
+ Following the evolution of network architectures in natural language processing (NLP), Vision Transformers [1-5] have recently attracted increasing research attention and demonstrated promising results on several vision tasks, such as image classification, object detection, and other pixel-level tasks. Vision transformers are notable for modeling long-range dependencies and introducing less inductive bias, and are considered a solid alternative to CNNs for vision tasks.
+ One of the eminent obstacles for vision transformers is their high computational cost. Vision tasks typically require high-resolution image features to obtain detailed and structured representations, which is critical for pixel-level tasks [6-10]. However, since the encoders in vision transformers need to establish pairwise relationships, high-resolution features could impose unacceptable computational and memory costs. Therefore, similar to the efficient transformers [11-13] in NLP, many variants [2-4] of vision transformers have been proposed to perform sparse self-attention with dense queries and sparse key-value pairs based on fixed patterns or heuristic rules.
+ In this paper, we note that, different from natural language, natural images involve much spatial redundancy, especially in flat or low-texture regions [14-18]. This could enable the image features to have a low resolution in some regions while maintaining similar representational capabilities.
+ To verify the spatial redundancy in vision transformers, we give an empirical analysis for DeiT [19] on the ImageNet [20] classification dataset (for details, refer to Sec. 3.1).
+ *Equal contribution. This work was done in Megvii Research.
+ †Corresponding author.
+ 35th Conference on Neural Information Processing Systems (NeurIPS 2021).
+ arXiv:2301.03831v1 [cs.CV] 10 Jan 2023
+ Figure 1: The overall diagram of the proposed dynamic grained encoder. x is the input sequence, and y is the output sequence. The dynamic grained router automatically splits a 2D feature into mixed-grained patches with a different number of tokens per patch. Each patch is then flattened into a sparse query by an average pooling operator. The vanilla encoder block can be a standard transformer encoder or other efficient variants. Besides, the dashed lines are only used in the training phase.
+ The analysis demonstrates the existence of spatial redundancy in queries, and shows that the complexity can be dramatically reduced by downsampling some highly redundant regions while maintaining comparable performance. These properties allow the queries to use mixed granularity to achieve a balance between effectiveness and efficiency, i.e., more tokens in more discriminative regions and fewer tokens in less informative regions. However, the distribution of spatial redundancy varies greatly among different input images, making it difficult for a static method to handle complex and variable features.
+ We thus attempt to explore a new perspective: introducing a dynamic network mechanism into vision transformers to reduce the spatial redundancy of image features. As shown in Fig. 1, we propose a Dynamic Grained Encoder (DGE) to replace the vanilla encoder in vision transformers. It assigns a suitable number of queries to each region by using a dynamic grained router; e.g., the foreground regions of the cat head in Fig. 1 are assigned more queries than the background regions. Concretely, a reshaped 2D feature is first divided into regions using a fixed window. For each region, the number of patches is decided by a data-dependent routing process, and each patch is average-pooled to obtain a 1D token. All the tokens are then concatenated into a sequence as the queries. Since our method focuses on the sparsity of queries, it is compatible with many efficient transformer encoders [2,3,11-13], making our approach available as a generic plugin in most vision transformers [1-3,19,21]. Furthermore, the output of the encoder is restored to the input resolution by an un-pooling operation and compensated with detailed information from the input feature.
+ To demonstrate the effectiveness, we conduct extensive experiments on three typical vision transformers, i.e., DeiT [19], PVT [3] and DPVT, where DPVT is a new framework based on deformable attention [2]. On the image classification task, our dynamic grained encoder allows these models to reduce computational complexity by 40%-60% while maintaining comparable performance. On the other hand, with lower computational complexity, the accuracy can be improved by up to 4.4% on the ImageNet val set. In addition, the experiments on object detection and segmentation show the strong robustness and generalization of our method.
+ 2 Related Work
+ 2.1 Vision Transformer
+ Recently, Vision Transformers, inspired by the significant success the transformer [22] achieved in the NLP field, have received more attention in the vision community. ViT [1], which converts the image into a sequence and applies the transformer encoder structure directly on it for image classification, pioneered this direction in visual recognition. To tackle the issues of training efficiency and data efficiency, DeiT [19] introduces several training strategies to enable learning the vision transformer on ImageNet. PVT [3] further develops a feature pyramid based on the transformer structure and makes it applicable to various downstream vision tasks. Swin [21] introduces the local-window idea to improve the efficiency of the transformer structure. Our work mainly focuses on reducing spatial redundancy and improving model efficiency in a data-dependent manner, which is rarely explored in previous works and complementary to various vision transformer structures.
+ Figure 2: Spatial redundancy statistics of the vanilla encoders in DeiT-S [19] (panels: (a) Distribution, (b) Accuracy vs. Complexity, (c) Layer). The correlation coefficient is used to measure the similarity of queries in a local region; higher correlation corresponds to more spatial redundancy. (a) indicates that most queries are highly redundant in a local region. (b) reflects that reducing the queries with high redundancy has little impact on performance. (c) shows that the redundancy varies greatly in some layers.
+ 2.2 Efficient Transformer
+ To improve the efficiency of transformers, prior works mainly concentrate on reducing the quadratic computation of self-attention. These works can be roughly summarized as three types: learnable/fixed-pattern based methods, low-rank/kernel based methods, and memory based methods. Some recent approaches [12,23-26] try to reduce the complexity of the self-attention mechanism by using a heuristic method to generate fixed or learnable patterns. Other efforts [11,13,27,28] focus on utilizing the low-rank property of the attention matrix or introducing kernels to avoid computing the attention matrix explicitly. Moreover, some works [29-31] explore the memory mechanism to improve efficiency. However, previous attempts mainly concentrate on NLP tasks. Different from language sequences, which are highly abstract representations of information, natural images typically have much spatial redundancy. This makes vision transformers require expensive costs for downstream vision tasks, especially dense-prediction tasks, e.g., object detection and segmentation. Our work utilizes this intrinsic property of natural images to achieve redundancy reduction in a data-dependent manner.
+ 2.3 Dynamic Network
+ Dynamic networks [32] are proposed to adaptively change the network architecture and parameters according to the input, and have been widely explored in computer vision and natural language processing tasks. Most dynamic networks focus on coarse-grained strategies such as dropping blocks [33-36], pruning channels [37,38] or adjusting layer-level scales [39,40]. For instance, MSDNet [34] proposes an early-exiting mechanism to achieve efficient inference for image classification. Switch Transformer [41] uses the Mixture of Experts (MoE) model [42] to select different parameters for each input sample. DRNet [39] attempts to perform adaptive scale transformation in a feature pyramid network for semantic segmentation. The closest works to ours are probably the Dynamic Convolution [43] and the Dynamic Head [10], which use a learnable mask to skip specific spatial locations. However, they are only applicable to CNN-based networks, and the skipping-location strategy could result in significant performance degradation for vision transformers (refer to Sec. 4.1.2). Different from them, our method adapts the region-level granularity to the input feature for vision transformers, which is more general and flexible.
+ 3 Method
+ 3.1 Empirical Analyses on Spatial Redundancy
+ To investigate the spatial redundancy of vision transformers on image data, we conduct a series of experiments on the ImageNet [20] val set with a pre-trained DeiT-S [19] model. Our main purpose is to explore the relationship among the granularity of queries, computational complexity, and classification performance. Specifically, for each encoder layer in DeiT-S, we reshape its input queries (excluding the extra embedding) as a 2D feature map and split it into 2 × 2 non-overlapping patches. For each patch, we calculate its average token, and measure the similarity of each token in the patch with the average token using the Pearson Correlation Coefficient (PCC) metric.
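+ A minimal sketch of this probe is given below; the code and tensor layout are our own assumptions for illustration, not the released implementation:
+ import torch
+
+ def patch_redundancy(queries, H, W, patch=2):
+     # queries: (H*W, C) encoder-layer input, class token already removed.
+     # Returns the PCC between each token in a patch and the patch mean.
+     x = queries.reshape(H, W, -1)
+     x = x.unfold(0, patch, patch).unfold(1, patch, patch)   # (H/p, W/p, C, p, p)
+     x = x.flatten(3).permute(0, 1, 3, 2)                    # (H/p, W/p, p*p, C)
+     mean_tok = x.mean(dim=2, keepdim=True)                  # patch-average token
+     xc = x - x.mean(dim=-1, keepdim=True)
+     mc = mean_tok - mean_tok.mean(dim=-1, keepdim=True)
+     pcc = (xc * mc).sum(-1) / (xc.norm(dim=-1) * mc.norm(dim=-1) + 1e-8)
+     return pcc                                              # (H/p, W/p, p*p)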
+ Then we have three valuable observations. (1) Queries share similar patterns in a local region. From the correlation coefficient histogram plotted in Fig. 2(a), most of the correlation coefficients are greater than 0.8, which indicates that queries typically have a strong correlation in a local region. (2) There is large potential for reducing spatial redundancy. Furthermore, in each patch, we replace the tokens with the average token when their correlation coefficient is above a given threshold. In Fig. 2(b), we illustrate the accuracy/complexity curve over varying correlation thresholds. When the threshold is 0.9, the complexity decreases by 27%, but the top-1 accuracy decreases by only 0.3%. This evidence demonstrates the potential of reducing spatial redundancy in vision transformers. (3) A static strategy is sub-optimal. As shown in Fig. 2(c), some encoders have large variance of correlation coefficients among different images. Thus, using data-independent methods to reduce spatial redundancy is sub-optimal and may lead to considerable performance degradation. These observations motivate us to explore a data-dependent manner to reduce spatial redundancy.
+ 3.2 Dynamic Grained Encoder
+ 3.2.1 Overall Architecture
+ In this paper, we propose a new encoder block for vision transformers, called the Dynamic Grained Encoder (DGE). As shown in Fig. 1, the proposed encoder consists of two main modules, i.e., a dynamic grained router and a vanilla encoder block. Specifically, the dynamic grained router adaptively generates mixed-grained patches for a 2D feature. The vanilla encoder block can be a standard encoder block [22] or other efficient variants [2,9,11-13,44], which is made up of a multi-head attention and a feed-forward network. If there are extra tokens in the input sequence, such as the class embedding in ViT [1], we handle them separately with the vanilla encoder. For ease of presentation, the rest of this section only considers the input sequence without extra tokens.
+ Given an input sequence x ∈ R^{(H×W)×C} for the dynamic grained encoder, (H, W) denotes the resolution of the feature and C is the number of channels. To be compatible with most vanilla encoders, we only generate sparse queries q ∈ R^{N×C} by the dynamic grained router, where N indicates the number of queries. Then the sparse queries, as well as the dense keys k and values v, are transformed by a vanilla encoder. It is worth mentioning that keys and values can be sparse in the vanilla encoder to improve efficiency further. The output sequence of the vanilla encoder is restored to a 2D feature with the original resolution by an un-pooling operation. Furthermore, to enhance the details of the output feature and alleviate the vanishing gradient problem, we add a residual connection [45] to fuse the input sequence.
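+ For concreteness, the data path above can be sketched for a single granularity φ (the real router mixes granularities per region); `vanilla_encoder`, the tensor layout, and the nearest-neighbor un-pooling below are our assumptions for illustration, not the reference implementation:
+ import torch
+ import torch.nn.functional as F
+
+ def dge_forward(x, H, W, phi, vanilla_encoder):
+     # x: (B, H*W, C); assumes H and W are divisible by phi.
+     # Pool phi x phi patches into sparse queries, attend with dense
+     # keys/values, un-pool back to H*W tokens, and fuse with a residual.
+     B, N, C = x.shape
+     x2d = x.transpose(1, 2).reshape(B, C, H, W)
+     q2d = F.avg_pool2d(x2d, kernel_size=phi)             # (B, C, H/phi, W/phi)
+     q = q2d.flatten(2).transpose(1, 2)                   # sparse queries
+     y = vanilla_encoder(q, x)                            # keys/values stay dense
+     y2d = y.transpose(1, 2).reshape(B, C, H // phi, W // phi)
+     y2d = F.interpolate(y2d, size=(H, W), mode="nearest")  # un-pooling
+     return x + y2d.flatten(2).transpose(1, 2)            # residual fusion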
+ 3.2.2 Dynamic Grained Router
+ To achieve dynamically grained patches in space, we first partition the 2D feature, denoted z, into multiple regions, which can be done in regular or irregular ways. Although irregular ways, e.g., superpixels [46] and segmentation [47], may lead to better performance, they are very unfriendly to memory access and induce inefficiency. Therefore, as shown in Fig. 3, we adopt an S × S non-overlapping window³ to split image features into multiple regular regions. Furthermore, we define a set of candidate granularities Φ = {φ1, φ2, ..., φK} to represent the optional patch sizes in a region, where K is the number of candidate granularities. The granularity denotes the side length of a patch, e.g., φ = 8 corresponds to an 8 × 8 patch. Since each patch is pooled into one query in the encoder, larger granularity indicates fewer queries and less computation. For convenience, we set the region size to the maximum granularity, i.e., S = max(Φ), in the experiments.
+ Inference. For a region i ∈ {1, 2, ..., ⌈H/S⌉·⌈W/S⌉}, we use a gating network to select a granularity from the set of candidate granularities. Concretely, we reduce the region feature z_i to a representative token by an average pooling operation and linearly project it to the gating logits:
+ h(z_i) = (1/S²) Σ_{j=1}^{S²} z_{i,j} W + b,   (1)
+ ³Bottom-right padding is adopted on the feature if needed.
+ Figure 3: The diagram of the dynamic grained router in a DGE. As shown in the left part, a 2D feature is split into multiple regions. For each region, as shown in the right part, we generate multiple groups of patches with different granularities and select a specific group by the gating network. The Gumbel noise is added to achieve end-to-end training. Besides, the modules in dashed lines are only used in the training phase.
+ where W ∈ R^{C×K} and b ∈ R^{1×K} indicate the weight and bias, respectively. The gating logits are used to decide the granularity for the region by calculating the gating indices:
+ θ_i = arg max_k (h(z_i)_k) ∈ {1, 2, ..., K}.   (2)
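+ As a minimal sketch of Eqs. 1-2 (our own naming, not the released code), the gate is just an average pooling followed by a linear layer and an argmax:
+ import torch
+ import torch.nn as nn
+
+ class GatingNetwork(nn.Module):
+     # Average-pool each S x S region to one token, project to K logits
+     # (Eq. 1), and pick a granularity index by argmax at inference (Eq. 2).
+     def __init__(self, channels, num_granularities):
+         super().__init__()
+         self.proj = nn.Linear(channels, num_granularities)  # W, b in Eq. 1
+
+     def forward(self, regions):
+         # regions: (num_regions, S*S, C)
+         logits = self.proj(regions.mean(dim=1))   # Eq. 1
+         return logits.argmax(dim=-1)              # Eq. 2: gating indices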
+ As shown in Fig. 3, we split the region feature into multiple groups of patches with the K granularities. We then choose the group of the selected granularity according to the gating indices. We denote the selected group as z′_i ∈ R^{N_i×φ²_{θ_i}×C}, where N_i = ⌈S/φ_{θ_i}⌉ · ⌈S/φ_{θ_i}⌉ is the number of patches in the group.
+ As shown in Fig. 1, to construct a sequence of queries, we take the spatial mean vector of each patch as its representative token by a pooling operation and concatenate all the tokens for the vanilla encoder:
+ ŷ_i = VanillaEncoder(q_i, k, v), where q_i = (1/φ²_{θ_i}) Σ_{j=1}^{φ²_{θ_i}} z′_{i,j} ∈ R^{N_i×C}.   (3)
+ Compared with previous encoders [2,11-13], the number of queries is reduced to 1/φ²_{θ_i} of the original, so the efficiency of the encoder is improved, and the acceleration is more significant when the selected granularity θ_i is larger.
+ Training. To enable end-to-end training of the gating network, motivated by [43,48-50], we replace the deterministic decisions in Eq. 2 with a stochastic sampling process during the training phase. Specifically, given a categorical distribution with unnormalized log probabilities, a discrete gating index can be yielded with noise samples g_k drawn from a standard Gumbel distribution:
+ θ_i = arg max_k (h(z_i)_k + g_k), where g_k ∼ Gumbel(0, 1).   (4)
+ Furthermore, since Eq. 4 is a hard decision process, it is not straightforward to train the gating logits. To enable back-propagation, we adopt the Gumbel-Softmax technique [51] to give a continuous and differentiable approximation by replacing the argmax with a softmax operation. The soft gating score for a region is then selected by the gating index:
+ p_i = exp((h(x_i)_{θ_i} + g_{θ_i})/τ) / Σ_{k}^{K} exp((h(x_i)_k + g_k)/τ) ∈ [0, 1],   (5)
+ where a fixed temperature τ = 1 is used in our experiments for convenience. Similar to [43,52], we further use a straight-through estimator for the gradients of the gating logits, which are obtained through the soft gating score p_i during the backward pass:
+ y′_i = ŷ (forward);   y′_i = p_i · ŷ (backward).   (6)
+ The above stochastic process is only adopted in the training phase. Our method requires no random sampling and no exponential functions during inference, guaranteeing high efficiency in practice.
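+ The sampling and straight-through steps of Eqs. 4-6 can be sketched as follows; the function name and tensor shapes are ours, and the scaling trick is one standard way to realize Eq. 6, offered as an assumption rather than the paper's exact code:
+ import torch
+ import torch.nn.functional as F
+
+ def gumbel_route(logits, y_hat, tau=1.0):
+     # logits: (R, K) gating logits h(z_i); y_hat: (R, ...) encoder outputs
+     u = torch.rand_like(logits).clamp_min(1e-9)
+     g = -torch.log(-torch.log(u))                        # Gumbel(0, 1) noise
+     idx = (logits + g).argmax(dim=-1)                    # Eq. 4: hard index
+     p = F.softmax((logits + g) / tau, dim=-1)            # Eq. 5: soft scores
+     p_sel = p.gather(-1, idx.unsqueeze(-1)).squeeze(-1)  # score of the choice
+     # Eq. 6: forward value is y_hat; backward gradient sees p_sel * y_hat
+     scale = p_sel + (1.0 - p_sel).detach()
+     return idx, y_hat * scale.view(-1, *([1] * (y_hat.dim() - 1)))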
+ Figure 4: Visualization of predicted gating indices of PVT-S+DGE on the ImageNet val set ((a) gating indices of different images; (b) gating indices of different stages). The candidate granularity set is Φ = {1, 2, 4}, shown in red, green and blue, respectively. Higher granularity corresponds to less computational complexity. Our dynamic encoder tends to assign more queries to the representative foreground regions than to the background regions, thus significantly reducing the computational cost. The left and right parts of Fig. 4(a) come from stage 1 and stage 2 of PVT, respectively. From left to right, the heatmaps of each instance in Fig. 4(b) correspond to stage 1, stage 2, and stage 3, respectively.
+ 3.2.3 Budget Constraint
+ In the absence of a budget constraint, our encoder typically prefers to assign more queries to each region to achieve high performance. To obtain a better balance between effectiveness and efficiency, we define a computational budget, denoted γ ∈ [0, 1], which corresponds to the desired computational complexity ratio relative to the vanilla encoder without dynamic granularity.
+ Given a vision transformer with L dynamic grained encoders, we calculate the used computational complexity ratio of the transformer by:
+ β = (Σ_{l}^{L} C^l ψ^l) / (Σ_{l}^{L} C^l H^l W^l), where ψ^l = Σ_i φ²_{θ_i} (forward); ψ^l = Σ_i p^l_i · φ²_{θ_i} (backward).   (7)
+ Here C^l indicates the computational complexity required to compute a query in encoder layer l. The term ψ^l corresponds to the number of queries and adopts a straight-through estimator to enable end-to-end training. This strategy ensures an accurate complexity estimation when computing the training loss. Moreover, we use the Euclidean distance for the budget loss to narrow the computational complexity to a predetermined bound:
+ L = L_task + λ L_budget, where L_budget = (β − γ)².   (8)
+ The hyper-parameter λ balances the losses among different tasks, making the gradients have the same order of magnitude. Besides, for batched image inputs, β is averaged along the batch dimension to estimate the average load of the network.
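+ A hedged sketch of the budget term in Eqs. 7-8 follows; the variable names are ours, and the straight-through selection below relaxes Eq. 7 slightly by letting gradients flow to all K scores rather than only the selected one:
+ import torch
+
+ def budget_loss(p_soft, phis, cost_per_query, hw, gamma):
+     # p_soft: list of (R_l, K) soft gating scores, one per DGE layer;
+     # phis: (K,) candidate granularities; cost_per_query: list of C_l;
+     # hw: list of H_l * W_l; gamma: the target budget ratio.
+     used, total = 0.0, 0.0
+     for p, c, n in zip(p_soft, cost_per_query, hw):
+         hard = torch.zeros_like(p).scatter_(-1, p.argmax(-1, keepdim=True), 1.0)
+         gate = hard + p - p.detach()            # straight-through selection
+         used = used + c * (gate * phis.pow(2)).sum()
+         total = total + c * n
+     beta = used / total                          # Eq. 7
+     return (beta - gamma) ** 2                   # L_budget in Eq. 8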
+ 4 Experiment
+ In this section, we apply our encoder to state-of-the-art vision transformers and conduct extensive experiments on image classification, object detection, and segmentation. To demonstrate the generalization of our method, we conduct experiments on three vision transformer frameworks, i.e., DeiT [19], PVT [3] and DPVT, where DPVT is a new framework we propose, based on the architecture of PVT [3] but using deformable attention [2] as the vanilla encoder. Different from the dense self-attention process in DeiT, PVT and DPVT utilize sparse key-value pairs in position-insensitive and position-sensitive ways, respectively. These three frameworks represent the vanilla encoders used by most vision transformers.
+ Figure 5: Visualization of accuracy and computational complexity of different configurations (panels: (a) Model size (γ = 0.5), (b) Budget, (c) Layer (γ = 0.5), (d) Resolution (γ = 0.5)). (a), (b) and (c) are evaluated on the ImageNet val set. PVT and PVT+DGE in (a) are scaled by model size, i.e., "tiny", "small", "medium" and "large". (b) indicates the performance of our method with different budget constraints. (c) reflects the distribution of computational complexity across different encoder layers of DeiT-S+DGE. (d) is evaluated on the ADE-20K val set with varying image resolutions.
+ 4.1 Image Classification on ImageNet
+ 4.1.1 Implementation Detail
+ All the experiments for image classification are based on the ImageNet [20] classification dataset. We use 256×256⁴ as the input image resolution for training and evaluation. For a fair comparison, we follow the training settings in DeiT and PVT. Specifically, random-size cropping, random horizontal flipping [53] and mixup [54] are used for data augmentation. We use the AdamW [55] optimizer with a weight decay of 0.05 and a momentum of 0.9. The learning rate is initially set to 0.001 and decreases according to the cosine schedule [56]. All the models are trained for 300 epochs with 128 images per batch. Label-smoothing regularization is used in the training phase. Besides, for the dynamic grained encoders, λ is set to 1.0 and Φ is set to {1, 2, 4} by default. During the training phase, we use four compute nodes with 32 Nvidia Tesla V100 GPUs. For instance, we spend about 1.2 days training the PVT-S with DGE model for 300 epochs. For the runtime evaluation, we measure the frameworks on both an Intel Xeon Gold 6130 CPU and an Nvidia Tesla V100 GPU to demonstrate the efficiency of our dynamic networks.
+ ⁴To achieve efficient region splitting, we choose 256 × 256 instead of 224 × 224 as it is divisible by more optional granularities. We re-train all involved vision transformers in this work for a fair comparison.
+
555
+ Table 1: Performance of dynamic grained encoder with different configurations on ImageNet val set.
556
+ The budget for DGE is set to 0.5. "Region" means using region-wise routing instead of layer-wise
557
+ routing in the encoder.
558
+ Framework
559
+ Dynamic
560
+ Region
561
+ Φ
562
+ Top1 Acc
563
+ Top5 Acc
564
+ FLOPs
565
+ #Param
566
+ PVT-S
567
+ 
568
+ -
569
+ -
570
+ 80.2
571
+ 95.2
572
+ 6.2G
573
+ 28.2M
574
+ PVT-S+DGE
575
+ 
576
+ 
577
+ 1, 2, 4
578
+ 79.1
579
+ 94.5
580
+ 3.4G
581
+ +12.1K
582
+ 
583
+ 0, 1
584
+ 78.8
585
+ 94.4
586
+ 3.5G
587
+ +8.1K
588
+ 1, 2
589
+ 80.0
590
+ 95.0
591
+ 3.5G
592
+ +8.1K
593
+ 1, 2, 4
594
+ 80.2
595
+ 95.0
596
+ 3.5G
597
+ +12.1K
598
+ 1, 2, 4, 8
599
+ 79.9
600
+ 95.0
601
+ 3.4G
602
+ +16.1K
603
+ 4.1.2 Ablation Study
+ Where are Fine-Grained Queries Assigned? To reveal the underlying properties of our dynamic grained encoder, we illustrate the predicted gating indices θ on the ImageNet val set in Fig. 4. Without additional supervision other than classification, our dynamic network can generate instance-aware masks with rich details. This allows the encoder to assign more queries to foreground regions with discriminative features than to background regions, ensuring that the network consumes less computational cost while maintaining a fine-grained representation. In addition, as presented in Fig. 4(b), the predicted gating indices have similar patterns among different stages of the PVT. This demonstrates the effectiveness for a pyramid network, which is crucial for application to downstream tasks.
+ Dynamic vs Static. To demonstrate the superiority of the dynamic mechanism, we compare against the PVT framework with different model sizes in Fig. 5(a). For convenience, we fix the budget constraint γ at 0.5. Our dynamic grained encoder can reduce the computational complexity by half while maintaining comparable performance. On the other hand, with similar computational complexity, our method can improve the static transformers by up to 4.4%. The results demonstrate the effectiveness of our method even on efficient vision transformers. In addition, as shown in Fig. 5(c), we calculate the complexity ratio of each layer in DeiT-S with DGE; the complexity of the middle layers varies significantly due to the dynamic mechanism. Interestingly, deeper layers have lower average computational complexity, which means the deeper layers tend to assign fewer queries. Thus, DeiT is turned into a dynamic feature-pyramid structure, which is consistent with the observation in CNNs.
+ Budget Constraint and Candidate Granularity Set. As illustrated in Fig. 5(b), we compare varying budget constraints γ, selected from {0.25, 0.5, 0.75, 1.0}. The redundancy in space allows the network to achieve comparable performance with much less computational cost even on efficient transformers, e.g., PVT and DPVT. Our encoder achieves the optimal balance between effectiveness and efficiency when the budget is about half. Therefore, we set the budget constraint to 0.5 for the other experiments by default. In addition, we report the performance of PVT-S with DGE for different candidate granularity sets Φ in Tab. 1. When Φ = {0, 1}, the gating indices degenerate into a learnable binary mask similar to dynamic convolutions [10,43], but this strategy results in significant performance degradation. There is no significant difference in performance between the other granularity settings. The performance is highest when Φ = {1, 2, 4}, which becomes our default setting.
+ Region-wise Routing vs Layer-wise Routing. Fig. 4 clearly demonstrates that DGE can perform dynamic granularity in space to adapt to different object structures. Nevertheless, most previous dynamic networks are based on layer-wise routing [32]. To demonstrate the advantage of our method, we set the region size S × S to the input feature size so that DGE degrades from region-wise routing to layer-wise routing. As shown in Tab. 1, region-wise gating achieves 1.1% absolute gains over layer-wise gating with similar complexity, which agrees well with the empirical analysis in Sec. 3.1.
+ Table 2: Performance of the dynamic grained encoder on the COCO val set. All experiments are conducted with the 1x schedule [57]. Time and FLOPs are measured on an 800 × 1280 image. "C" and "G" indicate the backbone latency on CPU (Xeon 6130) and GPU (Tesla V100). The AP columns report Mask R-CNN (1x) box and mask AP. The budget for DGE is 0.5.
+ Backbone   Size       #Param(M)  C(ms)  G(ms)  FLOPs(G)  APb   APb50  APb75  APm   APm50  APm75
+ ResNet     50         44.2       -      -      189       38.0  59.6   41.4   34.4  55.1   36.7
+ PVT        Small      44.3       880    33     251       40.4  62.9   43.8   37.8  60.1   40.3
+ PVT+DGE    Small      44.3       440    26     185       40.1  62.6   43.2   37.5  59.7   40.0
+ DPVT       Small      37.7       1090   50     186       44.0  65.9   48.2   40.3  62.9   43.4
+ DPVT+DGE   Small      37.7       720    34     147       43.8  65.7   47.7   40.0  62.6   43.2
+ ResNet     101        63.2       -      -      263       40.4  61.1   44.2   36.4  57.7   38.8
+ ResNeXt    101(32x4)  62.8       -      -      354       41.9  62.5   45.9   37.5  59.4   40.2
+ PVT        Medium     63.9       1260   73     339       42.0  64.4   45.6   39.0  61.6   42.1
+ PVT+DGE    Medium     63.9       620    40     228       41.7  64.1   45.0   38.3  62.0   40.6
+ DPVT       Medium     49.9       1800   75     236       46.4  68.0   51.1   42.0  65.2   45.2
+ DPVT+DGE   Medium     49.9       1240   50     169       45.8  67.2   50.0   41.4  64.5   44.6
+ Table 3: Performance of different backbones for semantic segmentation on the ADE-20K val set. The inference time (backbone) is measured for a 512 × 2048 input image. "C" and "G" indicate the latency on CPU and GPU.
+ Backbone     #Param(M)  FLOPs(G)  mIoU(%)  C(ms)  G(ms)
+ PVT-S        28.2       226       41.8     1350   65
+ PVT-S+DGE    28.2       155       41.7     720    42
+ PVT-M        48.0       316       44.0     1910   100
+ PVT-M+DGE    48.0       202       43.9     1100   64
+ DPVT-S       21.7       157       44.4     1470   55
+ DPVT-S+DGE   21.7       121       44.4     860    32
+ DPVT-M       34.3       209       46.8     1990   110
+ DPVT-M+DGE   34.3       148       46.1     1260   50
+ Table 4: Comparison with state-of-the-art vision transformers on the ADE-20K val set. FLOPs are tested at 512×2048 resolution.
+ Backbone         #Param(M)  FLOPs(G)  mIoU(%)
+ ResNet-50 [45]   28.5       184       36.7
+ PVT-S [3]        28.2       226       41.8
+ Swin-Ti [21]     31.9       187       41.5
+ Twins-S [60]     28.3       174       43.2
+ DPVT-S+DGE       21.7       121       44.4
+ ResNet-101 [45]  47.5       262       38.8
+ PVT-M [3]        48.0       316       44.0
+ Swin-S [21]      53.2       280       44.9
+ Twins-B [60]     60.4       318       45.3
+ DPVT-M+DGE       34.3       148       46.1
+ 4.2 Experiments for Downstream Tasks
+ 4.2.1 Object Detection/Instance Segmentation on COCO
+ We apply our models to object detection and instance segmentation on the COCO dataset [58]. We resize the images so that the shorter side is 768 pixels. All experiments are conducted on 8 GPUs with 2 images per GPU (effective minibatch size 16) for 90K iterations. The learning rate is initialized to 1e-4 and decreased by a factor of 10 at the 60K and 80K iterations. Following the settings in PVT [3], we report the performance with the 1x training schedule [57,59].
+ The results are reported in Tab. 2. When equipped with DGE, PVT-S achieves comparable performance at 40.1% APbox with a significant complexity reduction (185G vs 251G) and an inference speed-up of 22%. Even with larger models or different vanilla encoders, our method is still effective and efficient. In addition, the proposed vision transformer variant, i.e., DPVT, is also competitive in terms of parameters, computational cost and performance. Moreover, DPVT-M+DGE achieves 45.8 APbox with 169G FLOPs, which is even more efficient than the ResNet-50 backbone.
+ 4.2.2 Semantic Segmentation on ADE-20K
+ We further evaluate our models as backbones for Semantic-FPN [61] on the ADE-20K [62] dataset. All experiments are based on the MM-Segmentation toolkit [63]. In the training phase, we follow the settings in PVT [3] and set the learning rate to 1e-4, which gradually decreases to 0 by the poly strategy [64]. The images are cropped to 512 × 512 and augmented with random scaling (from 0.5 to 2.0) and flipping. All models are trained for 80k iterations with a batch size of 32.
+ We conduct several ablation studies by introducing the DGE block into PVT [3] and our proposed DPVT. As shown in Tab. 3, with our dynamic grained encoder, DPVT+DGE and PVT+DGE both achieve competitive performance with a significant computation cost reduction of about 30% of FLOPs. On the other hand, PVT-M+DGE achieves 2.1% mIoU absolute gains over PVT-S with less computational complexity. As illustrated in Fig. 5(d), this phenomenon also occurs for different image sizes on the same framework; e.g., our method has up to 1.2% mIoU absolute gains over the baseline with similar computational complexity. In addition, as shown in Tab. 4, our DPVT models with DGE are superior to state-of-the-art vision transformers in terms of parameters, computational complexity and performance. These results demonstrate the generalization ability and robustness of our method.
+ 5 Conclusion
+ In this paper, we analyze the spatial redundancy in vision transformers and propose a dynamic grained encoder to speed up inference. Our encoder can adaptively yield a suitable number of queries for different regions to reduce spatial redundancy while maintaining comparable performance. Besides, our encoder is compatible with many efficient transformers and can be trained in an end-to-end manner. Extensive experiments demonstrate the effectiveness and generalization of our method. In general, this paper explores a new perspective, i.e., leveraging the intrinsic properties of natural images with a dynamic network mechanism to achieve efficient vision transformers. We hope that our dynamic grained encoder can provide insights for future works and beyond.
+ Acknowledgments and Disclosure of Funding
+ This research was supported by the National Key R&D Program of China (No. 2017YFA0700800), the National Natural Science Foundation of China (No. 61790563 and 61774125), and the Shanghai Science and Technology Program (No. 21010502700).
+ A Additional Experiments
+ A.1 Quantitative Analysis on the Dynamic Grained Router
+ We follow the weakly supervised segmentation protocol [64] to show how well the dense-query regions capture the foreground. The metric in [64] is used to measure the gating scores in each DGE layer. Specifically, we set the candidate granularities Φ to {1, 2}, so that the finer-grained gating scores can be taken as a soft segmentation of the image. We adopt the evaluation protocol in [64] to report quantitative segmentation results. As shown in Tab. 5 and Tab. 6, our gating scores have a significant advantage even over the weakly supervised method, i.e., GradCAM. These results demonstrate that DGE guides the transformer to focus on the foreground regions, which is consistent with the visualization.
+ Table 5: The quantitative analysis on DeiT-S with DGE (γ = 0.5).
+ Metric    Random  GradCAM [64]  Layer 1  Layer 4  Layer 8
+ Accuracy  50.0    64.4          55.4     56.3     67.6
+ mAP       50.0    71.6          63.5     60.7     78.8
+ mIoU      31.9    40.8          36.4     37.7     48.2
+ Table 6: The quantitative analysis on PVT-S with DGE (γ = 0.5).
+ Metric    Random  Layer 1  Layer 6  Layer 11  Layer 16
+ Accuracy  50.0    55.4     49.1     67.8      65.5
+ mAP       50.0    68.0     45.2     71.3      79.4
+ mIoU      31.9    34.5     32.5     50.2      46.6
+ A.2 Runtime Analysis on GPUs
+ The efficiency of our DGE modules on GPUs mainly relies on the throughput of sparse matrix multiplication, which depends on the hardware architecture and code optimization. To demonstrate the potential of our method on parallel devices, we implement an optimized CUDA kernel with multiple streams for batched sparse matrix multiplication. With this kernel, we report the runtime comparison of different backbones for multiple downstream tasks on a Tesla V100 GPU. The results are reported in Tab. 7 and Tab. 8, where the latency indicates the runtime of the backbone.
+ are reported in Tab. 7 and Tab. 8, where the latency indicates the runtime of backbone.
1036
+ Table 7: Runtime comparison of MaskRCNN (1x) framework on COCO val set (γ = 0.5).
1037
+ Backbone
1038
+ APb
1039
+ APm
1040
+ FLOPs
1041
+ Latency (CPU)
1042
+ Latency (GPU)
1043
+ PVT-S
1044
+ 40.4
1045
+ 37.8
1046
+ 251G
1047
+ 0.88s
1048
+ 33ms
1049
+ PVT-S+DGE
1050
+ 40.1
1051
+ 37.5
1052
+ 185G
1053
+ 0.44s
1054
+ 26ms
1055
+ DPVT-S
1056
+ 44.0
1057
+ 40.3
1058
+ 186G
1059
+ 1.09s
1060
+ 50ms
1061
+ DPVT-S+DGE
1062
+ 43.8
1063
+ 40.0
1064
+ 147G
1065
+ 0.72s
1066
+ 34ms
1067
+ PVT-M
1068
+ 42.0
1069
+ 39.0
1070
+ 339G
1071
+ 1.26s
1072
+ 73ms
1073
+ PVT-M+DGE
1074
+ 41.7
1075
+ 38.3
1076
+ 228G
1077
+ 0.62s
1078
+ 40ms
1079
+ DPVT-M
1080
+ 46.4
1081
+ 42.0
1082
+ 236G
1083
+ 1.80s
1084
+ 75ms
1085
+ DPVT-M+DGE
1086
+ 45.8
1087
+ 41.4
1088
+ 169G
1089
+ 1.24s
1090
+ 50ms
1091
+ Table 8: Runtime comparison of the Semantic-FPN framework on the ADE-20K val set (γ = 0.5).
+ Backbone     mIoU  FLOPs  Latency (CPU)  Latency (GPU)
+ PVT-S        41.8  226G   1.35s          65ms
+ PVT-S+DGE    41.7  155G   0.72s          42ms
+ DPVT-S       44.4  157G   1.47s          55ms
+ DPVT-S+DGE   44.4  121G   0.86s          32ms
+ PVT-M        44.0  316G   1.91s          100ms
+ PVT-M+DGE    43.9  202G   1.10s          64ms
+ DPVT-M       46.8  209G   1.99s          110ms
+ DPVT-M+DGE   46.1  148G   1.26s          50ms
+ A.3 Implementation Details for Complexity Computation
+ We report the FLOPs following the conventional protocol of dynamic networks [32]. Specifically, we split the entire network into static and dynamic parts. The complexity of the static part, i.e., the modules without a dynamic mechanism, including the gating networks in DGE, is computed in the standard way [1,3,19]. For the complexity of the dynamic part, i.e., the dynamic modules in DGE, we accumulate the complexity associated with each enabled query according to the gating indices.
References

[1] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.
[2] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable DETR: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159, 2020.
[3] Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, and Ling Shao. Pyramid vision transformer: A versatile backbone for dense prediction without convolutions. arXiv preprint arXiv:2102.12122, 2021.
[4] Li Yuan, Yunpeng Chen, Tao Wang, Weihao Yu, Yujun Shi, Zihang Jiang, Francis EH Tay, Jiashi Feng, and Shuicheng Yan. Tokens-to-token ViT: Training vision transformers from scratch on ImageNet. arXiv preprint arXiv:2101.11986, 2021.
[5] Hila Chefer, Shir Gur, and Lior Wolf. Transformer interpretability beyond attention visualization. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021.
[6] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dollár. Focal loss for dense object detection. In IEEE International Conference on Computer Vision, 2017.
[7] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In IEEE Conference on Computer Vision and Pattern Recognition, 2017.
[8] Adam W Harley, Konstantinos G Derpanis, and Iasonas Kokkinos. Segmentation-aware convolutional networks using local attention masks. In IEEE International Conference on Computer Vision, 2017.
[9] Lin Song, Yanwei Li, Zhengkai Jiang, Zeming Li, Xiangyu Zhang, Hongbin Sun, Jian Sun, and Nanning Zheng. Rethinking learnable tree filter for generic feature transform. In Advances in Neural Information Processing Systems, 2020.
[10] Lin Song, Yanwei Li, Zhengkai Jiang, Zeming Li, Hongbin Sun, Jian Sun, and Nanning Zheng. Fine-grained dynamic head for object detection. In Advances in Neural Information Processing Systems, 2020.
[11] Krzysztof Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Davis, Afroz Mohiuddin, Lukasz Kaiser, et al. Rethinking attention with Performers. arXiv preprint arXiv:2009.14794, 2020.
[12] Nikita Kitaev, Łukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. arXiv preprint arXiv:2001.04451, 2020.
[13] Sinong Wang, Belinda Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768, 2020.
[14] Wenjin Wang, Sander Stuijk, and Gerard De Haan. Exploiting spatial redundancy of image sensor for motion robust rPPG. IEEE Transactions on Biomedical Engineering, 2014.
[15] Yunpeng Chen, Haoqi Fan, Bing Xu, Zhicheng Yan, Yannis Kalantidis, Marcus Rohrbach, Shuicheng Yan, and Jiashi Feng. Drop an octave: Reducing spatial redundancy in convolutional neural networks with octave convolution. In IEEE/CVF International Conference on Computer Vision, 2019.
[16] Marco Tagliasacchi, Alan Trapanese, Stefano Tubaro, Joao Ascenso, Catarina Brites, and Fernando Pereira. Exploiting spatial redundancy in pixel domain Wyner-Ziv video coding. In International Conference on Image Processing, 2006.
[17] Lin Song, Shiwei Zhang, Gang Yu, and Hongbin Sun. TACNet: Transition-aware context network for spatio-temporal action detection. In IEEE Conference on Computer Vision and Pattern Recognition, 2019.
[18] Shiwei Zhang, Lin Song, Changxin Gao, and Nong Sang. GLNet: Global local network for weakly supervised action localization. IEEE Transactions on Multimedia, 2019.
[19] Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, and Hervé Jégou. Training data-efficient image transformers & distillation through attention. arXiv preprint arXiv:2012.12877, 2020.
[20] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. ImageNet: A large-scale hierarchical image database. In IEEE Conference on Computer Vision and Pattern Recognition, 2009.
[21] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030, 2021.
[22] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in Neural Information Processing Systems, 2017.
[23] Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509, 2019.
[24] Jonathan Ho, Nal Kalchbrenner, Dirk Weissenborn, and Tim Salimans. Axial attention in multidimensional transformers. arXiv preprint arXiv:1912.12180, 2019.
[25] Yi Tay, Dara Bahri, Liu Yang, Donald Metzler, and Da-Cheng Juan. Sparse Sinkhorn attention. In International Conference on Machine Learning, 2020.
[26] Minghao Chen, Houwen Peng, Jianlong Fu, and Haibin Ling. AutoFormer: Searching transformers for visual recognition. In IEEE/CVF International Conference on Computer Vision, 2021.
[27] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are RNNs: Fast autoregressive transformers with linear attention. In International Conference on Machine Learning, 2020.
[28] Yi Tay, Dara Bahri, Donald Metzler, Da-Cheng Juan, Zhe Zhao, and Che Zheng. Synthesizer: Rethinking self-attention in transformer models. arXiv preprint arXiv:2005.00743, 2020.
[29] Aurko Roy, Mohammad Saffar, Ashish Vaswani, and David Grangier. Efficient content-based sparse attention with routing transformers. Transactions of the Association for Computational Linguistics, 9:53–68, 2021.
[30] Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, et al. Big Bird: Transformers for longer sequences. arXiv preprint arXiv:2007.14062, 2020.
[31] Iz Beltagy, Matthew E Peters, and Arman Cohan. Longformer: The long-document transformer. arXiv preprint arXiv:2004.05150, 2020.
[32] Yizeng Han, Gao Huang, Shiji Song, Le Yang, Honghui Wang, and Yulin Wang. Dynamic neural networks: A survey. arXiv preprint arXiv:2102.04906, 2021.
[33] Zuxuan Wu, Tushar Nagarajan, Abhishek Kumar, Steven Rennie, Larry S Davis, Kristen Grauman, and Rogerio Feris. BlockDrop: Dynamic inference paths in residual networks. In IEEE Conference on Computer Vision and Pattern Recognition, 2018.
[34] Gao Huang, Danlu Chen, Tianhong Li, Felix Wu, Laurens van der Maaten, and Kilian Q Weinberger. Multi-scale dense networks for resource efficient image classification. arXiv preprint arXiv:1703.09844, 2017.
[35] Ravi Teja Mullapudi, William R Mark, Noam Shazeer, and Kayvon Fatahalian. HydraNets: Specialized dynamic architectures for efficient inference. In IEEE Conference on Computer Vision and Pattern Recognition, 2018.
[36] Xin Wang, Fisher Yu, Zi-Yi Dou, Trevor Darrell, and Joseph E Gonzalez. SkipNet: Learning dynamic routing in convolutional networks. In European Conference on Computer Vision, 2018.
[37] Shaohui Lin, Rongrong Ji, Yuchao Li, Yongjian Wu, Feiyue Huang, and Baochang Zhang. Accelerating convolutional networks via global & dynamic filter pruning. In IJCAI, 2018.
[38] Zhonghui You, Kun Yan, Jinmian Ye, Meng Ma, and Ping Wang. Gate decorator: Global filter pruning method for accelerating deep convolutional neural networks. arXiv preprint arXiv:1909.08174, 2019.
[39] Yanwei Li, Lin Song, Yukang Chen, Zeming Li, Xiangyu Zhang, Xingang Wang, and Jian Sun. Learning dynamic routing for semantic segmentation. In IEEE Conference on Computer Vision and Pattern Recognition, 2020.
[40] Le Yang, Yizeng Han, Xi Chen, Shiji Song, Jifeng Dai, and Gao Huang. Resolution adaptive networks for efficient inference. In IEEE Conference on Computer Vision and Pattern Recognition, 2020.
[41] William Fedus, Barret Zoph, and Noam Shazeer. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. arXiv preprint arXiv:2101.03961, 2021.
[42] Noam Shazeer, Azalia Mirhoseini, Krzysztof Maziarz, Andy Davis, Quoc Le, Geoffrey Hinton, and Jeff Dean. Outrageously large neural networks: The sparsely-gated mixture-of-experts layer. arXiv preprint arXiv:1701.06538, 2017.
[43] Thomas Verelst and Tinne Tuytelaars. Dynamic convolutions: Exploiting spatial sparsity for faster inference. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020.
[44] Lin Song, Yanwei Li, Zeming Li, Gang Yu, Hongbin Sun, Jian Sun, and Nanning Zheng. Learnable tree filter for structure-preserving feature transform. In Advances in Neural Information Processing Systems, 2019.
[45] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In IEEE Conference on Computer Vision and Pattern Recognition, 2016.
[46] Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Süsstrunk. SLIC superpixels compared to state-of-the-art superpixel methods. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2012.
[47] David A Forsyth and Jean Ponce. Computer Vision: A Modern Approach. Pearson, 2012.
[48] Andreas Veit and Serge Belongie. Convolutional networks with adaptive inference graphs. In European Conference on Computer Vision, 2018.
[49] Charles Herrmann, Richard Strong Bowen, and Ramin Zabih. An end-to-end approach for speeding up neural network inference. arXiv e-prints, 2018.
[50] Zhenda Xie, Zheng Zhang, Xizhou Zhu, Gao Huang, and Stephen Lin. Spatially adaptive inference with stochastic feature sampling and interpolation. In European Conference on Computer Vision, 2020.
[51] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with Gumbel-Softmax. arXiv preprint arXiv:1611.01144, 2016.
[52] Yoshua Bengio, Nicholas Léonard, and Aaron Courville. Estimating or propagating gradients through stochastic neurons for conditional computation. arXiv preprint arXiv:1308.3432, 2013.
[53] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. Going deeper with convolutions. In IEEE Conference on Computer Vision and Pattern Recognition, 2015.
[54] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz. mixup: Beyond empirical risk minimization. arXiv preprint arXiv:1710.09412, 2017.
[55] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.
[56] Ilya Loshchilov and Frank Hutter. SGDR: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016.
[57] Yuxin Wu, Alexander Kirillov, Francisco Massa, Wan-Yen Lo, and Ross Girshick. Detectron2. https://github.com/facebookresearch/detectron2, 2019.
[58] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision, 2014.
[59] Jianfeng Wang, Lin Song, Zeming Li, Hongbin Sun, Jian Sun, and Nanning Zheng. End-to-end object detection with fully convolutional network. In IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021.
[60] Xiangxiang Chu, Zhi Tian, Yuqing Wang, Bo Zhang, Haibing Ren, Xiaolin Wei, Huaxia Xia, and Chunhua Shen. Twins: Revisiting the design of spatial attention in vision transformers. arXiv preprint arXiv:2104.13840, 2021.
[61] Alexander Kirillov, Ross Girshick, Kaiming He, and Piotr Dollár. Panoptic feature pyramid networks. In IEEE Conference on Computer Vision and Pattern Recognition, 2019.
[62] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ADE20K dataset. In IEEE Conference on Computer Vision and Pattern Recognition, 2017.
[63] MMSegmentation Contributors. MMSegmentation: OpenMMLab semantic segmentation toolbox and benchmark. https://github.com/open-mmlab/mmsegmentation, 2020.
[64] Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi, and Amit Agrawal. Context encoding for semantic segmentation. In IEEE Conference on Computer Vision and Pattern Recognition, 2018.
RdE2T4oBgHgl3EQfWQda/content/tmp_files/load_file.txt ADDED
 
SNE4T4oBgHgl3EQfLAy5/content/tmp_files/2301.04935v1.pdf.txt ADDED
@@ -0,0 +1,2879 @@
A Stochastic Proximal Polyak Step Size

Fabian Schaipp (schaippf@ma.tum.de), Department of Mathematics, Technical University of Munich
Robert M. Gower (rgower@flatironinstitute.org), Center for Computational Mathematics, Flatiron Institute, New York
Michael Ulbrich (mulbrich@ma.tum.de), Department of Mathematics, Technical University of Munich
Abstract

Recently, the stochastic Polyak step size (SPS) has emerged as a competitive adaptive step size scheme for stochastic gradient descent. Here we develop ProxSPS, a proximal variant of SPS that can handle regularization terms. Developing a proximal variant of SPS is particularly important, since SPS requires a lower bound of the objective function to work well. When the objective function is the sum of a loss and a regularizer, available estimates of a lower bound of the sum can be loose. In contrast, ProxSPS only requires a lower bound for the loss which is often readily available. As a consequence, we show that ProxSPS is easier to tune and more stable in the presence of regularization. Furthermore for image classification tasks, ProxSPS performs as well as AdamW with little to no tuning, and results in a network with smaller weight parameters. We also provide an extensive convergence analysis for ProxSPS that includes the non-smooth, smooth, weakly convex and strongly convex setting.
1 Introduction

Consider problems of the form

    \min_{x \in \mathbb{R}^n} f(x), \qquad f(x) := \mathbb{E}_P[f(x; S)] = \int_{\mathcal{S}} f(x; s) \, dP(s),    (1)

where S is a sample space (or sample set). Formally, we can see S as a random variable mapping to S and P(s) as the associated probability measure. Let us assume that for each s ∈ S, the function f(·; s) : Rⁿ → R is locally Lipschitz and hence possesses the Clarke subdifferential ∂f(·; s) (Clarke, 1983). Problems of form (1) arise in machine learning tasks where S is the space of available data points (Bottou et al., 2018). An efficient method for such problems is stochastic (sub)gradient descent (Robbins & Monro, 1951; Bottou, 2010; Davis & Drusvyatskiy, 2019), given by

    x_{k+1} = x_k - \alpha_k g_k, \qquad g_k \in \partial f(x_k; S_k), \quad \text{where } S_k \sim P.    (SGD)
Moreover, we will also consider the composite problem

    \min_{x \in \mathbb{R}^n} \psi(x), \qquad \psi(x) := f(x) + \phi(x),    (2)

where ϕ : Rⁿ → R ∪ {∞} is a proper, closed, and convex regularization function. In practical situations, the expectation in the objective function f is typically approximated by a sample average over N ∈ N data points. We formalize this special case with

    \mathcal{S} = \{s_1, \dots, s_N\}, \quad P(s_i) = \tfrac{1}{N}, \quad f_i := f(\cdot; s_i), \quad i = 1, \dots, N.    (ER)

In this case, problem (1) becomes the empirical risk minimization problem

    \min_{x \in \mathbb{R}^n} \frac{1}{N} \sum_{i=1}^{N} f_i(x).
1.1 Background and Contributions

Polyak step size. For minimizing a convex, possibly non-differentiable function f, Polyak (1987, Chapter 5.3) proposed

    x_{k+1} = x_k - \alpha_k g_k, \qquad \alpha_k = \frac{f(x_k) - \min f}{\|g_k\|^2}, \qquad g_k \in \partial f(x_k) \setminus \{0\}.

This particular choice of αk, requiring the knowledge of min f, has been subsequently called the Polyak step size for the subgradient method. Recently, Berrada et al. (2019); Loizou et al. (2021); Orvieto et al. (2022) adapted the Polyak step size to the stochastic setting: consider the (ER) case and assume that each fi is differentiable and that a lower bound C(si) ≤ infx fi(x) is known for all i ∈ [N]. The method proposed by (Loizou et al., 2021) is

    x_{k+1} = x_k - \min\Big\{\gamma_b, \; \frac{f_{i_k}(x_k) - C(s_{i_k})}{c\,\|\nabla f_{i_k}(x_k)\|^2}\Big\} \nabla f_{i_k}(x_k),    (SPSmax)

with hyper-parameters c, γb > 0 and where in each iteration ik is drawn from {1, . . . , N} uniformly at random. It is important to note that the initial work (Loizou et al., 2021) used C(si) = inf fi; later, Orvieto et al. (2022) established theory for (SPSmax) for the more general case of C(si) ≤ infx fi(x) and allowing for mini-batching. Other works analyzed the Polyak step size in the convex, smooth setting (Hazan & Kakade, 2019) and in the convex, smooth and stochastic setting (Prazeres & Oberman, 2021). Further, the stochastic Polyak step size is closely related to stochastic model-based proximal point (Asi & Duchi, 2019) as well as stochastic bundle methods (Paren et al., 2022).
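As a concrete illustration, the following is a minimal NumPy sketch of one SPSmax update in the (ER) setting. The function signature, the loss/gradient callables, and the default lower bound 0 are assumptions for the example, not code from the paper.

```python
import numpy as np

def sps_max_step(x, f_i, grad_f_i, c=1.0, gamma_b=1.0, lower_bound=0.0):
    """One SPSmax update for a sampled loss f_i with lower_bound <= inf f_i."""
    g = grad_f_i(x)
    sq_norm = float(np.dot(g, g))
    if sq_norm == 0.0:
        return x
    step = min(gamma_b, (f_i(x) - lower_bound) / (c * sq_norm))
    return x - step * g
```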
Contribution. We propose a proximal version of the stochastic Polyak step size, called ProxSPS, which explicitly handles regularization functions. Our proposal is based crucially on the fact that the stochastic Polyak step size can be motivated with stochastic proximal point for a truncated linear model of the objective function (we explain this in detail in Section 3.1). Our method has closed-form updates for squared ℓ2-regularization. We provide theoretical guarantees for ProxSPS for any closed, proper, and convex regularization function (including indicator functions for constraints). Our main results, Theorem 7 and Theorem 8, also give new insights for SPSmax, in particular showing exact convergence for convex and non-convex settings.
Lower bounds and regularization. Methods such as SPSmax need to estimate a lower bound C(s) for each loss function f(·; s). Though infx f(x; s) can be precomputed in some restricted settings, in practice the lower bound C(s) = 0 is used for non-negative loss functions.¹ The tightness of the choice C(s) is further reflected in the constant σ² := min f − EP[C(S)], which affects the convergence guarantees of SPSmax (Orvieto et al., 2022).

Contribution. For regularized problems (2) and if ϕ is differentiable, the current proposal of SPSmax would add ϕ to every loss function f(·; s). In this case, for non-negative regularization terms, such as the squared ℓ2-norm, the lower bound C(s) = 0 is always loose. Indeed, if ϕ ≥ 0, then infx∈Rn(f(x; s) + ϕ(x)) ≥ infx∈Rn f(x; s) and this inequality is strict in most practical scenarios. For our proposed method ProxSPS, we now need only estimate a lower bound for the loss f(x; s) and not for the composite function f(x; s) + ϕ(x). Further, ProxSPS decouples the adaptive step size for the gradient of the loss from the regularization (we explain this in detail in Section 4.1 and Fig. 1).

Proximal and adaptive methods. The question of how to handle regularization terms has also been posed for other families of adaptive methods. For Adam (Kingma & Ba, 2015) with ℓ2-regularization it has been observed that it generalizes worse and is harder to tune than AdamW (Loshchilov & Hutter, 2019) which uses weight decay. Further, AdamW can be seen as an approximation to a proximal version of Adam (Zhuang et al., 2022).² On the other hand, Loizou et al. (2021) showed that – without regularization – default hyperparameter settings for SPSmax give very encouraging results on matrix factorization and image classification tasks. This is promising since it suggests that SPSmax is an adaptive method, and can work well across varied tasks without the need for extensive hyperparameter tuning.

Contribution. We show that by handling ℓ2-regularization using a proximal step, our resulting ProxSPS is less sensitive to hyperparameter choice as compared to SPSmax. This becomes apparent in matrix factorization problems, where ProxSPS converges for a wide range of regularization parameters and learning rates, while SPSmax is more sensitive to these settings. We also show similar results for image classification over the CIFAR10 dataset when using a ResNet56 and ResNet110 model, where we also compare our method to AdamW.

The remainder of our paper is organized as follows: we will first recall how the stochastic Polyak step size, in the case of ϕ = 0, can be derived using the model-based approach of (Asi & Duchi, 2019; Davis & Drusvyatskiy, 2019) and how this is connected to SPSmax. We then derive ProxSPS based on the connection to model-based methods, and present our theoretical results, based on the proof techniques in (Davis & Drusvyatskiy, 2019).
2 Preliminaries

2.1 Notation

Throughout, we will write E instead of EP. For any random variable X(s), we denote E[X(S)] := ∫_S X(s) dP(s). We denote (·)₊ := max{·, 0}. We write Õ when we drop logarithmic terms in the O-notation, e.g. Õ(1/K) = O(ln(1+K)/K).

¹See for instance https://github.com/IssamLaradji/sps.
²For SGD, treating ℓ2-regularization as a part of the loss can be seen to be equivalent to its proximal version (cf. Appendix C).
2.2 General assumptions

Throughout the article, we assume the following:

Assumption 1. It is possible to generate infinitely many i.i.d. realizations S1, S2, . . . from S.

Assumption 2. For every s ∈ S there exists C(s) satisfying C(s) ≤ infx f(x; s).

In many machine learning applications, non-negative loss functions are used and thus we can satisfy the second assumption by choosing C(s) = 0 for all s ∈ S.
2.3 Convex analysis

Let g : Rⁿ → R be convex and α > 0. The proximal operator is given by

    \operatorname{prox}_{\alpha g}(x) := \arg\min_y \; g(y) + \frac{1}{2\alpha}\|y - x\|^2.

Further, the Moreau envelope is defined by env^α_g(x) := min_y g(y) + (1/2α)∥y − x∥², and its derivative is ∇env^α_g(x) = (1/α)(x − prox_{αg}(x)) (Drusvyatskiy & Paquette, 2019, Lem. 2.1). Moreover, due to the optimality conditions of the proximal operator, if g ∈ C¹ then

    \hat{x} = \operatorname{prox}_{\alpha g}(x) \implies \|\nabla g(\hat{x})\| = \alpha^{-1}\|x - \hat{x}\| = \|\nabla \operatorname{env}^{\alpha}_{g}(x)\|.    (3)

Davis & Drusvyatskiy (2019) showed how to use the Moreau envelope as a measure of stationarity: if ∥∇env^α_g(x)∥ is small, then x is close to x̂ and x̂ is an almost stationary point of g. Formally, the gradient of the Moreau envelope can be related to the gradient mapping (cf. (Drusvyatskiy & Lewis, 2018, Thm. 3.5) and Lemma 11).

We say that a function g : Rⁿ → R is L-smooth if its gradient is L–Lipschitz, that is

    \|\nabla g(x) - \nabla g(y)\| \le L\|x - y\| \qquad \forall x, y \in \mathbb{R}^n.    (4)

If g is L-smooth, then g(y) ≤ g(x) + ⟨∇g(x), y − x⟩ + (L/2)∥y − x∥² for all x, y ∈ Rⁿ.

A function g : Rⁿ → R is ρ–weakly convex for ρ ≥ 0 if g + (ρ/2)∥·∥² is convex. Any L–smooth function is weakly convex with parameter less than or equal to L (Drusvyatskiy & Paquette, 2019, Lem. 4.2). The above results on the proximal operator and Moreau envelope can immediately be extended to g being ρ–weakly convex if α ∈ (0, ρ⁻¹), since then g + (ρ/2)∥·∥² is convex.

If we assume that each f(·; s) is ρs-weakly convex for ρs ≥ 0, then applying (Bertsekas, 1973, Lem. 2.1) to the convex function f(·; s) + (ρs/2)∥·∥² yields that f + (ρ/2)∥·∥² is convex and thus f is ρ–weakly convex for ρ := E[ρS]. In particular, f is convex if each f(·; s) is assumed to be convex. For a weakly convex function g, we denote with ∂g the regular subdifferential (cf. (Davis & Drusvyatskiy, 2019, section 2.2) and (Rockafellar & Wets, 1998, Def. 8.3)).
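To make these objects concrete, here is a small numerical sketch (an illustrative example, not from the paper) for g(x) = (λ/2)∥x∥², whose proximal operator is x/(1+αλ); it checks the envelope-gradient identity stated above.

```python
import numpy as np

alpha, lam = 0.5, 2.0
x = np.array([1.0, -3.0, 2.0])

prox = x / (1.0 + alpha * lam)   # prox_{alpha*g}(x) for g = (lam/2)||.||^2
env_grad = (x - prox) / alpha    # gradient of the Moreau envelope at x

# For this particular g, the envelope gradient equals lam/(1 + alpha*lam) * x:
assert np.allclose(env_grad, lam / (1.0 + alpha * lam) * x)
```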
3 The unregularized case

For this section, consider problems of form (1), i.e. no regularization term ϕ is added to the loss f.
3.1 A model-based view point

Many classical methods for solving (1) can be summarized by model-based stochastic proximal point: in each iteration, a model fx(·; s) is constructed approximating f(·; s) locally around x. With Sk ∼ P being drawn at random, this yields the update

    x_{k+1} = \arg\min_{y \in \mathbb{R}^n} \; f_{x_k}(y; S_k) + \frac{1}{2\alpha_k}\|y - x_k\|^2.    (5)

The theoretical foundation for this family of methods has been established by Asi & Duchi (2019) and Davis & Drusvyatskiy (2019). They give the following three models as examples:

(i) Linear: fx(y; s) := f(x; s) + ⟨g, y − x⟩ with g ∈ ∂f(x; s).
(ii) Full: fx(y; s) := f(y; s).
(iii) Truncated: fx(y; s) := max{f(x; s) + ⟨g, y − x⟩, infz∈Rn f(z; s)} where g ∈ ∂f(x; s).

It is easy to see that update (5) for the linear model is equal to (SGD) while the full model results in the stochastic proximal point method. For the truncated model, (5) results in the update

    x_{k+1} = x_k - \min\Big\{\alpha_k, \; \frac{f(x_k; S_k) - \inf_{z \in \mathbb{R}^n} f(z; S_k)}{\|g_k\|^2}\Big\} g_k, \qquad g_k \in \partial f(x_k; S_k).    (6)

More generally, one can replace the term infx∈Rn f(x; Sk) with an arbitrary lower bound of f(·; Sk) (cf. Lemma 10). The model-based stochastic proximal point method for the truncated model is given in Algorithm 1. The connection between the truncated model and the method depicted in (6) is not a new insight and has been pointed out in several works (including (Asi & Duchi, 2019; Loizou et al., 2021) and (Berrada et al., 2019, Prop. 1)). For simplicity, we refer to Algorithm 1 as SPS throughout this article. However, it should be pointed out that this acronym (and variations of it) have been used for stochastic Polyak-type methods in slightly different ways (Loizou et al., 2021; Gower et al., 2021).
Algorithm 1 SPS
Require: x0 ∈ Rⁿ, step sizes αk > 0.
for k = 0, 1, 2, . . . , K − 1 do
    1. Sample Sk and set Ck := C(Sk).
    2. Choose gk ∈ ∂f(xk; Sk). If gk = 0, set xk+1 = xk. Otherwise, set

        x_{k+1} = x_k - \gamma_k g_k, \qquad \gamma_k = \min\Big\{\alpha_k, \; \frac{f(x_k; S_k) - C_k}{\|g_k\|^2}\Big\}.    (7)

return xK
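For reference, a minimal NumPy sketch of one iteration of Algorithm 1; the signature (sampled loss value, subgradient, and lower bound as plain inputs) is an illustrative assumption.

```python
import numpy as np

def sps_step(x_k, loss_k, g_k, alpha_k, C_k=0.0):
    """One SPS iteration, cf. update (7); loss_k = f(x_k; S_k), g_k a subgradient."""
    sq_norm = float(np.dot(g_k, g_k))
    if sq_norm == 0.0:
        return x_k
    gamma_k = min(alpha_k, (loss_k - C_k) / sq_norm)
    return x_k - gamma_k * g_k
```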
For instance consider again the SPSmax method

    x_{k+1} = x_k - \min\Big\{\gamma_b, \; \frac{f_{i_k}(x_k) - C(s_{i_k})}{c\,\|\nabla f_{i_k}(x_k)\|^2}\Big\} \nabla f_{i_k}(x_k),    (SPSmax)

where c, γb > 0. Clearly, for c = 1 and αk = γb, update (7) is identical to SPSmax. With this in mind, we can interpret the hyperparameter γb in SPSmax simply as a step size for the model-based stochastic proximal point step. For the parameter c on the other hand, the model-based approach motivates the choice c = 1, which potentially reduces the amount of hyperparameter tuning. However, according to (Loizou et al., 2021), c = 1/2 gives the best rate of convergence in the strongly convex case.
4 The regularized case

Now we consider regularized problems of the form (2), i.e.

    \min_{x \in \mathbb{R}^n} \psi(x), \qquad \psi(x) = f(x) + \phi(x),

where ϕ : Rⁿ → R ∪ {∞} is a proper, closed, λ-strongly convex function for λ ≥ 0 (we allow λ = 0). For s ∈ S, denote by ψx(·; s) a stochastic model of the objective ψ at x. We aim to analyze algorithms with the update

    x_{k+1} = \arg\min_{x \in \mathbb{R}^n} \; \psi_{x_k}(x; S_k) + \frac{1}{2\alpha_k}\|x - x_k\|^2,    (8)

where Sk ∼ P and αk > 0. Naively, if we know a lower bound C̃(s) of f(·; s) + ϕ(·), the truncated model could be constructed for the function f(x; s) + ϕ(x), resulting in

    \psi_x(y; s) = \max\{f(x; s) + \phi(x) + \langle g + u, y - x\rangle, \tilde{C}(s)\}, \qquad g \in \partial f(x; s), \; u \in \partial\phi(x).    (9)

In fact, Asi & Duchi (2019) and Loizou et al. (2021) work in the setting of unregularized problems and hence their approaches would handle regularization in this way. What we propose instead, is to only truncate a linearization of the loss f(x; s), yielding the model

    \psi_x(y; s) = f_x(y; s) + \phi(y), \qquad f_x(y; s) = \max\{f(x; s) + \langle g, y - x\rangle, C(s)\}, \qquad g \in \partial f(x; s).    (10)

Solving (8) with the model in (10) results in

    x_{k+1} = \arg\min_{y \in \mathbb{R}^n} \; \max\{f(x_k; S_k) + \langle g_k, y - x_k\rangle, C(S_k)\} + \phi(y) + \frac{1}{2\alpha_k}\|y - x_k\|^2.    (11)

The resulting model-based stochastic proximal point method is given in Algorithm 2.³ Lemma 12 shows that, if proxϕ is known, update (11) can be computed by minimizing a strongly convex function over a compact one-dimensional interval. The relation to the proximal operator of ϕ motivates the name ProxSPS. Further, the ProxSPS update (11) has a closed form solution when ϕ is the squared ℓ2-norm, as we detail in the next section.

Algorithm 2 ProxSPS
Require: x0 ∈ Rⁿ, step sizes αk > 0.
for k = 0, 1, 2, . . . , K − 1 do
    1. Sample Sk and set Ck := C(Sk).
    2. Choose gk ∈ ∂f(xk; Sk). Update xk+1 according to (11).
return xK

³For ϕ = 0, Algorithm 2 is identical to Algorithm 1.
[Figure 1: two panels. (a) Regularized logistic loss, f(x; s) = ln(1 + exp(−0.5 · x)), α = 10.0, λ = 0.1, showing f(·; s) + ϕ, the ProxSPS and SPS models and objectives, and the points x0, x⋆, x̂1, x1. (b) Regularized squared loss with αk = 1, λ = 1, showing streamline plots for ProxSPS and SPS.]

Figure 1: a) SPS refers to model (9) whereas ProxSPS refers to (10). We plot the corresponding model ψx0(y; s) and the objective function of (8). x1 (resp. x̂1) denotes the new iterate for ProxSPS (resp. SPS), x⋆ is the minimizer of f(·; s) + ϕ. b) Streamlines of the vector field V(xk) := xk+1 − xk, for f(x) = ∥Ax − b∥² and for the deterministic update, i.e. f(x; s) = f(x). ProxSPS refers to update (14) and SPS refers to (13). The circle marks the minimizer of f(x) + (λ/2)∥x∥².
4.1 The special case of ℓ2-regularization

When ϕ(x) = (λ/2)∥x∥² for some λ > 0, ProxSPS (11) has a closed form solution as we show next in Lemma 1. For this lemma, recall that the proximal operator of ϕ(x) = (λ/2)∥x∥² is given by prox_{αϕ}(x) = (1/(1+αλ)) x for all α > 0, x ∈ Rⁿ.
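For completeness, this formula follows directly from the first-order optimality condition of the defining subproblem of the proximal operator:

    \lambda y + \tfrac{1}{\alpha}(y - x) = 0 \;\iff\; (1 + \alpha\lambda)\, y = x \;\iff\; y = \tfrac{1}{1 + \alpha\lambda}\, x.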
Lemma 1. Let ϕ(x) = (λ/2)∥x∥² and let g ∈ ∂f(x; s) and C(s) ≤ infz∈Rn f(z; s) hold for all s ∈ S. For ψx(y; s) = fx(y; s) + ϕ(y) with fx(y; s) = max{f(x; s) + ⟨g, y − x⟩, C(s)} consider the update

    x_{k+1} = \arg\min_{x \in \mathbb{R}^n} \; \psi_{x_k}(x; S_k) + \frac{1}{2\alpha_k}\|x - x_k\|^2.

Denote Ck := C(Sk) and let gk ∈ ∂f(xk; Sk). Define

    \tau_k^+ := \begin{cases} 0 & \text{if } g_k = 0, \\ \min\Big\{\alpha_k, \; \Big[\frac{(1+\alpha_k\lambda)(f(x_k; S_k) - C_k) - \alpha_k\lambda\langle g_k, x_k\rangle}{\|g_k\|^2}\Big]_+\Big\} & \text{else.} \end{cases}

Update (11) is given by

    x_{k+1} = \frac{1}{1 + \alpha_k\lambda}\big(x_k - \tau_k^+ g_k\big) = \operatorname{prox}_{\alpha_k\phi}(x_k - \tau_k^+ g_k).    (12)

See Lemma 9 in the appendix for an extended version of the above lemma and its proof. The update (12) can be naturally decomposed into two steps: one stochastic gradient step with an adaptive stepsize, that is x̄k+1 = xk − τk⁺ gk, followed by a proximal step xk+1 = prox_{αkϕ}(x̄k+1). This decoupling into two steps makes it easier to interpret the effect of each step, with τk⁺ adjusting for the scale/curvature and the following proximal step shrinking the resulting parameters. There is no clear separation of tasks if we apply the SPS method to the regularized problem, as we see next.
Algorithm 3 ProxSPS for ϕ = (λ/2)∥·∥²
Require: x0 ∈ Rⁿ, step sizes αk > 0.
for k = 0, 1, 2, . . . , K − 1 do
    1. Sample Sk and set Ck := C(Sk).
    2. Choose gk ∈ ∂f(xk; Sk). If gk = 0, set xk+1 = (1/(1+αkλ)) xk. Otherwise, set

        x_{k+1} = \frac{1}{1 + \alpha_k\lambda}\Big(x_k - \min\Big\{\alpha_k, \; \Big[\frac{(1 + \alpha_k\lambda)(f(x_k; S_k) - C_k) - \alpha_k\lambda\langle g_k, x_k\rangle}{\|g_k\|^2}\Big]_+\Big\} g_k\Big).

return xK
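For concreteness, a NumPy sketch of one iteration of Algorithm 3; passing the sampled loss value and gradient as plain arrays is an illustrative assumption, and C_k = 0 matches the default used in the experiments.

```python
import numpy as np

def prox_sps_step(x_k, loss_k, g_k, alpha_k, lam, C_k=0.0):
    """One ProxSPS iteration for phi = (lam/2)||x||^2, cf. Lemma 1 / Algorithm 3."""
    scale = 1.0 + alpha_k * lam
    sq_norm = float(np.dot(g_k, g_k))
    if sq_norm == 0.0:
        return x_k / scale
    zeta = (scale * (loss_k - C_k) - alpha_k * lam * np.dot(g_k, x_k)) / sq_norm
    tau = min(alpha_k, max(zeta, 0.0))   # tau_k^+
    return (x_k - tau * g_k) / scale     # = prox_{alpha_k * phi}(x_k - tau * g_k)
```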
4.2 Comparing the model of SPS and ProxSPS

For simplicity, assume again the discrete sample space setting (ER) with differentiable loss functions fi and let ϕ = (λ/2)∥·∥². Clearly, the composite problem (2) can be transformed to an instance of (1) by setting ℓi(x) := fi(x) + (λ/2)∥x∥² and solving minx ℓ(x) with ℓ(x) := (1/N) Σ_{i=1}^N ℓi(x). Assume that a lower bound ℓ̲i ≤ infx ℓi(x) is known. In this case (9) becomes

    \psi_x(y; s_i) = \max\Big\{f_i(x) + \tfrac{\lambda}{2}\|x\|^2 + \langle\nabla f_i(x) + \lambda x, \; y - x\rangle, \; \underline{\ell}_i\Big\}.

Due to Lemma 10, if ∇fik(xk) + λxk ≠ 0, the update (8) is given by

    x_{k+1} = x_k - \min\Big\{\alpha_k, \; \frac{f_{i_k}(x_k) + \tfrac{\lambda}{2}\|x_k\|^2 - \underline{\ell}_{i_k}}{\|\nabla f_{i_k}(x_k) + \lambda x_k\|^2}\Big\}\,\big(\nabla f_{i_k}(x_k) + \lambda x_k\big).    (13)

We refer to this method, which is using model (9), as SPS. On the other hand, using model (10) and if ∇fik(xk) ≠ 0, the update of ProxSPS (12) is

    x_{k+1} = \frac{1}{1 + \alpha_k\lambda}\Big(x_k - \min\Big\{\alpha_k, \; \Big[\frac{(1 + \alpha_k\lambda)(f_{i_k}(x_k) - C(s_{i_k})) - \alpha_k\lambda\langle\nabla f_{i_k}(x_k), x_k\rangle}{\|\nabla f_{i_k}(x_k)\|^2}\Big]_+\Big\}\,\nabla f_{i_k}(x_k)\Big).    (14)

In Fig. 1a, we illustrate the two models (9) (denoted by SPS) and (10) (denoted by ProxSPS) for the logistic loss with squared ℓ2-regularization. We can see that the ProxSPS model is a much better approximation of the (stochastic) objective function as it still captures the quadratic behaviour of ϕ. Furthermore, as noted in the previous section, ProxSPS decouples the step size of the gradient and of the shrinkage, and hence the update direction depends on αk. In contrast, the update direction of SPS does not depend on αk, and the regularization effect is intertwined with the adaptive step size. Another way to see that the model (10) on which ProxSPS is based is a more accurate model as compared to the SPS model (9), is that the resulting vector field of ProxSPS takes a more direct route to the minimum, as illustrated in Fig. 1b.

Update (14) needs to compute the term ⟨∇fik(xk), xk⟩ while (13) needs to evaluate ∥xk∥². Other than that, the computational costs are roughly identical. For (14), a lower bound ℓ̲i is required. For non-negative loss functions, in practice both ℓ̲i and C(si) are often set to zero, in which case (10) will be a more accurate model as compared to (9).⁴

⁴For single element sampling, inf ℓi can sometimes be precomputed (e.g. regularized logistic regression, see (Loizou et al., 2021, Appendix D)). But even in this restricted setting it is not clear how to estimate inf ℓi when using mini-batching.
4.3 Convergence analysis

For the convergence analysis of Algorithm 2, we can work with the following assumption on ϕ.

Assumption 3. ϕ : Rⁿ → R ∪ {∞} is a proper, closed, λ-strongly convex function with λ ≥ 0.

Throughout this section we consider model (10), i.e. for g ∈ ∂f(x; s), let

    \psi_x(y; s) = f_x(y; s) + \phi(y), \qquad f_x(y; s) = \max\{f(x; s) + \langle g, y - x\rangle, C(s)\}.

Let us first state a lemma on important properties of the truncated model:

Lemma 2. Consider fx(y; s) = max{f(x; s) + ⟨g, y − x⟩, C(s)}, where g ∈ ∂f(x; s) is arbitrary and C(s) ≤ infz∈Rn f(z; s). Then, it holds:

(i) The mapping y ↦ fx(y; s) is convex.
(ii) For all x ∈ Rⁿ, it holds fx(x; s) = f(x; s). If f(·; s) is ρs–weakly convex for all s ∈ S, then fx(y; s) ≤ f(y; s) + (ρs/2)∥y − x∥² for all x, y ∈ Rⁿ.

Proof.
(i) The maximum over a constant and linear term is convex.
(ii) Recall that C(s) ≤ f(y; s) for all y ∈ Rⁿ. Therefore, fx(x; s) = max{C(s), f(x; s)} = f(x; s). From weak convexity of f(·; s) it follows f(x; s) + ⟨g, y − x⟩ ≤ f(y; s) + (ρs/2)∥y − x∥² and therefore fx(y; s) ≤ max{C(s), f(y; s) + (ρs/2)∥y − x∥²} = f(y; s) + (ρs/2)∥y − x∥² for all y ∈ Rⁿ.
4.3.1 Globally bounded subgradients

In this section, we show that the results for stochastic model-based proximal point methods in Davis & Drusvyatskiy (2019) can be immediately applied to our specific model – even though this model has not been explicitly analyzed in their article. This, however, requires assuming that the subgradients are bounded.

Proposition 3. Let Assumption 3 hold and assume that there is an open, convex set U containing dom ϕ. Let f(·; s) be ρs–weakly convex for all s ∈ S and let ρ = E[ρS]. Assume that there exists Gs ∈ R₊ for all s ∈ S, such that G := √(E[G²_S]) < ∞ and

    \|g(x; s)\| \le G_s \qquad \forall g(x; s) \in \partial f(x; s), \; \forall x \in U.    (15)

Then, ψx(y; s) satisfies (Davis & Drusvyatskiy, 2019, Assum. B).

Proof. We verify properties (B1)–(B4) of (Davis & Drusvyatskiy, 2019, Assum. B), for r = ϕ and fx(·, ξ) = fx(·; s): (B1) is identical to Assumption 1. (B2) holds with τ = ρ due to our assumptions on ϕ, and Lemma 2, (ii) and the definition of f, i.e. f(x) = E[f(x; S)]. Due to Lemma 2, (i) and convexity of ϕ, the mapping fx(·; s) + ϕ(·) is convex which shows (B3) for η = 0. Now, for arbitrary g ∈ ∂f(x; s) and x, y ∈ U, we have

    f_x(x; s) - f_x(y; s) \le f(x; s) - f(x; s) - \langle g, y - x\rangle \le \|g\|\,\|y - x\| \le G_s\|y - x\|.

Hence, (B4) holds for L = G and L(ξ) = Gs.
Corollary 4 (Weakly convex case). Let the assumptions of Proposition 3 hold with ρs > 0 for all s ∈ S. Let ρ = E[ρS] < ∞ and let ∆ ≥ env^{1/(2ρ)}_ψ(x0) − min ψ. Let {xk}k=0,...,K be generated by Algorithm 2 for constant step sizes αk = (2ρ + √(4ρG²K))⁻¹. Then, it holds

    \mathbb{E}\|\nabla \operatorname{env}^{1/(2\rho)}_{\psi}(x^K_{\sim})\|^2 \le \frac{8\rho\Delta}{K} + 16G\sqrt{\frac{\rho\Delta}{K}},

where x^K_∼ is uniformly drawn from {x0, . . . , xK−1}.

Proof. The claim follows from Proposition 3 and (Davis & Drusvyatskiy, 2019, Thm. 4.3), (4.16) setting η = 0, ρ̄ = 2ρ, T = K − 1 and βt = αk⁻¹.

Corollary 5 ((Strongly) convex case). Let the assumptions of Proposition 3 hold with ρs = 0 for all s ∈ S. Let λ > 0 and x⋆ = arg minx ψ(x). Let {xk}k=0,...,K be generated by Algorithm 2 for step sizes αk = 2/(λ(k+1)). Then, it holds

    \mathbb{E}\Big[\psi\Big(\frac{2}{(K+1)(K+2)-2}\sum_{k=1}^{K}(k+1)x_k\Big) - \psi(x_\star)\Big] \le \frac{\lambda}{(K+1)^2}\|x_0 - x_\star\|^2 + \frac{8G^2}{\lambda(K+1)}.

Proof. As ρs = 0 and hence ρ = 0, from the proof of Proposition 3 we have that (Davis & Drusvyatskiy, 2019, Assum. B) is satisfied with τ = 0 (in the notation of (Davis & Drusvyatskiy, 2019)). Moreover, by Lemma 2, (i) and λ–strong convexity of ϕ, we have λ–strong convexity of ψx(·; s). The claim follows from Proposition 3 and (Davis & Drusvyatskiy, 2019, Thm. 4.5) setting µ = λ, T = K − 1 and βt = αk⁻¹.
4.3.2 Lipschitz smoothness

Assumption (15), i.e. having globally bounded subgradients, is strong: it implies Lipschitz continuity of f (cf. (Davis & Drusvyatskiy, 2019, Lem. 4.1)) and simple functions such as the squared loss do not satisfy this. Therefore, we provide additional guarantees for the smooth case, without the assumption of globally bounded gradients.

The following result, similar to (Davis & Drusvyatskiy, 2019, Lem. 4.2), is the basic inequality for the subsequent convergence analysis.

Lemma 6. Let Assumption 3 hold. Let xk+1 be given by (11) and ψxk be given in (10). For every x ∈ Rⁿ it holds

    (1 + \alpha_k\lambda)\|x_{k+1} - x\|^2 \le \|x_k - x\|^2 - \|x_{k+1} - x_k\|^2 + 2\alpha_k\big(\psi_{x_k}(x; S_k) - \psi_{x_k}(x_{k+1}; S_k)\big).    (16)

Moreover, it holds

    \psi_{x_k}(x_{k+1}; S_k) \ge f(x_k; S_k) + \langle g_k, x_{k+1} - x_k\rangle + \phi(x_{k+1}).    (17)

Proof. The objective of (11) is given by Ψk(y) := ψxk(y; Sk) + (1/2αk)∥y − xk∥². Using Lemma 2, (i) and λ-strong convexity of ϕ, Ψk(y) is (λ + 1/αk)–strongly convex. As xk+1 is the minimizer of Ψk(y), for all x ∈ Rⁿ we have

    \Psi_k(x) \ge \Psi_k(x_{k+1}) + \frac{1 + \alpha_k\lambda}{2\alpha_k}\|x_{k+1} - x\|^2 \iff (1 + \alpha_k\lambda)\|x_{k+1} - x\|^2 \le \|x_k - x\|^2 - \|x_{k+1} - x_k\|^2 + 2\alpha_k\big(\psi_{x_k}(x; S_k) - \psi_{x_k}(x_{k+1}; S_k)\big).

Moreover, by definition of fx(y; s) in (10) we have

    \psi_{x_k}(x_{k+1}; S_k) = f_{x_k}(x_{k+1}; S_k) + \phi(x_{k+1}) \ge f(x_k; S_k) + \langle g_k, x_{k+1} - x_k\rangle + \phi(x_{k+1}).

We will work in the setting of differentiable loss functions with bounded gradient noise.

Assumption 4. The mapping f(·; s) is differentiable for all s ∈ S and there exists β ≥ 0 such that

    \mathbb{E}\|\nabla f(x; S) - \nabla f(x)\|^2 \le \beta \quad \text{for all } x \in \mathbb{R}^n.    (18)

The assumption of bounded gradient noise (18) (in the differentiable setting) is indeed a weaker assumption than (15) since E[∇f(x; S)] = ∇f(x) and

    \mathbb{E}\|\nabla f(x; S) - \nabla f(x)\|^2 \le \beta \iff \mathbb{E}\|\nabla f(x; S)\|^2 \le \|\nabla f(x)\|^2 + \beta.

Remark 1. Assumption 4 (and the subsequent theorems) could be adapted to the case where f(·; s) is weakly convex but non-differentiable: for fixed x ∈ Rⁿ, due to (Bertsekas, 1973, Prop. 2.2) and (Davis & Drusvyatskiy, 2019, Lem. 2.1) it holds

    \mathbb{E}[\partial f(x; S)] = \mathbb{E}\Big[\partial\big(f(x; S) + \tfrac{\rho_S}{2}\|x\|^2\big) - \rho_S x\Big] = \partial f(x) + \rho x - \mathbb{E}[\rho_S x] = \partial f(x),

where we used ρ = E[ρS]. Hence, for gs ∈ ∂f(x; s) we have E[gS] ∈ ∂f(x) and (18) is replaced by

    \mathbb{E}\|g_S - \mathbb{E}[g_S]\|^2 \le \beta \quad \text{for all } x \in \mathbb{R}^n.

However, as we will still require that f is Lipschitz-smooth, we present our results for the differentiable setting.

The proof of the subsequent theorems can be found in Appendix A.2 and Appendix A.3.
Theorem 7. Let Assumption 3 and Assumption 4 hold. Let f(·; s) be convex for all s ∈ S and let f be L–smooth (4). Let x⋆ = arg minx∈Rn ψ(x) and let θ > 1. Let {xk}k=0,...,K be generated by Algorithm 2 for step sizes αk > 0 such that

    \alpha_k \le \frac{1 - 1/\theta}{L}.    (19)

Then, it holds

    (1 + \alpha_k\lambda)\,\mathbb{E}\|x_{k+1} - x_\star\|^2 \le \mathbb{E}\|x_k - x_\star\|^2 + 2\alpha_k\,\mathbb{E}[\psi(x_\star) - \psi(x_{k+1})] + \theta\beta\alpha_k^2.    (20)

Moreover, we have:

a) If λ > 0 and αk = 1/(λ(k+k0)) with k0 ≥ 1 large enough such that (19) is fulfilled, then

    \mathbb{E}\Big[\psi\Big(\frac{1}{K}\sum_{k=0}^{K-1} x_{k+1}\Big) - \psi(x_\star)\Big] \le \frac{\lambda k_0}{2K}\|x_0 - x_\star\|^2 + \frac{\theta\beta(1 + \ln K)}{2\lambda K}.    (21)

b) If λ = 0 and αk = α/√(k+1) with α ≤ (1 − 1/θ)/L, then

    \mathbb{E}\Big[\psi\Big(\frac{1}{\sum_{k=0}^{K-1}\alpha_k}\sum_{k=0}^{K-1}\alpha_k x_{k+1}\Big) - \psi(x_\star)\Big] \le \frac{\|x_0 - x_\star\|^2}{4\alpha(\sqrt{K+1} - 1)} + \frac{\theta\beta\alpha(1 + \ln K)}{4(\sqrt{K+1} - 1)}.    (22)

c) If f is µ–strongly convex with µ ≥ 0,⁵ and αk = α fulfilling (19), then

    \mathbb{E}\|x_K - x_\star\|^2 \le (1 + \alpha(\mu + 2\lambda))^{-K}\|x_0 - x_\star\|^2 + \frac{\theta\beta\alpha}{\mu + 2\lambda}.    (23)

⁵Note that as f(·; s) is convex, so is f, and that we allow µ = 0 here.

Remark 2. For the decaying step sizes in item a) we get a rate of Õ(1/K) if λ > 0. In the strongly convex case in item c), for constant step sizes, we get linear convergence up to a neighborhood of the solution. Note that the constant on the right-hand side of (23) can be forced to be small using a small α. Further, the rate (23) has a 2λ term, instead of λ. This slight improvement in the rate occurs because we do not linearize ϕ in the ProxSPS model.
Theorem 8. Let Assumption 3 and Assumption 4 hold. Let f(·; s) be ρs–weakly convex for all s ∈ S and let ρ := E[ρS] < ∞. Let f be L–smooth⁶ and assume that inf ψ > −∞. Let {xk}k≥0 be generated by Algorithm 2. For θ > 1, under the condition

    \eta \in \begin{cases} (0, \tfrac{1}{\rho - \lambda}) & \text{if } \rho > \lambda \\ (0, \infty) & \text{else} \end{cases}, \qquad \alpha_k \le \frac{1 - \theta^{-1}}{L + \eta^{-1}},    (24)

it holds

    \sum_{k=0}^{K-1} \alpha_k\, \mathbb{E}\|\nabla \operatorname{env}^{\eta}_{\psi}(x_k)\|^2 \le \frac{2(\operatorname{env}^{\eta}_{\psi}(x_0) - \inf\psi)}{1 - \eta(\rho - \lambda)} + \frac{\beta\theta}{\eta(1 - \eta(\rho - \lambda))}\sum_{k=0}^{K-1} \alpha_k^2.    (25)

Moreover, for the choice αk = α/√(k+1) and with α ≤ (1 − θ⁻¹)/(L + η⁻¹), we have

    \min_{k=0,\dots,K-1} \mathbb{E}\|\nabla \operatorname{env}^{\eta}_{\psi}(x_k)\|^2 \le \frac{\operatorname{env}^{\eta}_{\psi}(x_0) - \inf\psi}{\alpha(1 - \eta(\rho - \lambda))(\sqrt{K+1} - 1)} + \frac{\beta\theta}{2\eta(1 - \eta(\rho - \lambda))}\cdot\frac{\alpha(1 + \ln K)}{\sqrt{K+1} - 1}.

If instead we choose αk = α/√K and with α ≤ √K (1 − θ⁻¹)/(L + η⁻¹), we have

    \mathbb{E}\|\nabla \operatorname{env}^{\eta}_{\psi}(x^K_{\sim})\|^2 \le \frac{2(\operatorname{env}^{\eta}_{\psi}(x_0) - \inf\psi)}{\alpha(1 - \eta(\rho - \lambda))\sqrt{K}} + \frac{\beta\theta}{\eta(1 - \eta(\rho - \lambda))}\cdot\frac{\alpha}{\sqrt{K}},

where x^K_∼ is uniformly drawn from {x0, . . . , xK−1}.

⁶As f is ρ–weakly convex, this implies ρ ≤ L.
4.3.3 Comparison to existing theory

Recalling that Algorithm 1 is equivalent to SPSmax with c = 1 and γb = αk, we can apply Theorem 7 and Theorem 8 for the unregularized case where ϕ = 0 and hence obtain new theory for (SPSmax). We start by summarizing the main theoretical results for SPSmax given in (Loizou et al., 2021; Orvieto et al., 2022): in the (ER) setting, recall the interpolation constant σ² = E[f(x⋆; S) − C(S)] = (1/N) Σ_{i=1}^N fi(x⋆) − C(si). If fi is Li-smooth and convex, (Orvieto et al., 2022, Thm. 3.1) proves convergence to a neighborhood of the solution, i.e. the iterates {xk} of SPSmax satisfy

    \mathbb{E}[f(\bar{x}_K) - f(x_\star)] \le \frac{\|x_0 - x_\star\|^2}{\alpha K} + \frac{2\gamma_b\sigma^2}{\alpha},    (26)

where x̄K := (1/K) Σ_{k=0}^{K−1} xk, α := min{1/(2cLmax), γb}, and Lmax := maxi∈[N] Li.⁷ For the nonconvex case, if fi is Li-smooth and under suitable assumptions on the gradient noise, (Loizou et al., 2021, Thm. 3.8) states that, for constants c1 and c2, we have

    \min_{k=1,\dots,K} \mathbb{E}\|\nabla f(x_k)\|^2 \le \frac{1}{c_1 K} + c_2.    (27)

The main advantage of these results is that γb can be held constant; furthermore in the convex setting (26), the choice of γb requires no knowledge of the smoothness constants Li. For both results however, we cannot directly conclude that the right-hand side goes to zero as K → ∞ as there is an additional constant. Choosing γb sufficiently small does not immediately solve this as c1, α and c2 all go to zero as γb goes to zero.

Our results complement this by showing exact convergence for the (weakly) convex case, i.e. without constants on the right-hand side. This comes at the cost of an upper bound on the step sizes αk which depends on the smoothness constant L. For exact convergence, it is important to use decreasing step sizes αk: Theorem 8 shows that the gradient of the Moreau envelope converges to zero at the rate O(1/√K) for the choice of αk = α/√K.⁸ Another minor difference to (Loizou et al., 2021) is that we do not need to assume Lipschitz-smoothness for all f(·; s) and work instead with the (more general) assumption of weak convexity. However, we still need to assume Lipschitz smoothness of f.

Another variant of SPSmax, named DecSPS, has been proposed in (Orvieto et al., 2022): for unregularized problems (1) it is given by

    x_{k+1} = x_k - \hat{\gamma}_k g_k, \qquad \hat{\gamma}_k = \frac{1}{c_k}\min\Big\{\frac{f(x_k; S_k) - C_k}{\|g_k\|^2}, \; c_{k-1}\hat{\gamma}_{k-1}\Big\},    (DecSPS)

where {ck}k≥0 is an increasing sequence. In the (ER) setting, if all fi are Lipschitz-smooth and strongly convex, DecSPS converges with a rate of O(1/√K), without knowledge of the smoothness or convexity constants (cf. (Orvieto et al., 2022, Thm. 5.5)). However, under these assumptions, the objective f is strongly convex and the optimal rate is O(1/K), which we achieve up to a logarithmic factor in Theorem 7, (21). Moreover, for DecSPS no guarantees are given for nonconvex problems.

⁷The theorem also handles the mini-batch case but, for simplicity, we state the result for sampling a single ik in each iteration.
⁸Notice that αk then depends on the total number of iterations K and hence one would need to fix K before starting the method.
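For comparison with the SPS and ProxSPS steps sketched earlier, here is a minimal NumPy sketch of one DecSPS update; the signature and plain-array inputs are illustrative assumptions.

```python
import numpy as np

def dec_sps_step(x_k, loss_k, g_k, c_k, c_prev, gamma_prev, C_k=0.0):
    """One DecSPS update; returns the new iterate and step size gamma_k."""
    polyak = (loss_k - C_k) / float(np.dot(g_k, g_k))
    gamma_k = (1.0 / c_k) * min(polyak, c_prev * gamma_prev)
    return x_k - gamma_k * g_k, gamma_k
```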
5 Numerical experiments

Throughout we denote Algorithm 1 with SPS and Algorithm 3 with ProxSPS. For all experiments we use PyTorch (Paszke et al., 2019).

5.1 General parameter setting

For SPS and ProxSPS we always use C(s) = 0 for all s ∈ S. For αk, we use the following schedules:

• constant: set αk = α0 for all k and some α0 > 0.
• sqrt: set αk = α0/√j for all iterations k during epoch j.

As we consider problems with ℓ2-regularization, for SPS we handle the regularization term by incorporating it into all individual loss functions, as depicted in (13). With ϕ = (λ/2)∥·∥² for λ ≥ 0, we denote by ζk the adaptive step size term of the following algorithms (a code sketch of both quantities follows after this list):

• for SPS we have ζk := (f(xk; Sk) + (λ/2)∥xk∥²)/∥gk + λxk∥² (cf. (13) with ℓ̲ik = 0),
• for ProxSPS we have ζk := [((1+αkλ)f(xk; Sk) − αkλ⟨gk, xk⟩)/∥gk∥²]₊ and thus τk⁺ = min{αk, ζk} (cf. Lemma 1 with C(Sk) = 0).
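The following sketch restates the two schedules and the two step size terms in plain NumPy; the function names and flat-vector parameters are illustrative assumptions, not the experiment code itself.

```python
import numpy as np

def alpha_schedule(alpha0, epoch, kind="constant"):
    # "sqrt": alpha_k = alpha0 / sqrt(j) during epoch j (j >= 1)
    return alpha0 if kind == "constant" else alpha0 / np.sqrt(epoch)

def zeta_sps(loss, g, x, lam):
    # SPS on the regularized loss, cf. (13) with lower bound 0
    g_reg = g + lam * x
    return (loss + 0.5 * lam * float(np.dot(x, x))) / float(np.dot(g_reg, g_reg))

def zeta_prox_sps(loss, g, x, lam, alpha):
    # ProxSPS step size term, cf. Lemma 1 with C(S_k) = 0
    val = ((1.0 + alpha * lam) * loss - alpha * lam * float(np.dot(g, x))) / float(np.dot(g, g))
    return max(val, 0.0)
```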
5.2 Regularized matrix factorization

Problem description: For A ∈ R^{q×p}, consider the problem

    \min_{W_1 \in \mathbb{R}^{r \times p},\, W_2 \in \mathbb{R}^{q \times r}} \mathbb{E}_{y \sim N(0, I)}\|W_2 W_1 y - Ay\|^2 = \min_{W_1 \in \mathbb{R}^{r \times p},\, W_2 \in \mathbb{R}^{q \times r}} \|W_2 W_1 - A\|_F^2.

For the above problem, SPSmax has shown superior performance compared to other methods in the numerical experiments of (Loizou et al., 2021). The problem can be turned into a (nonconvex) empirical risk minimization problem by drawing N samples {y⁽¹⁾, . . . , y⁽ᴺ⁾}. Denote b⁽ⁱ⁾ := Ay⁽ⁱ⁾. Adding squared norm regularization with λ ≥ 0 (cf. (Srebro et al., 2004)), we obtain the problem

    \min_{W_1 \in \mathbb{R}^{r \times p},\, W_2 \in \mathbb{R}^{q \times r}} \frac{1}{N}\sum_{i=1}^{N}\|W_2 W_1 y^{(i)} - b^{(i)}\|^2 + \frac{\lambda}{2}\big(\|W_1\|_F^2 + \|W_2\|_F^2\big).    (28)

This is a problem of form (2), setting x = (W1, W2), using a finite sample space S = {s1, . . . , sN}, f(x; si) = ∥W2W1y⁽ⁱ⁾ − Ay⁽ⁱ⁾∥², and ϕ = (λ/2)∥·∥²_F. Clearly, zero is a lower bound of f(·; si) for all i ∈ [N].

We investigate ProxSPS for problems of form (28) on synthetic data. For details on the experimental procedure, we refer to Appendix D.1.
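A PyTorch sketch of the sampled loss and regularizer in (28) on synthetic data follows; the dimensions and data generation are illustrative assumptions (see Appendix D.1 of the paper for the actual setup).

```python
import torch

q, p, r, N = 10, 6, 4, 1024
A = torch.randn(q, p)
Y = torch.randn(N, p)        # samples y^(i) ~ N(0, I)
B = Y @ A.T                  # targets b^(i) = A y^(i)

W1 = torch.randn(r, p, requires_grad=True)
W2 = torch.randn(q, r, requires_grad=True)

def sampled_loss(idx):
    # f(x; s_i) = ||W2 W1 y^(i) - b^(i)||^2, averaged over a mini-batch idx
    pred = Y[idx] @ (W2 @ W1).T
    return ((pred - B[idx]) ** 2).sum(dim=1).mean()

def regularizer(lam):
    # phi(x) = (lam/2)(||W1||_F^2 + ||W2||_F^2)
    return 0.5 * lam * (W1.pow(2).sum() + W2.pow(2).sum())
```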
Discussion: We discuss the results for the setting matrix-fac1 in Table 1 in the appendix. We first fix λ = 0.001 and consider the three methods SPS, ProxSPS and SGD. Fig. 2 shows the objective function over 50 epochs, for both step size schedules sqrt and constant, and several initial values α0. For the constant schedule, we observe that ProxSPS converges quickly for all initial values while SPS is unstable. Note that for SGD we need to pick much smaller values here in order to avoid divergence (SGD diverges for the largest value of α0). Hence, SPS for large α0 is unstable, while for small α0 we can expect similar performance to SGD (as γk is capped by αk = α0). However, in the regime of small α0, convergence will be very slow. Hence, one of the main advantages of SPS, namely that its step size can be chosen constant and moderately large (compared to SGD), is not observed here. ProxSPS fixes this by admitting a large range of initial step sizes, which results in fast convergence, and therefore is more robust than SGD with respect to the tuning of α0.

For the sqrt schedule, we observe in Fig. 2 that SPS can be stabilized by reducing the values of αk over the course of the iterations. However, for large α0 we still see instability in the early iterations, whereas ProxSPS does not show this behaviour. We again observe that ProxSPS is less sensitive with respect to the choice of α0 as compared to SGD.
[Figure 2: two panels plotting ψ(xk) − mink ψ(xk) over 50 epochs on a log scale, for the constant (left) and sqrt (right) schedule, with prox-sps and sps at α0 ∈ {0.5, 0.88, 1.25, 1.62, 2.0} and sgd at α0 ∈ {0.12, 0.27, 0.41, 0.56, 0.7}.]

Figure 2: Objective function for the Matrix Factorization problem (28), with constant (left) and sqrt (right) step size schedule and several choices of initial values. Here mink ψ(xk) is the best objective function value found over all methods and all iterations.
[Figure 3: Validation error for the Matrix Factorization problem (28), with constant (left) and sqrt (right) step size schedule and several choices of initial values.]
1100
The empirical results also confirm our theoretical statement, showing exact convergence if αk is decaying in the order of 1/√k. From Fig. 3, we can make similar observations for the validation error, defined as (1/Nval) Σ_{i=1}^{Nval} ∥W2W1y(i) − b(i)_val∥2, where b(i)_val are the Nval = N measurements from the validation set (cf. Appendix D.1 for details).
We now consider different values for λ and only consider the sqrt schedule, as we have seen that for constant step sizes, SPS would not work for large step sizes and would be almost identical to SGD for small step sizes. Fig. 4 shows the objective function and validation error. Again, we can observe that SPS is unstable for large initial values α0 for all λ ≥ 10−4. On the other hand, ProxSPS has a good performance for a wide range of α0 ∈ [1, 10] if λ is not too large. Indeed, the convergence of ProxSPS only starts to deteriorate when both α0 and λ are very large. For α0 = 1, the two methods give almost identical results. Finally, in Fig. 5a we plot the validation error as a function of λ (taking the median over the last ten epochs). The plot shows that the best validation error is obtained for λ = 10−4 and for large α0. With SPS the validation error is higher, in particular for large α0 and λ.
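The validation error above is straightforward to compute; a minimal sketch with the same column layout as before (our own helper):

```python
import numpy as np

def validation_error(W1, W2, Y_val, B_val):
    # (1/N_val) * sum_i ||W2 W1 y^(i) - b_val^(i)||^2, samples as columns
    R = W2 @ W1 @ Y_val - B_val
    return np.mean(np.sum(R ** 2, axis=0))
```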
1122
+
1123
+ 0
1124
+ 20
1125
+ 40
1126
+ Epoch
1127
+ 0.000
1128
+ 0.002
1129
+ 0.004
1130
+ 0.006
1131
+ 0.008
1132
+ 0.010
1133
+ Objective ψ(xk)
1134
+ λ = 1e − 05
1135
+ 0
1136
+ 20
1137
+ 40
1138
+ Epoch
1139
+ 0.002
1140
+ 0.004
1141
+ 0.006
1142
+ 0.008
1143
+ 0.010
1144
+ λ = 0.0001
1145
+ 0
1146
+ 20
1147
+ 40
1148
+ Epoch
1149
+ 0.01
1150
+ 0.02
1151
+ 0.03
1152
+ 0.04
1153
+ λ = 0.001
1154
+ 0
1155
+ 20
1156
+ 40
1157
+ Epoch
1158
+ 0.100
1159
+ 0.125
1160
+ 0.150
1161
+ 0.175
1162
+ 0.200
1163
+ 0.225
1164
+ λ = 0.01
1165
+ 0
1166
+ 20
1167
+ 40
1168
+ Epoch
1169
+ 1.0
1170
+ 1.5
1171
+ 2.0
1172
+ 2.5
1173
+ λ = 0.1
1174
+ prox-sps, sqrt, α0=10.0
1175
+ prox-sps, sqrt, α0=5.0
1176
+ prox-sps, sqrt, α0=1.0
1177
+ sps, sqrt, α0=10.0
1178
+ sps, sqrt, α0=5.0
1179
+ sps, sqrt, α0=1.0
1180
+ 0
1181
+ 20
1182
+ 40
1183
+ Epoch
1184
+ 0.0000
1185
+ 0.0005
1186
+ 0.0010
1187
+ 0.0015
1188
+ 0.0020
1189
+ 0.0025
1190
+ Validation Error
1191
+ λ = 1e − 05
1192
+ 0
1193
+ 20
1194
+ 40
1195
+ Epoch
1196
+ 0.0000
1197
+ 0.0005
1198
+ 0.0010
1199
+ 0.0015
1200
+ 0.0020
1201
+ 0.0025
1202
+ 0.0030
1203
+ λ = 0.0001
1204
+ 0
1205
+ 20
1206
+ 40
1207
+ Epoch
1208
+ 0.000
1209
+ 0.002
1210
+ 0.004
1211
+ 0.006
1212
+ 0.008
1213
+ 0.010
1214
+ λ = 0.001
1215
+ 0
1216
+ 20
1217
+ 40
1218
+ Epoch
1219
+ 0.00
1220
+ 0.02
1221
+ 0.04
1222
+ 0.06
1223
+ 0.08
1224
+ λ = 0.01
1225
+ 0
1226
+ 20
1227
+ 40
1228
+ Epoch
1229
+ 0.00
1230
+ 0.25
1231
+ 0.50
1232
+ 0.75
1233
+ 1.00
1234
+ 1.25
1235
+ λ = 0.1
1236
+ prox-sps, sqrt, α0=10.0
1237
+ prox-sps, sqrt, α0=5.0
1238
+ prox-sps, sqrt, α0=1.0
1239
+ sps, sqrt, α0=10.0
1240
+ sps, sqrt, α0=5.0
1241
+ sps, sqrt, α0=1.0
1242
+ Figure 4: Objective function value and validation error over the course of optimization. For the
1243
+ validation error, we plot a rolling median over five epochs in order to avoid clutter.
1244
[Figure 5: (a) Validation error and (b) model norm √(∥W1∥2 + ∥W2∥2) as a function of the regularization parameter λ. Shaded area is one standard deviation (computed over ten independent runs). For all values, we take the median over epochs [40, 50].]
1290
Finally, we plot the actual step sizes for both methods in Fig. 6. We observe that the adaptive step size ζk (defined at the end of Section 5.1) is typically larger and has more variance for SPS than for ProxSPS, in particular for large λ. This increased variance might explain why SPS is unstable when α0 is large: the actual step size is the minimum of αk and ζk, and hence both terms being large could lead to instability. On the other hand, if α0 = 1, the plot confirms that SPS and ProxSPS are almost identical methods, as ζk > αk for most iterations. We plot the results for the setting matrix-fac2 of Table 1 in Appendix D.2.
1299
[Figure 6: Adaptive step size selection for SPS and ProxSPS. We plot ζk (see definition in Section 5.1) as dots for each iteration as well as their median over each epoch. For this plot, we use the results of only one of the ten runs.]
1302
5.3 Deep networks for image classification

We train a ResNet56 and a ResNet110 model (He et al., 2016) on the CIFAR10 dataset. We use the data loading and preprocessing procedure and the network implementation from https://github.com/akamaster/pytorch_resnet_cifar10. We do not use batch normalization. The loss function is the cross-entropy loss of the true image class with respect to the predicted class probabilities, being the output of the ResNet56 network. We add (λ/2)∥x∥2 as regularization term, where x consists of all learnable parameters of the model. The CIFAR10 dataset consists of 60,000 images, each of size 32 × 32, from ten different classes. We use the PyTorch split into 50,000 training and 10,000 test examples and use a batch size of 128. For AdamW, we set the weight decay parameter to λ and set all other hyperparameters to their defaults. We use the AdamW implementation from https://github.com/zhenxun-zhuang/AdamW-Scale-free as it does not – in contrast to the PyTorch implementation – multiply the weight decay parameter with the learning rate, which leads to better comparability to SPS and ProxSPS for identical values of λ. For SPS and ProxSPS we use the sqrt schedule and α0 = 1. We run each method repeatedly using (the same) three different seeds for the dataset shuffling.
1320
Discussion: For ResNet56, from the bottom plot in Fig. 7, we observe that both SPS and ProxSPS work well, with ProxSPS leading to smaller weights. For λ = 5e−4, the progress of ProxSPS stagnates after roughly 25 epochs. This can be explained by looking at the adaptive step size term ζk in Fig. 9a: as it decays over time, we have τk⁺ = ζk ≪ αk. Since every iteration of ProxSPS shrinks the weights by a factor 1/(1 + αkλ), this leads to a bias towards zero. This suggests that we should choose αk roughly of the order of ζk, for example by using the values of ζk from the previous epoch.
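To illustrate the size of this bias, a small back-of-the-envelope computation (the per-epoch step count 50,000/128 ≈ 391 follows from the batch size stated above; the numbers are purely illustrative):

```python
# When tau_k^+ = zeta_k << alpha_k, the prox still multiplies the weights
# by 1/(1 + alpha_k * lam) in every iteration, independently of the loss.
alpha_k, lam = 1.0, 5e-4
shrink = 1.0 / (1.0 + alpha_k * lam)   # ~0.9995 per step
print(shrink ** 391)                   # ~0.82 after one epoch of ~391 steps
```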
1326
1440
[Figure 7: ResNet56: (Top): Validation accuracy and model norm for three values of the regularization parameter λ. Validation accuracy is defined as the ratio of correctly labeled images on the validation set (i.e. Top-1 accuracy), plotted as five-epoch running median. (Bottom): With ∥xk∥ we denote the norm of all learnable parameters at the k-th iteration. Shaded area is two standard deviations over three independent runs.]
1526
For the larger model ResNet110, however, SPS does not make progress for a long time because the adaptive step size is very small (see Fig. 8 and Fig. 9b). ProxSPS does not share this issue and performs well after a few initial epochs. For larger values of λ, the training is also considerably faster than for AdamW. Generally, we observe that ProxSPS (and SPS for ResNet56) performs well in comparison to AdamW. This is achieved without extensive hyperparameter tuning (in particular, this suggests that setting c = 1 in SPSmax leads to good results and reduces tuning effort).
1536
6 Conclusion

We proposed and analyzed ProxSPS, a proximal version of the stochastic Polyak step size. We arrived at ProxSPS by using the framework of stochastic model-based proximal point methods. We then used this framework to argue that the resulting model of ProxSPS is a better approximation compared to the model used by SPS when using regularization. Our theoretical results cover a wide range of optimization problems, including convex and nonconvex settings. We performed a series of experiments comparing ProxSPS, SPS, SGD and AdamW when using ℓ2-regularization. In particular, we found that SPS can be very hard to tune when using ℓ2-regularization, while ProxSPS, in contrast, performs well for a wide choice of step sizes and regularization parameters. Finally, for our experiments on image classification, we found that ProxSPS is competitive with AdamW, whereas SPS can fail for larger models. At the same time, ProxSPS produces smaller weights in the trained neural network. Having small weights may help reduce the memory footprint of the resulting network, and even suggests which weights can be pruned.
1549
[Figure 8: ResNet110: Validation accuracy as five-epoch running median (top) and model norm (bottom) for three values of λ. Shaded area is two standard deviations over three independent runs.]
1637
[Figure 9: Adaptive step sizes for SPS and ProxSPS for (a) ResNet56 and (b) ResNet110. See definition of ζk in Section 5.1. For this plot, we use the results of only one of the three runs.]

References

Hilal Asi and John C. Duchi. Stochastic (approximate) proximal point methods: convergence, optimality, and adaptivity. SIAM Journal on Optimization, 29(3):2257–2290, 2019. doi: 10.1137/18M1230323.

Amir Beck. First-order methods in optimization, volume 25 of MOS-SIAM Series on Optimization. SIAM, Philadelphia, 2017. doi: 10.1137/1.9781611974997.

Leonard Berrada, Andrew Zisserman, and M. Pawan Kumar. Training neural networks for and by interpolation. June 2019.

D. P. Bertsekas. Stochastic optimization problems with nondifferentiable cost functionals. Journal of Optimization Theory and Applications, 12:218–231, 1973. doi: 10.1007/BF00934819.

Léon Bottou. Large-scale machine learning with stochastic gradient descent. In Proceedings of COMPSTAT'2010, pp. 177–186. Physica-Verlag/Springer, Heidelberg, 2010.

Léon Bottou, Frank E. Curtis, and Jorge Nocedal. Optimization methods for large-scale machine learning. SIAM Review, 60(2):223–311, 2018. doi: 10.1137/16M1080173.

Frank H. Clarke. Optimization and nonsmooth analysis. John Wiley & Sons, Inc., New York, 1983.

Damek Davis and Dmitriy Drusvyatskiy. Stochastic model-based minimization of weakly convex functions. SIAM Journal on Optimization, 29(1):207–239, 2019. doi: 10.1137/18M1178244.

D. Drusvyatskiy and C. Paquette. Efficiency of minimizing compositions of convex functions and smooth maps. Mathematical Programming, 178(1-2):503–558, 2019. doi: 10.1007/s10107-018-1311-3.

Dmitriy Drusvyatskiy and Adrian S. Lewis. Error bounds, quadratic growth, and linear convergence of proximal methods. Mathematics of Operations Research, 43(3):919–948, 2018. doi: 10.1287/moor.2017.0889.

Robert Gower, Othmane Sebbouh, and Nicolas Loizou. SGD for structured nonconvex functions: Learning rates, minibatching and interpolation. In Proceedings of The 24th International Conference on Artificial Intelligence and Statistics, volume 130 of Proceedings of Machine Learning Research, pp. 1315–1323. PMLR, 2021. URL https://proceedings.mlr.press/v130/gower21a.html.

Elad Hazan and Sham Kakade. Revisiting the Polyak step size. May 2019.

Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pp. 770–778, 2016. doi: 10.1109/CVPR.2016.90.

Diederik P. Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, 2015.

Nicolas Loizou, Sharan Vaswani, Issam Hadj Laradji, and Simon Lacoste-Julien. Stochastic Polyak step-size for SGD: An adaptive learning rate for fast convergence. In Proceedings of The 24th International Conference on Artificial Intelligence and Statistics, volume 130 of Proceedings of Machine Learning Research, pp. 1306–1314. PMLR, 2021.

Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, 2019. URL https://openreview.net/forum?id=Bkg6RiCqY7.

Antonio Orvieto, Simon Lacoste-Julien, and Nicolas Loizou. Dynamics of SGD with stochastic Polyak stepsizes: Truly adaptive variants and convergence to exact solution. May 2022.

Alasdair Paren, Leonard Berrada, Rudra P. K. Poudel, and M. Pawan Kumar. A stochastic bundle method for interpolation. Journal of Machine Learning Research, 23(15):1–57, 2022. URL http://jmlr.org/papers/v23/20-1248.html.

Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. PyTorch: An imperative style, high-performance deep learning library. In Advances in Neural Information Processing Systems 32, pp. 8024–8035. Curran Associates, Inc., 2019.

Boris T. Polyak. Introduction to optimization. Optimization Software, Inc., Publications Division, New York, 1987.

Mariana Prazeres and Adam M. Oberman. Stochastic gradient descent with Polyak's learning rate. Journal of Scientific Computing, 89(1):25, 2021. doi: 10.1007/s10915-021-01628-3.

Herbert Robbins and Sutton Monro. A stochastic approximation method. Annals of Mathematical Statistics, 22:400–407, 1951. doi: 10.1214/aoms/1177729586.

R. Tyrrell Rockafellar and Roger J.-B. Wets. Variational analysis, volume 317 of Grundlehren der mathematischen Wissenschaften. Springer-Verlag, Berlin, 1998. doi: 10.1007/978-3-642-02431-3.

Nathan Srebro, Jason Rennie, and Tommi Jaakkola. Maximum-margin matrix factorization. In Advances in Neural Information Processing Systems 17. MIT Press, 2004. URL https://proceedings.neurips.cc/paper/2004/file/e0688d13958a19e087e123148555e4b4-Paper.pdf.

Zhenxun Zhuang, Mingrui Liu, Ashok Cutkosky, and Francesco Orabona. Understanding AdamW through proximal methods and scale-freeness. January 2022.
1819
+
1820
Contents

1 Introduction
  1.1 Background and Contributions
2 Preliminaries
  2.1 Notation
  2.2 General assumptions
  2.3 Convex analysis
3 The unregularized case
  3.1 A model-based view point
4 The regularized case
  4.1 The special case of ℓ2-regularization
  4.2 Comparing the model of SPS and ProxSPS
  4.3 Convergence analysis
    4.3.1 Globally bounded subgradients
    4.3.2 Lipschitz smoothness
    4.3.3 Comparison to existing theory
5 Numerical experiments
  5.1 General parameter setting
  5.2 Regularized matrix factorization
  5.3 Deep networks for image classification
6 Conclusion
A Missing Proofs
  A.1 Proofs of model-based update formula
  A.2 Proof of Theorem 7
  A.3 Proof of Theorem 8
B Auxiliary Lemmas
C Model equivalence for SGD
D Additional information on numerical experiments
  D.1 Matrix Factorization
  D.2 Plots for matrix-fac2
1907
A Missing Proofs

A.1 Proofs of model-based update formula

Lemma 9. For λ ≥ 0, let ϕ(x) = (λ/2)∥x∥2 and let g ∈ ∂f(x; s) and C(s) ≤ inf_{z∈Rn} f(z; s) hold for all s ∈ S. For
  ψx(y; s) = fx(y; s) + ϕ(y),   fx(y; s) = max{f(x; s) + ⟨g, y − x⟩, C(s)},
consider the update
  xk+1 = argmin_{x∈Rn} ψxk(x; Sk) + (1/(2αk))∥x − xk∥2.   (29)
Denote Ck := C(Sk) and let gk ∈ ∂f(xk; Sk). Define
  τk⁺ := 0 if gk = 0, and τk⁺ := min{αk, ( [(1 + αkλ)(f(xk; Sk) − Ck) − αkλ⟨gk, xk⟩] / ∥gk∥2 )₊} else.
Then, we have
  xk+1 = (1/(1 + αkλ))xk − (τk⁺/(1 + αkλ))gk = (1/(1 + αkλ))(xk − τk⁺gk) = prox_{αkϕ}(xk − τk⁺gk).   (30)
Define τk := 0 if gk = 0 and τk := min{αk, [(1 + αkλ)(f(xk; Sk) − Ck) − αkλ⟨gk, xk⟩]/∥gk∥2} else. Then, it holds τk ≤ τk⁺ and
  ψxk(xk+1; Sk) = f(xk; Sk) − (αkλ/(1 + αkλ))⟨gk, xk⟩ − (τk/(1 + αkλ))∥gk∥2 + ϕ(xk+1).   (31)
1971
+ (31)
1972
+ Proof. Note that max{f(xk; Sk) + ⟨gk, y − xk⟩, Ck} is convex as a function of y. The update is
1973
+ therefore unique. First, if gk = 0, then clearly xk+1 = proxαkϕ(xk) =
1974
+ 1
1975
+ 1+αkλxk and (31) holds
1976
+ true. Now, let gk ̸= 0. The solution of (29) is either in {y|f(xk; Sk) + ⟨gk, y − xk⟩ < Ck}, or in
1977
+ {y|f(xk; Sk) + ⟨gk, y − xk⟩ > Ck} or in {y|f(xk; Sk) + ⟨gk, y − xk⟩ = Ck}. We therefore solve three
1978
+ problems:
1979
+ (P1) Solve
1980
+ y+ = arg min
1981
+ y
1982
+ Ck + λ
1983
+ 2 ∥y∥2 +
1984
+ 1
1985
+ 2αk
1986
+ ∥y − xk∥2.
1987
+ Clearly, the solution is y+ =
1988
+ 1
1989
+ 1+αkλxk. This y+ solves (29) if f(xk; Sk)+⟨gk, y+ −xk⟩ < Ck.
1990
+ (P2) Solve
1991
+ y+ = arg min
1992
+ y
1993
+ f(xk; Sk) + ⟨gk, y − xk⟩ + λ
1994
+ 2 ∥y∥2 +
1995
+ 1
1996
+ 2αk
1997
+ ∥y − xk∥2.
1998
+ The optimality condition is 0 = αkgk + αkλy+ + y+ − xk.
1999
+ Thus, the solution is y+ =
2000
+ 1
2001
+ 1+αkλ(xk − αkgk). This y+ solves (29) if f(xk; Sk) + ⟨gk, y+ − xk⟩ > Ck.
2002
+ 23
2003
+
2004
+ (P3) Solve
2005
+ y+ = arg min
2006
+ y
2007
+ λ
2008
+ 2 ∥y∥2 +
2009
+ 1
2010
+ 2αk
2011
+ ∥y − xk∥2,
2012
+ s.t. f(xk; Sk) + ⟨gk, y − xk⟩ = Ck.
2013
+ The KKT conditions are given by
2014
+ αkλy + y − xk + µgk = 0,
2015
+ f(xk; Sk) + ⟨gk, y − xk⟩ = Ck.
2016
+ Taking the inner product of the first equation with gk, we get
2017
+ (1 + αkλ)⟨gk, y⟩ − ⟨gk, xk⟩ + µ∥gk∥2 = 0.
2018
+ From the second KKT condition we have ⟨gk, y⟩ = Ck − f(xk; Sk) + ⟨gk, xk⟩, hence
2019
+ (1 + αkλ)
2020
+
2021
+ Ck − f(xk; Sk) + ⟨gk, xk⟩
2022
+
2023
+ − ⟨gk, xk⟩ + µ∥gk∥2 = 0.
2024
+ Solving for µ gives µ = (1+αkλ)(f(xk;Sk)−Ck)−αkλ⟨gk,xk⟩
2025
+ ∥gk∥2
2026
+ . From the first KKT condition, we
2027
+ obtain
2028
+ y+ =
2029
+ 1
2030
+ 1 + αkλ
2031
+
2032
+ xk − µgk
2033
+
2034
+ =
2035
+ 1
2036
+ 1 + αkλ
2037
+
2038
+ xk − (1 + αkλ)(f(xk; Sk) − Ck) − αkλ⟨gk, xk⟩
2039
+ ∥gk∥2
2040
+ gk
2041
+
2042
+ .
2043
+ This y+ solves (29) if neither (P1) nor (P2) provided a solution.
2044
+ For all three cases, the solution takes the form y+ =
2045
+ 1
2046
+ 1+αkλ[xk −tgk] =: y(t). As ∥gk∥2 > 0, the term
2047
+ f(xk; Sk) + ⟨gk, y(t) − xk⟩ is strictly monotonically decreasing in t. We know f(xk; Sk) + ⟨gk, y(t) −
2048
+ xk⟩ = Ck for t = µ (from (P3)). Hence, f(xk; Sk) + ⟨gk, y(t) − xk⟩ < Ck (> Ck) if and only if
2049
+ t > µ (t < µ).
2050
+ We conclude:
2051
+ • If f(xk; Sk) + ⟨gk, y(0) − xk⟩ < Ck, then the solution to (P1) is the solution to (29). This
2052
+ condition is equivalent to µ < 0.
2053
+ • If f(xk; Sk) + ⟨gk, y(αk) − xk⟩ > Ck, then the solution to (P2) is the solution to (29). This
2054
+ condition is equivalent to αk < µ.
2055
+ • If neither 0 > µ nor αk < µ hold, i.e. if µ ∈ [0, αk], then the solution to (29) comes from
2056
+ (P3) and hence is given by y(µ).
2057
+ Altogether, we get that xk+1 =
2058
+ 1
2059
+ 1+αkλ[xk − τ +
2060
+ k gk] with τ +
2061
+ k = min{αk, (µ)+}.
2062
+ Now, we prove (31). Note that if gk ̸= 0, then τk = min{αk, µ} with µ defined as in (P3). In the
2063
+ case of (P1), we have ψxk(xk+1; Sk) = Ck + ϕ(xk+1). Moreover, it holds µ < 0 and as αk > 0 we
2064
+ have τk = µ. Plugging τk = µ into the right hand-side of (31), we obtain Ck + ϕ(xk+1).
2065
+ In the case of (P2) or (P3), we have Ck ≤ f(xk; Sk) + ⟨gk, xk+1 − xk⟩. Due to f(xk; Sk) + ⟨gk, y(t) −
2066
+ xk⟩ = f(xk; Sk) −
2067
+ 1
2068
+ 1+αkλ⟨gk, xk⟩ +
2069
+ t
2070
+ 1+αkλ∥gk∥2, we obtain (31) as xk+1 = y(αk) and µ > αk in the
2071
+ case of (P2) and xk+1 = y(µ) and µ ≤ αk in the case of (P3).
2072
+ 24
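Update (30) translates directly into code; the following is a minimal NumPy sketch (our own function names, assuming a known lower bound C = C(Sk)):

```python
import numpy as np

def proxsps_update(x, g, f_val, C, alpha, lam):
    # One ProxSPS step (30): x_next = prox_{alpha*phi}(x - tau_plus * g),
    # with phi = lam/2 * ||.||^2 and tau_plus = min(alpha, (mu)_+) from Lemma 9.
    if not np.any(g):
        tau_plus = 0.0
    else:
        num = (1.0 + alpha * lam) * (f_val - C) - alpha * lam * np.dot(g, x)
        tau_plus = min(alpha, max(num / np.dot(g, g), 0.0))
    return (x - tau_plus * g) / (1.0 + alpha * lam)
```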
2073
+
2074
Lemma 10. Consider the model fx(y; s) := max{f(x; s) + ⟨g, y − x⟩, C(s)}, where g ∈ ∂f(x; s) and C(s) ≤ inf_{z∈Rn} f(z; s) holds for all s ∈ S. Then, update (5) is given as
  xk+1 = xk − γkgk,   γk = 0 if gk = 0, and γk = min{αk, (f(xk; Sk) − C(Sk))/∥gk∥2} else,
where gk ∈ ∂f(xk; Sk). Moreover, it holds
  fxk(xk+1; Sk) = max{C(Sk), f(xk; Sk) − αk∥gk∥2},   (32)
and therefore fxk(xk+1; Sk) = f(xk; Sk) − γk∥gk∥2.
Proof. We apply Lemma 9 with λ = 0. As f(xk; Sk) ≥ C(Sk), we have that τk⁺ = τk = γk.
2092
+ k = τk = γk.
2093
+ A.2
2094
+ Proof of Theorem 7
2095
+ Proof of Theorem 7. In the proof, we will denote gk = ∇f(xk; Sk). We apply Lemma 6, (16) with
2096
+ x = x⋆. Due to Lemma 2 (ii) and convexity of f(·; s) it holds
2097
+ ψxk(x⋆; Sk) ≤ f(x⋆; Sk) + ϕ(x⋆).
2098
+ Together with (17), we have
2099
+ (1 + αkλ)∥xk+1 − x⋆∥2 ≤ ∥xk − x⋆∥2 − ∥xk+1 − xk∥2 + 2αk[ϕ(x⋆) − ϕ(xk+1)]
2100
+ + 2αk
2101
+
2102
+ f(x⋆; Sk) − f(xk; Sk) − ⟨gk, xk+1 − xk⟩
2103
+
2104
+ .
2105
+ (33)
2106
+ Smoothness of f yields
2107
+ −f(xk) ≤ −f(xk+1) + ⟨∇f(xk), xk+1 − xk⟩ + L
2108
+ 2 ∥xk+1 − xk∥2.
2109
+ Consequently,
2110
+ − ⟨gk, xk+1 − xk⟩ = f(xk) − f(xk) − ⟨gk, xk+1 − xk⟩
2111
+ ≤ f(xk) − f(xk+1) + ⟨∇f(xk) − gk, xk+1 − xk⟩ + L
2112
+ 2 ∥xk+1 − xk∥2
2113
+ ≤ f(xk) − f(xk+1) + θαk
2114
+ 2 ∥∇f(xk) − gk∥2 +
2115
+ 1
2116
+ 2θαk
2117
+ ∥xk+1 − xk∥2 + L
2118
+ 2 ∥xk+1 − xk∥2.
2119
+ for any θ > 0, where we used Young’s inequality in the last step. Plugging into (33) gives
2120
+ (1 + αkλ)∥xk+1 − x⋆∥2 ≤ ∥xk − x⋆∥2 +
2121
+
2122
+ αkL + 1
2123
+ θ − 1
2124
+
2125
+ ∥xk+1 − xk∥2 + 2αk[ϕ(x⋆) − ϕ(xk+1)]
2126
+ + 2αk
2127
+
2128
+ f(x⋆; Sk) − f(xk; Sk) + f(xk) − f(xk+1)
2129
+
2130
+ + θα2
2131
+ k∥∇f(xk) − gk∥2.
2132
+ Applying conditional expectation, we have E[f(x⋆; Sk)|Fk] = f(x⋆) and
2133
+ E[−f(xk; Sk) + f(xk)|Fk] = 0,
2134
+ E[∥∇f(xk) − gk∥2|Fk] ≤ β.
2135
+ Moreover, by assumption, αkL + 1
2136
+ θ − 1 ≤ 0. Altogether, applying total expectation yields
2137
+ (1 + αkλ)E∥xk+1 − x⋆∥2 ≤ E∥xk − x⋆∥2 + 2αkE[ψ(x⋆) − ψ(xk+1)] + θβα2
2138
+ k
2139
+ 25
2140
+
2141
+ which proves (20).
2142
Proof of a): Let αk = 1/(λ(k + k0)). Denote ∆k := E∥xk − x⋆∥2. Rearranging and summing (20), we have
  Σ_{k=0}^{K−1} E[ψ(xk+1) − ψ(x⋆)] ≤ Σ_{k=0}^{K−1} [ (1/(2αk))∆k − ((1 + αkλ)/(2αk))∆k+1 + (θβαk)/2 ].
Plugging in αk, we have (1 + αkλ)/(2αk) = λ(k + k0)/2 + λ/2 and thus
  Σ_{k=0}^{K−1} E[ψ(xk+1) − ψ(x⋆)] ≤ Σ_{k=0}^{K−1} [ (λ(k + k0)/2)∆k − (λ(k + 1 + k0)/2)∆k+1 ] + (θβ/2) Σ_{k=0}^{K−1} 1/(λ(k + k0)).
Dividing by K and using convexity of ψ (by assumption f is convex and therefore ψ is convex), we have
  E[ψ((1/K) Σ_{k=0}^{K−1} xk+1) − ψ(x⋆)] ≤ (λk0/(2K))∥x0 − x⋆∥2 + (θβ/(2λK)) Σ_{k=0}^{K−1} 1/(k + k0).
Finally, as k0 ≥ 1, we estimate Σ_{k=0}^{K−1} 1/(k + k0) ≤ Σ_{k=0}^{K−1} 1/(k + 1) ≤ 1 + ln K by Lemma 13 and obtain (21).

Proof of b): Similar to the proof above, we rearrange and sum (20) from k = 0, . . . , K − 1, and obtain
  Σ_{k=0}^{K−1} αkE[ψ(xk+1) − ψ(x⋆)] ≤ ∥x0 − x⋆∥2/2 + (θβ/2) Σ_{k=0}^{K−1} α2_k.
We divide by Σ_{k=0}^{K−1} αk and use convexity of ψ in order to obtain the left-hand side of (22). Moreover, by Lemma 13 we have
  Σ_{k=0}^{K−1} αk ≥ 2α(√(K + 1) − 1),   Σ_{k=0}^{K−1} α2_k ≤ α2(1 + ln K).
Plugging in the above estimates gives
  E[ψ((1/Σ_{k=0}^{K−1} αk) Σ_{k=0}^{K−1} αkxk+1) − ψ(x⋆)] ≤ ∥x0 − x⋆∥2/(4α(√(K + 1) − 1)) + θβα(1 + ln K)/(4(√(K + 1) − 1)).

Proof of c): If f is µ-strongly convex, then ψ is (λ + µ)-strongly convex and
  ψ(x⋆) − ψ(xk+1) ≤ −((µ + λ)/2)∥xk+1 − x⋆∥2.
From (20), with αk = α, we get
  (1 + α(µ + 2λ))E∥xk+1 − x⋆∥2 ≤ E∥xk − x⋆∥2 + θβα2.
Doing a recursion of the above from k = 0, . . . , K − 1 gives
  E∥xK − x⋆∥2 ≤ (1 + α(µ + 2λ))^{−K}∥x0 − x⋆∥2 + θβα2 Σ_{k=1}^{K} (1 + α(µ + 2λ))^{−k}.
Using the geometric series, Σ_{k=1}^{K} (1 + α(µ + 2λ))^{−k} ≤ (1 + α(µ + 2λ))/(α(µ + 2λ)) − 1 = 1/(α(µ + 2λ)), and thus
  E∥xK − x⋆∥2 ≤ (1 + α(µ + 2λ))^{−K}∥x0 − x⋆∥2 + θβα/(µ + 2λ).
2287
A.3 Proof of Theorem 8

Proof of Theorem 8. In the proof, we will denote gk = ∇f(xk; Sk). By assumption f is ρ-weakly convex and hence ψ is (ρ − λ)-weakly convex if ρ > λ and convex if ρ ≤ λ. Hence, x̂k := prox_{ηψ}(xk) is well-defined for η < 1/(ρ − λ) if ρ > λ, and for any η > 0 else. Note that x̂k is Fk-measurable. We apply Lemma 6, (16) with x = x̂k. Due to Lemma 2 (ii) it holds
  ψxk(x̂k; Sk) = fxk(x̂k; Sk) + ϕ(x̂k) ≤ f(x̂k; Sk) + (ρSk/2)∥x̂k − xk∥2 + ϕ(x̂k).
Together with (17), this gives
  (1 + αkλ)∥xk+1 − x̂k∥2 ≤ (1 + αkρSk)∥xk − x̂k∥2 − ∥xk+1 − xk∥2 + 2αk[ϕ(x̂k) − ϕ(xk+1) + f(x̂k; Sk) − f(xk; Sk) − ⟨gk, xk+1 − xk⟩].
Analogous to the proof of Theorem 7, due to Lipschitz smoothness, for all θ > 0 we have
  −f(xk; Sk) − ⟨gk, xk+1 − xk⟩ ≤ −f(xk; Sk) + f(xk) − f(xk+1) + (θαk/2)∥∇f(xk) − gk∥2 + (1/(2θαk) + L/2)∥xk+1 − xk∥2.
Plugging in gives
  (1 + αkλ)∥xk+1 − x̂k∥2 ≤ (1 + αkρSk)∥xk − x̂k∥2 + 2αk[ϕ(x̂k) − ϕ(xk+1)] + 2αk[f(x̂k; Sk) − f(xk; Sk) + f(xk) − f(xk+1) + (θαk/2)∥∇f(xk) − gk∥2] + (1/θ + αkL − 1)∥xk+1 − xk∥2.
It holds E[f(x̂k; Sk) − f(xk; Sk)|Fk] = f(x̂k) − f(xk) and E[ψ(x̂k)|Fk] = ψ(x̂k). By Assumption 4, we have E[∥gk − ∇f(xk)∥2|Fk] ≤ β. Altogether, taking conditional expectation yields
  (1 + αkλ)E[∥xk+1 − x̂k∥2|Fk] ≤ (1 + αkρ)∥xk − x̂k∥2 + 2αkE[ψ(x̂k) − ψ(xk+1)|Fk] + α2_kθβ + (1/θ + αkL − 1)E[∥xk+1 − xk∥2|Fk].
Next, the definition of the proximal operator implies that almost surely
  ψ(x̂k) + (1/(2η))∥x̂k − xk∥2 ≤ ψ(xk+1) + (1/(2η))∥xk+1 − xk∥2,
and hence
  E[ψ(x̂k) − ψ(xk+1)|Fk] ≤ E[(1/(2η))∥xk+1 − xk∥2 − (1/(2η))∥x̂k − xk∥2 | Fk].
Altogether, we have
  (1 + αkλ)E[∥xk+1 − x̂k∥2|Fk] ≤ (1 + αk(ρ − η^{−1}))∥xk − x̂k∥2 + α2_kθβ + (1/θ + αkL + αkη^{−1} − 1)E[∥xk+1 − xk∥2|Fk].
From assumption (24), we can drop the last term. Now, we aim for a recursion in envηψ. Using that
  (1 + αk(ρ − η^{−1}))/(1 + αkλ) = (1 + αkλ − αkλ + αk(ρ − η^{−1}))/(1 + αkλ) = 1 + αk(ρ − η^{−1} − λ)/(1 + αkλ) ≤ 1 + αk(ρ − η^{−1} − λ),
we get
  E[envηψ(xk+1)|Fk] ≤ E[ψ(x̂k) + (1/(2η))∥xk+1 − x̂k∥2|Fk] ≤ ψ(x̂k) + (1/(2η))∥xk − x̂k∥2 + (1/(2η))αk(ρ − η^{−1} − λ)∥xk − x̂k∥2 + (α2_k/(2η))θβ,
where the first two terms on the right equal envηψ(xk). Now using ∥xk − x̂k∥ = η∥∇envηψ(xk)∥ we conclude
  E[envηψ(xk+1)|Fk] ≤ envηψ(xk) + (η/2)αk(ρ − η^{−1} − λ)∥∇envηψ(xk)∥2 + (α2_k/(2η))θβ.
Due to (24), we have η^{−1} + λ − ρ > 0. Taking expectation and unfolding the recursion by summing over k = 0, . . . , K − 1, we get
  Σ_{k=0}^{K−1} (αk/2)(1 − η(ρ − λ))E∥∇envηψ(xk)∥2 ≤ envηψ(x0) − E[envηψ(xK)] + Σ_{k=0}^{K−1} (α2_k/(2η))θβ.
Now using that envηψ(xK) ≥ inf ψ almost surely, we finally get
  Σ_{k=0}^{K−1} αkE∥∇envηψ(xk)∥2 ≤ 2(envηψ(x0) − inf ψ)/(1 − η(ρ − λ)) + (βθ/(η(1 − η(ρ − λ)))) Σ_{k=0}^{K−1} α2_k,   (34)
which proves (25). Now choose αk = α/√(k + 1) and divide (34) by Σ_{k=0}^{K−1} αk. Using Lemma 13 for Σ_{k=0}^{K−1} αk and Σ_{k=0}^{K−1} α2_k, we have
  min_{k=0,...,K−1} E∥∇envηψ(xk)∥2 ≤ (envηψ(x0) − inf ψ)/(α(1 − η(ρ − λ))(√(K + 1) − 1)) + (βθ/(2η(1 − η(ρ − λ)))) · α(1 + ln K)/(√(K + 1) − 1).
Choosing αk = α/√K instead, we can identify the left-hand side of (34) as α√K E∥∇envηψ(x∼K)∥2. Dividing by α√K and using Σ_{k=0}^{K−1} α2_k = α2, we obtain
  E∥∇envηψ(x∼K)∥2 ≤ 2(envηψ(x0) − inf ψ)/(α(1 − η(ρ − λ))√K) + (βθ/(η(1 − η(ρ − λ)))) · α/√K.
2492
B Auxiliary Lemmas

Lemma 11. Let ϕ be convex and f be L-smooth. For η > 0, define Gη(x) := η^{−1}(x − prox_{ηϕ}(x − η∇f(x))). For any η > 0 and x ∈ Rn, it holds
  ((1 − Lη)/η)∥Gη(x)∥ ≤ ∥∇envηψ(x)∥ ≤ ((1 + Lη)/η)∥Gη(x)∥.
Proof. This follows from the fact that ∥∇envηψ(x)∥ = η^{−1}∥x − prox_{ηψ}(x)∥ and applying (Drusvyatskiy & Lewis, 2018, Thm. 3.5) with t = η, G = ϕ, Φ = ψ, and β = L.
2510
Lemma 12. Let c ∈ R, a, x0 ∈ Rn and β > 0, and let ϕ : Rn → R ∪ {∞} be proper, closed, convex. The solution to
  y+ = argmin_{y∈Rn} (c + ⟨a, y⟩)₊ + ϕ(y) + (1/(2β))∥y − x0∥2   (35)
is given by
  y+ = prox_{βϕ}(x0 − βa)   if c + ⟨a, prox_{βϕ}(x0 − βa)⟩ > 0,
  y+ = prox_{βϕ}(x0)        if c + ⟨a, prox_{βϕ}(x0)⟩ < 0,
  y+ = prox_{βϕ}(x0 − βua)  else, for u ∈ [0, 1] such that c + ⟨a, prox_{βϕ}(x0 − βua)⟩ = 0.   (36)

Remark 3. The first two conditions can not hold simultaneously due to uniqueness of the solution. If neither of the conditions of the first two cases is satisfied, we have to find the root of u ↦ c + ⟨a, prox_{βϕ}(x0 − βua)⟩ for u ∈ [0, 1]. Due to strong convexity of the objective in (35), we know that there exists a root and hence y+ can be found efficiently with bisection.

Proof. The objective of (35) is strongly convex and hence there exists a unique solution. Due to (Beck, 2017, Thm. 3.63), y is the solution to (35) if and only if it satisfies first-order optimality, i.e.
  ∃u ∈ ∂(·)₊(c + ⟨a, y⟩) : 0 ∈ ua + ∂ϕ(y) + (1/β)(y − x0).   (37)
Now, as y = prox_{βϕ}(z) ⟺ 0 ∈ ∂ϕ(y) + (1/β)(y − z), it holds
  (37) ⟺ ∃u ∈ ∂(·)₊(c + ⟨a, y⟩) : 0 ∈ ∂ϕ(y) + (1/β)(y − (x0 − βua))
       ⟺ ∃u ∈ ∂(·)₊(c + ⟨a, y⟩) : y = prox_{βϕ}(x0 − βua).
We distinguish three cases:
1. Let ȳ := prox_{βϕ}(x0 − βa) and suppose that c + ⟨a, ȳ⟩ > 0. Then ∂(·)₊(c + ⟨a, ȳ⟩) = {1} and hence ȳ satisfies (37) with u = 1. Hence, y+ = ȳ.
2. Let ȳ := prox_{βϕ}(x0) and suppose that c + ⟨a, ȳ⟩ < 0. Then ∂(·)₊(c + ⟨a, ȳ⟩) = {0} and hence ȳ satisfies (37) with u = 0. Hence, y+ = ȳ.
3. If neither the condition of the first nor of the second case of (36) is satisfied, then, as (37) is a necessary condition for the solution y+, it must hold c + ⟨a, y+⟩ = 0. Hence, there exists a u ∈ ∂(·)₊(c + ⟨a, y+⟩) = [0, 1] such that c + ⟨a, prox_{βϕ}(x0 − uβa)⟩ = 0.
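Remark 3 suggests a simple implementation. Below is a minimal sketch where, for illustration only, we substitute the prox of ϕ = (λ/2)∥·∥2; for this choice, u ↦ c + ⟨a, prox_{βϕ}(x0 − βua)⟩ is affine and decreasing in u, so bisection applies directly.

```python
import numpy as np

def prox(z, beta, lam=0.1):
    # stand-in prox for phi = lam/2 * ||.||^2; replace with the prox of your phi
    return z / (1.0 + beta * lam)

def solve_35(c, a, x0, beta, tol=1e-12):
    # the three cases of (36); the middle case via bisection on u in [0, 1]
    y = prox(x0 - beta * a, beta)
    if c + a @ y > 0:
        return y
    y = prox(x0, beta)
    if c + a @ y < 0:
        return y
    lo, hi = 0.0, 1.0   # h(u) = c + <a, prox(x0 - beta*u*a)> satisfies h(0) >= 0 >= h(1)
    while hi - lo > tol:
        u = 0.5 * (lo + hi)
        if c + a @ prox(x0 - beta * u * a, beta) > 0:
            lo = u
        else:
            hi = u
    return prox(x0 - beta * 0.5 * (lo + hi) * a, beta)
```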
2557
Lemma 13. For any K ≥ 1 it holds
  Σ_{k=0}^{K−1} 1/(k + 1) = 1 + Σ_{k=1}^{K−1} 1/(k + 1) ≤ 1 + ∫_0^{K−1} 1/(s + 1) ds = 1 + ln K,
  Σ_{k=0}^{K−1} 1/√(k + 1) ≥ ∫_0^{K} 1/√(s + 1) ds = 2√(K + 1) − 2.
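A quick numerical sanity check of both bounds (for K = 1 the first bound holds with equality, hence the small tolerance):

```python
import numpy as np

for K in [1, 10, 1000]:
    s1 = sum(1.0 / (k + 1) for k in range(K))
    s2 = sum(1.0 / np.sqrt(k + 1) for k in range(K))
    assert s1 <= 1 + np.log(K) + 1e-12
    assert s2 >= 2 * np.sqrt(K + 1) - 2
```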
2584
+
2585
C Model equivalence for SGD

In the unregularized case, the SGD update
  xk+1 = xk − αkgk,   gk ∈ ∂f(xk; Sk),
can be seen as solving (5) with the model
  fx(y; s) = f(x; s) + ⟨g, y − x⟩,   g ∈ ∂f(x; s).
Now, consider again the regularized problem (2) with ϕ(x) = (λ/2)∥x∥2 and update (8).
On the one hand, the model ψx(y; s) = f(x; s) + ϕ(x) + ⟨g + λx, y − x⟩ with g ∈ ∂f(x; s) yields
  xk+1 = xk − αk(gk + λxk) = (1 − αkλ)xk − αkgk.   (38)
On the other hand, the model ψx(y; s) = f(x; s) + ⟨g, y − x⟩ + ϕ(y) with g ∈ ∂f(x; s) results in
  xk+1 = prox_{αkϕ}(xk − αkgk) = (1/(1 + αkλ))(xk − αkgk) = (1 − (αk/(1 + αkλ))λ)xk − (αk/(1 + αkλ))gk.   (39)
Running (38) with step sizes αk = βk is equivalent to running (39) with step sizes αk/(1 + αkλ) = βk ⟺ αk = βk/(1 − βkλ). In this sense, standard SGD can be seen to be equivalent to proximal SGD for ℓ2-regularized problems.
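This equivalence is easy to verify numerically; a minimal sketch for one step:

```python
import numpy as np

rng = np.random.default_rng(0)
x, g = rng.standard_normal(5), rng.standard_normal(5)
beta, lam = 0.1, 0.01
alpha = beta / (1.0 - beta * lam)                # mapping alpha_k = beta_k / (1 - beta_k * lam)

step_38 = (1.0 - beta * lam) * x - beta * g      # update (38) with step size beta
step_39 = (x - alpha * g) / (1.0 + alpha * lam)  # update (39) with step size alpha

assert np.allclose(step_38, step_39)
```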
2618
D Additional information on numerical experiments

D.1 Matrix Factorization

Synthetic data generation: We adapted the experimental setting of the deep matrix factorization experiments in (Loizou et al., 2021), but extended it with regularization. We generate data in the following way: first sample B ∈ Rq×p with uniform entries in the interval [0, 1]. Then choose κ ∈ R (which will be our targeted inverse condition number) and compute A = DB, where D is a diagonal matrix with entries from 1 to κ (equidistant on a logarithmic scale).10 In order to investigate the impact of regularization, we generate a noise matrix E with uniform entries in [−ε, ε] and set Ã := A ⊙ (1 + E). We then sample y(i) ∼ N(0, I) and compute the targets b(i) = Ãy(i). A validation set of identical size is created by the same mechanism, but computing its targets, denoted by b(i)_val, via the original matrix A instead of Ã. The validation set contains Nval = N samples.

Name        | p | q  | N    | κ    | r  | ε
matrix-fac1 | 6 | 10 | 1000 | 1e-5 | 4  | 0
matrix-fac2 | 6 | 10 | 1000 | 1e-5 | 10 | 0.05

Table 1: Matrix factorization synthetic datasets.

Model and general setup: Problem (28) can be interpreted as a two-layer neural network without activation functions. We train the network using the squared distance of the model output and b(i) (averaged over a mini-batch) as the loss function. We run 50 epochs for different methods, step size schedules, and values of λ. For each different instance, we do ten independent runs: each run has the identical training set and initialization of W1 and W2, but different shuffling of the training set and different samples y(i) for the validation set. In order to allow a fair comparison, all methods have identical train and validation sets across all runs. All metrics are averaged over the ten runs. We always use a batch size of 20.

10 Note that (Loizou et al., 2021) uses entries from 1 to κ on a linear scale which, in our experiments, did not result in large condition numbers even if κ is very small.
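A minimal sketch of this generation procedure (our own function; defaults correspond to matrix-fac1 from Table 1, where r only sets the factor shapes W1 ∈ Rr×p, W2 ∈ Rq×r and does not enter the data):

```python
import numpy as np

def make_data(p=6, q=10, N=1000, kappa=1e-5, eps=0.0, seed=0):
    rng = np.random.default_rng(seed)
    B = rng.uniform(0.0, 1.0, size=(q, p))
    d = np.logspace(0.0, np.log10(kappa), num=q)   # entries 1 ... kappa, log-equidistant
    A = np.diag(d) @ B
    A_tilde = A * (1.0 + rng.uniform(-eps, eps, size=A.shape))
    Y = rng.standard_normal((p, N))                # columns y^(i) ~ N(0, I)
    B_targets = A_tilde @ Y                        # training targets b^(i) = A~ y^(i)
    return A, A_tilde, Y, B_targets
```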
2667
D.2 Plots for matrix-fac2

In this section, we plot additional results for Matrix Factorization, namely for the setting matrix-fac2 of Table 1. The results are qualitatively very similar to the setting matrix-fac1.

[Figure 10: Objective function for the Matrix Factorization problem (28), with constant (left) and sqrt (right) step size schedule and several choices of initial values. Here mink ψ(xk) is the best objective function value found over all methods and all iterations.]

[Figure 11: Validation error for the Matrix Factorization problem (28), with constant (left) and sqrt (right) step size schedule and several choices of initial values.]

[Figure 12: Objective function value and validation error over the course of optimization. For the validation error, we plot a rolling median over five epochs in order to avoid clutter.]
SNE4T4oBgHgl3EQfLAy5/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
T9E2T4oBgHgl3EQftQh4/content/tmp_files/2301.04068v1.pdf.txt ADDED
@@ -0,0 +1,5168 @@
1
+ BLOBBED TOPOLOGICAL RECURSION FROM EXTENDED
2
+ LOOP EQUATIONS
3
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
4
+ Abstract. We consider the N × N Hermitian matrix model with measure
5
+ dµE,λ(M) = 1
6
+ Z exp(− λN
7
+ 4 tr(M 4))dµE,0(M), where dµE,0 is the Gaußian mea-
8
+ sure with covariance ⟨MklMmn⟩ =
9
+ δknδlm
10
+ N(Ek+El) for given E1, ..., EN > 0. It was
11
+ previously understood that this setting gives rise to two ramified coverings x, y
12
+ of the Riemann sphere strongly tied by y(z) = −x(−z) and a family ω(g)
13
+ n
14
+ of
15
+ meromorphic differentials conjectured to obey blobbed topological recursion
16
+ due to Borot and Shadrin. We develop a new approach to this problem via
17
+ a system of six meromorphic functions which satisfy extended loop equations.
18
+ Two of these functions are symmetric in the preimages of x and can be deter-
19
+ mined from their consistency relations. An expansion at ∞ gives global linear
20
+ and quadratic loop equations for the ω(g)
21
+ n . These global equations provide the
22
+ ω(g)
23
+ n
24
+ not only in the vicinity of the ramification points of x but also in the
25
+ vicinity of all other poles located at opposite diagonals zi + zj = 0 and at
26
+ zi = 0. We deduce a recursion kernel representation valid at least for g ≤ 1.
27
+ 1. Introduction
28
+ 1.1. Historical comments. This paper is the fifth in a series [GHW19, SW22,
29
+ BHW22, HW21] which investigates and solves the quartic Kontsevich model. See
30
+ [BGHW22] for a review. Back in 1991, Kontsevich [Kon92] constructed his classi-
31
+ cal matrix model to prove Witten’s conjecture [Wit91] that the generating series
32
+ of intersection numbers on the moduli space Mg,n of stable complex curves is a tau
33
+ function of the integrable KdV hierarchy. It is formulated as an N ×N-Hermitian
34
+ matrix model with covariance ⟨MklMmn⟩ =
35
+ δknδlm
36
+ N(Ek+El) for Ek > 0, deformed by a
37
+ cubic potential
38
+ iN
39
+ 6 Tr(M 3).
40
+ Explicit results were computed for the correlation
41
+ function in the classical Kontsevich model for instance in [MS91, EO07, Eyn16]
42
+ and more recently for higher spectral dimension with smooth covariance renor-
43
+ malised by quantum field theoretical techniques in [GSW17, GSW18, GHW23].
44
+ The quartic Kontsevich model has the same covariance ⟨MklMmn⟩ =
45
+ δknδlm
46
+ N(Ek+El) for
47
+ Ek > 0, but deformed by a quartic potential λN
48
+ 4 Tr(M 4). It is different from the
49
+ generalised Kontsevich model [BCEGF21] (see for more details [BHW21, § 2.1]).
50
+ 2020 Mathematics Subject Classification. 05A15, 14H70, 14N10, 30F30, 32A20.
51
+ Key words and phrases. (Blobbed) topological recursion, matrix models, exactly solvable
52
+ models, enumerative geometry, Dyson-Schwinger equations.
53
+ 1
54
+ arXiv:2301.04068v1 [math-ph] 10 Jan 2023
55
+
56
+ 2
57
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
58
+ The Hermitian 1-matrix models with trivial covariance but arbitrary polyno-
59
+ mial deformation were understood properly in [Eyn05]. The solution of the more
60
+ advanced Hermitian 2-matrix model [CEO06] shared similar structures to the
61
+ Hermitian 1-matrix model and the Kontsevich model, which gave rise to a univer-
62
+ sal definition of the theory of topological recursion (TR) [EO07] for general initial
63
+ data (Σ, x, y, B), the so-called spectral curve. We assume in this article to have
64
+ Σ ⋍ ˆC and two coverings x, y : Σ → Σ with simple distinct ramification points.
65
+ These two coverings build in TR a meromorphic 1-form ω(0)TR
66
+ 1
67
+ = y dx on Σ;
68
+ ω(0)TR
69
+ 2
70
+ = B is the symmetric meromorphic bilinear diffferential on Σ2 with double
71
+ pole on the diagonal and no residue. From the initial data (Σ, x, y, B), Eynard and
72
+ Orantin defined recursively in the negative Euler characteristic −χ = 2g + n − 2
73
+ a family of symmetric meromorphic differentials ω(g)TR
74
+ n
75
+ on Σn with poles just at
76
+ the ramification points of x for −χ > 0.
77
+ The important insight coming from the Hermitian 2-matrix model was the cor-
78
+ rect local interpretation around a ramification point βi through the deck trans-
79
+ formation σi, giving a local map between two branches ramified at βi.
80
+ The proof that the 2-matrix model satisfies TR has used a completely new
81
+ technique due to the fact that the number of ramification points can be arbitrarily
82
+ large depending on the deformation (the potential) of the model. The starting
83
+ point was to look at two different classes of mixed correlators, where one of
84
+ them is rational in x(z) rather than z [CEO06].
85
+ We denote these functions
86
+ by H(g),TR
87
+ n+1
88
+ (y(w); z; I) and P (g),TR
89
+ n+1
90
+ (y(w); x(z); I)1, where I = {u1, ..., un}. Here,
91
+ H(g),TR is rational in {y(w), z} ∪ I and P (g),TR in {y(w), x(z)} ∪ I as indicated
92
+ by the arguments.
93
+ The two functions H(g),TR
94
+ and P (g),TR
95
+ together with W (g),TR
96
+ n
97
+ defined
98
+ by du1...dunW (g),TR
99
+ n+1
100
+ (z; u1, ..., un)dx(z) := ω(g)TR
101
+ n+1 (z, u1, ..., un) satisfy a Dyson-
102
+ Schwinger equation (DSE)
103
+ (y(w) − y(z))H(g),TR
104
+ n+1
105
+ (y(w); z; I) + P (g),TR
106
+ n+1
107
+ (y(w); x(z); I)
108
+ (1.1)
109
+ = −
110
+
111
+ g1+g2=g
112
+ I1⊎I2=I
113
+ (g2,I2)̸=(0,∅)
114
+ H(g1),TR
115
+ |I1|+1 (y(w); z; I1)W (g2),TR
116
+ |I2|+1 (z; I2) − ∂x(z′)H(g−1),TR
117
+ n+2
118
+ (y(w); z; z′, I)
119
+ ��
120
+ z′=z .
121
+ Exactly the same DSE for some H(g),TR and P (g),TR (but with different x, y)
122
+ appears in several other examples which are governed by TR (see for instance
123
+ [BCEGF21, BH22]).
124
+ The important observation is that the solution of P (g),TR has a representation
125
+ in terms of symmetric functions of W (g′),TR
126
+ n′
127
+ , which are symmetric in all preimages
128
+ of x in the variable z, i.e. symmetric in (ˆzk)k=0,1,...,d with x(z) = x(ˆzk) and ˆz0 = z.
129
+ Furthermore, the function H(g),TR was chosen such that its asymptotic expansion
130
+ 1In [CEO06], we identify U (g) → �
131
+ ui∈I ∂x(ui)H(g),T R and E(g) → �
132
+ ui∈I ∂x(ui)P (g),T R
133
+
134
+ BTR FROM EXTENDED LOOP EQUATIONS
135
+ 3
136
+ in y(w) has as leading order W (g),TR, i.e.
137
+ H(g),TR
138
+ n+1
139
+ (y(w); z; I) =
140
+ 1
141
+ y(w)W (g),TR
142
+ n+1
143
+ (z; I) + O(y(w)−2) ,
144
+ plus additional terms for (g, n) ∈ {(0, 0), (0, 1)}. From this one can show that
145
+ the asymptotic expansion of the solution of P (g),TR recovers the so-called linear
146
+ and quadratic loop equations [BEO15]), which are describing the local behaviour
147
+ of W (g),TR around the ramification points.
148
+ Finally, from the linear and qua-
149
+ dratic loop equations, the well-known recursion formula for the W (g),TR
150
+ n+1
151
+ (z; I) or
152
+ ω(g),TR
153
+ n+1
154
+ (z, I) can be deduced. Vica versa, for any spectral curve in TR, rˆole and
155
+ formulae for H(g),TR, P (g),TR are completely determined. Actually, to prove that
156
+ some example is governed by TR, the starting point is in general the equation
157
+ (1.1).
158
+ A family W (g),TR
159
+ n+1
160
+ (z; I) satisfies topological recursion if and only if it satisfies
161
+ the linear and quadratic loop equations and all of its poles are for 2g + n −
162
+ 1 > 0 at the ramifications points of x. It is very natural to assume the linear
163
+ and quadratic loop equations but to relax the assumption on the pole structure.
164
+ This gave birth to a generalisation of TR by blobbed topological recursion (BTR)
165
+ [BS17]. The motivation of formulating BTR came from the enumeration of stuffed
166
+ maps [Bor14], which is a natural generalisation of the Hermitian 1-matrix model
167
+ through a deformation (the potential) of higher topological structure. BTR is
168
+ built not just by the initial data (Σ, x, y, B), but enriched by additional blobs
169
+ φg′,n′ associated with a topology contributing if 2g + n − 2 ≤ 2g′ + n′ − 2. The
170
+ meromorphic functions W (g)
171
+ n+1(z; I) = PzW (g)
172
+ n+1(z; I)+ HzW (g)
173
+ n+1(z; I) decompose in
174
+ BTR into a part PzW (g)
175
+ n+1 with poles at the ramification points of x and a part
176
+ HzW (g)
177
+ n+1 with poles elsewhere. The decomposition is constructed by orthogonal
178
+ projectors Pz, Hz. Due to the linear and quadratic loop equations, it is rather
179
+ easy to show that the polar part PzW (g)
180
+ n+1(z; I) is still determined by the TR
181
+ recursion formula. However, the part HzW (g)
182
+ n+1(z; I) depends highly on the model
183
+ under consideration and therefore on the enriched initial data φg,n.
184
+ 1.2. Main result. Consider the N × N Hermitian matrix model with measure
185
+ dµE,λ(M) =
186
+ 1
187
+ Z exp(− λN
188
+ 4 tr(M 4))dµE,0(M) where dµE,0 is the Gaußian measure
189
+ with covariance ⟨MklMmn⟩ =
190
+ δknδlm
191
+ N(Ek+El) for given E1, ..., EN > 0. Let e1, ..., ed be
192
+ the pairwise distinct values in (E1, ..., EN) and r1, ..., rd their multiplicities. In
193
+ previous work [SW22, BHW22] we showed that this setting (called the quartic
194
+ Kontsevich model) is characterised by a generic ramified covering x : ˆC → ˆC of
195
+ degree d+1 with simple poles (one located at ∞) and simple ramification points.
196
+ The key observation was that the other ramified covering y of the spectral curve
197
+ is simply given by
198
+ y(z) = −x(−z).
199
+ (1.2)
200
+
201
+ 4
202
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
203
+ This strong x, y-entanglement has exceptional consequences. We show in this
204
+ paper that the Dyson-Schwinger equations established in [BHW22] give rise to a
205
+ system of six functions which come in triples
206
+ (P (g)
207
+ n+1(x(w), x(z); I), H(g)
208
+ n+1(x(w); z; I), U (g)
209
+ n+1(w, z; I))
210
+ and
211
+ (Q(g)
212
+ n+1(x(w), x(z); I), M (g)
213
+ n+1(x(w); z; I), V (g)
214
+ n+1(w, z; I)).
215
+ Compared with the two functions (P (g),TR
216
+ n+1
217
+ (y(w), x(z); I), H(g),TR(y(w); z; I)) in
218
+ TR, the rˆole of x, y is partly flipped. Partial fraction decompositions given in Def-
219
+ inition 2.2 and the equations themselves given in Proposition 2.3 are intervowen.
220
+ By extending the known techniques from the literature (applied for instance in
221
+ [BCEGF21, BH22]), we show how our system of equations can be solved and
222
+ how the Taylor expansion about
223
+ 1
224
+ x(w) = 0 gives rise to globally defined linear and
225
+ quadratic loop equations. They indicate poles at the ramification points z = βi of
226
+ x, at the opposite diagonal z +ui = 0 and (for g ≥ 1) at z = 0. From the detailed
227
+ structure a formula to compute ω(g)
228
+ n
229
+ recursively in decreasing Euler characteristic
230
+ is deduced:
231
+ Theorem 1.1. Let I = {u1, ..., un} and x be a generic ramified cover of ˆC. Let
232
+ x, y be related via (1.2) and ω(0)
233
+ 2 (z, w) =
234
+ dzdw
235
+ (z−w)2 +
236
+ dzdw
237
+ (z+w)2.
238
+ If the six function
239
+ (P (g), H(g), U (g)) and (Q(g), M (g), V (g)) satisfy the interwoven DSEs of Proposi-
240
+ tion 2.3 and the partial fraction decomposition of Definition 2.2, then the so-
241
+ lution for ω(g)
242
+ n+1(z, u1, ...un) = λ2g+n−1du1 · · · dunW (g)
243
+ n+1(z; u1, ..., un)dx(z), where
244
+ H(g)
245
+ n+1(x(v); z; I) = −
246
+ λW (g)
247
+ n+1(z;I)
248
+ x(v)
249
+ + O(x(v)−2), is recursively computed for all I with
250
+ 2g + |I| ≥ 2 and for g < 2 via the recursion kernel representation
251
+ ω(g)
252
+ |I|+1(z, I) =
253
+
254
+ βi
255
+ Res
256
+ q→βi Ki(z; q)
257
+ � �
258
+ I1⊎I2=I
259
+ g1+g2=g
260
+ (gi,Ii)̸=(0,∅)
261
+ ω(g1)
262
+ |I1|+1(q, I1)ω(g2)
263
+ |I2|+1(q, I2) + ω(g−1)
264
+ |I|+2 (q, q, I)
265
+
266
+ +
267
+ |I|
268
+
269
+ j=1
270
+ duj
271
+
272
+ Res
273
+ q→−uj Kuj(z; q)
274
+ � �
275
+ I1⊎I2=I
276
+ g1+g2=g
277
+ (gi,Ii)̸=(0,∅)
278
+ d−1
279
+ uj (ω(g1)
280
+ |I1|+1(q, I1)ω(g2)
281
+ |I2|+1(q, I2))
282
+ + d−1
283
+ uj ω(g−1)
284
+ |I|+2 (q, q, I) + (dx(q))2
285
+ 6
286
+ ∂2
287
+ ∂(x(q))2
288
+ �ω(g−1)
289
+ |I|+1 (q, I))
290
+ dx(q)dx(uj)
291
+ ���
292
+ + Res
293
+ q→0 K0(z; q)
294
+ � �
295
+ I1⊎I2=I
296
+ g1+g2=g
297
+ (gi,Ii)̸=(0,∅)
298
+ ω(g1)
299
+ |I1|+1(q, I1)ω(g2)
300
+ |I2|+1(q, I2) + ω(g−1)
301
+ |I|+2 (q, q, I)
302
+ + (dx(q))2
303
+ 2
304
+
305
+ ∂x(q)
306
+ �d−1
307
+ q′ ω(g−1)
308
+ |I|+2 (q, q′, I)
309
+ dx(q)
310
+ ���
311
+ q′=q
312
+ ��
313
+ ,
314
+ (1.3)
315
+
316
+ BTR FROM EXTENDED LOOP EQUATIONS
317
+ 5
318
+ where βi are the ramification points of x, ω(0)
319
+ 2 (q, q) should be replaced by
320
+ limq′→q(ω(0)
321
+ 2 (q, q′) −
322
+ 1
323
+ (x(q)−x(q′))2) and the recursion kernels are given by Ki(z; q) =
324
+
325
+ ( dz
326
+ z−q −
327
+ dz
328
+ z−σi(q) )
329
+ 2(y(q)−y(σi(q)))dx(q), Ku(z; q) = −
330
+ ( dz
331
+ z−q − dz
332
+ z+u )
333
+ 2(y(q)+x(u))dx(q) and K0(z; q) = −
334
+ ( dz
335
+ z−q − dz
336
+ z )
337
+ 2(y(q)+x(q))dx(q).
338
+ We do not see any obstacle to extend the result to all g; only the combinatorics
339
+ becomes extremely involved and Taylor expansions of P (g)
340
+ n+1(x(w), x(z); I) up to
341
+ an order which increases as 4g become necessary. Already in the proof up to
342
+ genus g ≤ 1 the employment of a loop insertion operator as an abbreviation
343
+ for particular rational functions of (P (g), H(g), U (g)) and (Q(g), M (g), V (g)) was
344
+ essential to master the combinatorics.
345
+ A few remarks:
346
+ • We establish the linear and quadratic loop equations globally on ˆC and
347
+ not only in small neighbourhoods of the ramification points as required
348
+ in the general formulation [BS17]. Therefore, we not only get a recursion
349
+ formula for the ‘polar’ part Pzω(g)
350
+ n+1(z, I) but for the entire ω(g)
351
+ n+1(z, I).
352
+ • In [HW21] we solved the genus-0 sector under the much weaker assumption
353
+ that x, y are related as y(z) = −x(ιz) for some holomorphic involution ι
354
+ on ˆC. We noticed that although one can solve (Q(0)
355
+ 1 , M (0)
356
+ 1 , V (0)
357
+ 1
358
+ ) also for
359
+ y(z) = −x(ιz) along the lines of [SW22], the resulting expressions are of
360
+ completely different type to which our tools do not apply. Nevertheless we
361
+ would like to remark that the involution identity discovered in [HW21] was
362
+ vastly extended in [Hoc22b, Hoc22a, ABDB+22] to a general approach to
363
+ the x-y symmetry in topological recursion.
364
+ • Considering in TR a more general version of (1.1) including intermediate
365
+ correlators W (g),TR
366
+ n,m
367
+ (see [Hoc22b] for details), one can show that these
368
+ correlatators can be derived via BTR with an astonishing similarity to
369
+ the formula (1.3) except for the pole at the origin [Hoc23].
370
+ The paper is organised as follows. In sec. 2 we derive from previous results
371
+ a system of equations for (P (g)
372
+ n , H(g)
373
+ n , U (g)
374
+ n ) and (Q(g)
375
+ n , M (g)
376
+ n , V (g)
377
+ n ) and introduce
378
+ the loop insertion operator. Sec. 3 solves P (0)
379
+ |I|+1(x(v), x(z); I) by exploiting its
380
+ symmetry in the preimages of x and its residues at x(v) = x(ui). The outcome
381
+ gives the linear and quadratic loop equations for W (0)
382
+ n . By essentially the same
383
+ methods (but including poles at x(v) = x(0)) we identify Q(0)
384
+ |I|+1(x(v), x(z); I) in
385
+ sec. 3.3 and, with much larger effort, in particular in view of poles at x(v) = x(z),
386
+ the functions P (1)
387
+ |I|+1(x(v), x(z); I) in secs. 4.1 and 4.2. Again, this allows us to
388
+ derive the global linear and quadratic loop equations for W (1)
389
+ n
390
+ from which we get
391
+ in sec. 5 the recursion formula stated in Theorem 1.1.
392
+
393
+ 6
394
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
395
+ Acknowledgements. AH was supported through the Walter-Benjamin fellow-
396
+ ship2. RW was supported3 by the Cluster of Excellence Mathematics M¨unster
397
+ and the CRC 1442 Geometry: Deformations and Rigidity.
398
+ 2. Setup
399
+ 2.1. Summary of prevous results. In [BHW22] we have shown that a
400
+ quartic analogue of the Kontsevich matrix model gives rise to, and is com-
401
+ pletely determined by, a coupled system of Dyson-Schwinger equations for
402
+ three families of meromorphic functions Ω(g)
403
+ n (z1, ..., zn), T (g)(u1, ..., un∥z, w|) and
404
+ T (g)(u1, ..., un∥z|w|) on the Riemann sphere ˆC = C ∪ {∞}. Of particular interest
405
+ are the Ω(g)
406
+ n
407
+ which give rise to meromorphic differentials
408
+ ω(g)
409
+ n (z1, ..., zn) = Ω(g)
410
+ n (z1, ..., zn)
411
+ n
412
+
413
+ j=1
414
+ dx(zj) ,
415
+ where
416
+ (2.1a)
417
+ x(z) = z − λ
418
+ N
419
+ d
420
+
421
+ k=1
422
+ ϱk
423
+ z + εk
424
+ .
425
+ (2.1b)
426
+ The ramified cover4 x : ˆC → ˆC forms with its reflection y(z) = −x(−z) and
427
+ ω(0)
428
+ 2 (z1, z2) =
429
+ dz1 dz2
430
+ (z1−z2)2 + dz1 dz2
431
+ (z1+z2)2 a spectral curve in the spirit of topological recursion
432
+ [EO07]. The parameters (λ, N, d, {ϱk, εk}) are defined by the initial data of the
433
+ quartic Kontsevich model: It is a matrix model for N × N-Hermitian matrices
434
+ M with covariance ⟨MklMmn⟩ =
435
+ δknδlm
436
+ N(Ek+El) for Ek > 0, deformed by a quartic
437
+ potential λN
438
+ 4 Tr(M 4). If (e1, ..., ed) are the pairwise different values in (Ek), which
439
+ arise with multiplicities r1, ..., rd, then (εk, ϱk) are determined by x(εk) = ek and
440
+ ϱkx′(εk) = rk with limλ→0 εk = ek and limλ→0 ϱk = rk [GHW19, SW22].
441
+ The system of Dyson-Schwinger equations for Ω(g)
442
+ n
443
+ and the two variants of T (g)
444
+ in [BHW22] is most conveniently expressed in terms of meromorphic functions
445
+ (W (g)
446
+ n , U (g)
447
+ n , V (g)
448
+ n ) where multiple derivatives
449
+
450
+ ∂x(ui) are taken out:
451
+ Ω(g)
452
+ n+1(z, u1, ..., un) =: ∂nW (g)
453
+ n+1(z; u1, ..., un)
454
+ ∂x(u1) · · · ∂x(un)
455
+ ,
456
+ (2.2)
457
+ T (g)
458
+ n+1(u1, ..., un∥z, w|) =: ∂nU (g)
459
+ n+1(z, w; u1, ..., un)
460
+ ∂x(u1) · · · ∂x(un)
461
+ ,
462
+ 2“Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) –
463
+ Project-ID 465029630.”
464
+ 3“Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) –
465
+ Project-ID 427320536 – SFB 1442, as well as under Germany’s Excellence Strategy EXC 2044
466
+ 390685587, Mathematics M¨unster: Dynamics – Geometry – Structure.”
467
+ 4The ramified cover x was called R in [SW22, BHW22].
468
+
469
+ BTR FROM EXTENDED LOOP EQUATIONS
470
+ 7
471
+ T (g)
472
+ n+1(u1, ..., un∥z|w|) =: ∂nV (g)
473
+ n+1(z, w; u1, ..., un)
474
+ ∂x(u1) · · · ∂x(un)
475
+ .
476
+ In terms of (U, V, W) and with I := {u1, ..., un} and |I| = n, the system reads:
477
+ (a) DSE for U (g)
478
+ n+1:
479
+ (x(w) + y(z))U (g)
480
+ |I|+1(z, w; I) + λ
481
+ N
482
+ d
483
+
484
+ k=1
485
+ rkU (g)
486
+ |I|+1(εk, w; I)
487
+ x(z) − x(εk)
488
+ (2.3)
489
+ = δ|I|,0δg,0 + λ
490
+
491
+ I1⊎I2=I
492
+ g1+g2=g
493
+ (g1,I1)̸=(0,∅)
494
+ W (g1)
495
+ |I1|+1(z; I1)U (g2)
496
+ |2|+1(z, w; I2) − λ
497
+ |I|
498
+
499
+ j=1
500
+ U (g)
501
+ |I| (ui, w; I \ uj)
502
+ x(uj) − x(z)
503
+ − λ
504
+
505
+ ∂x(s)U (g−1)
506
+ |I|+2 (z, w; I ∪ {s})
507
+ ���
508
+ s=z − λ
509
+ V (g−1)
510
+ |I|+1 (z, w; I) − V (g−1)
511
+ |I|+1 (w, w; I)
512
+ x(w) − x(z)
513
+ .
514
+ (b) DSE for V (g)
515
+ n+1:
516
+ (x(z) + y(z))V (g)
517
+ |I|+1(z, w; I) + λ
518
+ N
519
+ d
520
+
521
+ k=1
522
+ rk
523
+ V (g)
524
+ |I|+1(εk, w; I)
525
+ x(z) − x(εk)
526
+ (2.4)
527
+ = −λ
528
+
529
+ I1⊎I2=I
530
+ g1+g2=g
531
+ (g1,I1)̸=(0,∅)
532
+ W (g1)
533
+ |I1|+1(z; I1)V (g2)
534
+ |I2|+1(z, w; I2) − λ
535
+ |I|
536
+
537
+ j=1
538
+ V (g)
539
+ |I| (ui, w; I\ui)
540
+ x(ui) − x(z)
541
+ − λ
542
+
543
+ ∂x(s)V (g−1)
544
+ |I|+2 (z, w; I ∪ {s})
545
+ ���
546
+ s=z − λ
547
+ U (g)
548
+ |I|+1(z, w; I) − U (g)
549
+ |I|+1(w, w; I)
550
+ x(w) − x(z)
551
+ .
552
+ (c) Connecting equation for W (g)
553
+ n+1:
554
+ W (g)
555
+ |I|+1(z; I) =
556
+ δ|I|,1δg,0
557
+ x(z) − x(u1) + δ|I|,0δg,0
558
+ λ
559
+
560
+ x(z) + λ
561
+ N
562
+ d
563
+
564
+ k=1
565
+ rk
566
+ x(εk) − x(z)
567
+
568
+ (2.5)
569
+ + 1
570
+ N
571
+ d
572
+
573
+ l=1
574
+ rlU (g)
575
+ |I|+1(z, εl; I) −
576
+ |I|
577
+
578
+ j=1
579
+ U (g)
580
+ |I| (z, ui; I\ui) + V (g−1)
581
+ |I|+1 (z, z; I) .
582
+ The connecting equation (c) has been extended to include the consistency equa-
583
+ tion W (0)
584
+ 1 (z; ∅) :=
585
+ 1
586
+ λy(z) for the ramified cover x, see [SW22].
587
+ This consis-
588
+ tency equation turned the originally non-linear equation [GW09, GW14] for
589
+ U (0)
590
+ 1 (z, w) ≡ U (0)
591
+ 1 (z, w; ∅) into the linear equation (2.3) for I = ∅; its solution
592
+
593
+ 8
594
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
595
+ is [GHW19, SW22]
596
+ U (0)
597
+ 1 (z, w) =
598
+ 1
599
+ x(z) + y(w)
600
+ �d
601
+ k=1(x(w) + y(ˆzk))
602
+ �d
603
+ k=1(x(w) − x(εk))
604
+ (2.6)
605
+ where {z = ˆz0, ˆz1, ..., ˆzd} := x−1(x(z)) is the set of preimages of x. Straight-
606
+ forward manipulations show the symmetry U (0)
607
+ 1 (z, w) = U (0)
608
+ 1 (w, z). The basic
609
+ equation for V (0)
610
+ 1
611
+ (z, w) ≡ V (0)
612
+ 1
613
+ (z, w; ∅) has been solved in [SW22]:
614
+ V (0)
615
+ 1
616
+ (z, w)
617
+ (2.7)
618
+ =
619
+ λ
620
+ (x(w) − x(z))2
621
+
622
+ U (0)
623
+ 1 (z, w) − (x(w) + x(z) − 2x(0)) Æ(x(z)) Æ(x(w))
624
+ (x(w) + y(w))(x(z) + y(z))
625
+
626
+ ,
627
+ where Æ(x(z)) := �d
628
+ k=1
629
+ x(z)−x(αk)
630
+ x(z)−x(εk) and z ∈ {0, ±α1, ..., ±αd} are the solutions of
631
+ x(z) + y(z) = 0. The diagonal function is also expressed in terms of Æ(x(z))
632
+ [SW22]:
633
+ U (0)
634
+ 1 (z, z) = 2(x(z) − x(0))(Æ(x(z)))2
635
+ (x(z) + y(z))2
636
+ .
637
+ (2.8)
638
+ Remark 2.1. The equations (2.3) and (2.4) were derived in [BHW22] from
639
+ identities for the two-point functions ⟨MabMba⟩ =
640
+
641
+ HN MabMba dµE,λ(M) and
642
+ ⟨MaaMbb⟩ =
643
+
644
+ HN MaaMbb dµE,λ(M), which are symmetric in a, b. The deriva-
645
+ tion made a choice in the order of a, b. Repeating all steps in the other order one
646
+ would get an equation
647
+ (x(z) + y(w))U (g)
648
+ |I|+1(z, w; I) + λ
649
+ N
650
+ d
651
+
652
+ k=1
653
+ rkU (g)
654
+ |I|+1(z, εk; I)
655
+ x(w) − x(εk)
656
+ (2.3’)
657
+ = δ|I|,0δg,0 + λ
658
+
659
+ I1⊎I2=I
660
+ g1+g2=g
661
+ (g1,I1)̸=(0,∅)
662
+ W (g1)
663
+ |I1|+1(w; I1)U (g2)
664
+ |2|+1(z, w; I2) − λ
665
+ |I|
666
+
667
+ j=1
668
+ U (g)
669
+ |I| (z, ui; I \ uj)
670
+ x(uj) − x(w)
671
+ − λ
672
+
673
+ ∂x(s)U (g−1)
674
+ |I|+2 (z, w; I ∪ {s})
675
+ ���
676
+ s=w − λ
677
+ V (g−1)
678
+ |I|+1 (z, w; I) − V (g−1)
679
+ |I|+1 (z, z; I)
680
+ x(z) − x(w)
681
+ and similarly for V (g)
682
+ |I|+1(z, w; I).
683
+ Exchanging w
684
+
685
+ z in (2.3’) and com-
686
+ paring with
687
+ (2.3) shows that the pairs (U (g)
688
+ |I|+1(z, w; I), V (g)
689
+ |I|+1(z, w; I)) and
690
+ (U (g)
691
+ |I|+1(w, z; I), V (g)
692
+ |I|+1(w, z; I)) satisfy the same system of equations.
693
+ Since the
694
+ solution is unique according to the construction below, we necessarily have sym-
695
+ metry U (g)
696
+ |I|+1(z, w; I) = U (g)
697
+ |I|+1(w, z; I) and V (g)
698
+ |I|+1(z, w; I) = V (g)
699
+ |I|+1(w, z; I)) in the
700
+ first two arguments.
701
+
702
+ BTR FROM EXTENDED LOOP EQUATIONS
703
+ 9
704
+ The solution of the system (2.3)+(2.4)+(2.5) in [BHW22] for (g, n)
705
+ =
706
+ {(0, 2), (0, 3), (0, 4), (1, 1)} provided strong support for the conjecture that the
707
+ meromorphic differentials ω(g)
708
+ n
709
+ obey blobbed topological recursion, a systematic
710
+ extension of TR due to Borot and Shadrin [BS17].
711
+ In [HW21] we succeeded
712
+ in solving the genus g = 0 sector in larger generality. The result of [HW21],
713
+ restricted to y(z) = −x(−z), is covered by Thm. 1.1 for g = 0.
714
+ 2.2. Auxiliary functions. Our proof of Theorem 1.1 follows a very different
715
+ strategy than the one for genus g = 0 given in [HW21].
716
+ The key idea is to
717
+ rewrite the Dyson-Schwinger equations (2.3) and (2.4) into equations for auxiliary
718
+ functions which in one or two arguments are symmetric in the preimages of x
719
+ (given in (2.1b)).
720
+ For genus g ≥ 1 it is essential that y(z) = −x(−z).
721
+ We
722
+ separate the arguments in the functions below by a comma if the function is
723
+ symmetric when exchanging the arguments, otherwise by a semicolon.
724
+ Definition 2.2. For I = {u1, ..., un} the following combinations of the functions
725
+ U, V are introduced:
726
+ H(g)
727
+ |I|+1(x(v); z; I)
728
+ (2.9a)
729
+ := δg,0δI,∅ − λ
730
+ N
731
+ d
732
+
733
+ l=1
734
+ rlU (g)
735
+ |I|+1(z, εl; I)
736
+ x(v) − x(εl)
737
+ + λ
738
+ |I|
739
+
740
+ j=1
741
+ U (g)
742
+ |I| (z, uj; I\uj)
743
+ x(v) − x(uj)
744
+ − λ
745
+ V (g−1)
746
+ |I|+1 (z, z; I)
747
+ x(v) − x(z)
748
+ ,
749
+ P (g)
750
+ |I|+1(x(v), x(z); I)
751
+ (2.9b)
752
+ :=
753
+ λδ|I|,1δg,0
754
+ x(v) − x(u1) + δ|I|,0δg,0
755
+
756
+ x(v) + x(z) − λ
757
+ N
758
+ d
759
+
760
+ k=1
761
+ rk
762
+ x(v) − x(εk)
763
+
764
+ − λ
765
+ N
766
+ d
767
+
768
+ k=1
769
+ rkH(g)
770
+ |I|+1(x(v); εk; I)
771
+ x(z) − x(εk)
772
+ + λ
773
+ |I|
774
+
775
+ j=1
776
+ H(g)
777
+ |I| (x(v); uj; I \ uj)
778
+ x(z) − x(uj)
779
+ ,
780
+ M (g)
781
+ |I|+1(x(v); z; I)
782
+ (2.9c)
783
+ := − λ
784
+ N
785
+ d
786
+
787
+ l=1
788
+ rlV (g)
789
+ |I|+1(z, εl; I)
790
+ x(v) − x(εl)
791
+ + λ
792
+ |I|
793
+
794
+ j=1
795
+ V (g)
796
+ |I| (z, uj; I\uj)
797
+ x(v) − x(uj)
798
+ − λ
799
+ U (g)
800
+ |I|+1(z, z; I)
801
+ x(v) − x(z) ,
802
+ Q(g)
803
+ |I|+1(x(v), x(z); I)
804
+ (2.9d)
805
+ := − λ
806
+ N
807
+ d
808
+
809
+ k=1
810
+ rk
811
+ M (g)
812
+ |I|+1(x(v); εk; I)
813
+ x(z) − x(εk)
814
+ + λ
815
+ |I|
816
+
817
+ j=1
818
+ M (g)
819
+ |I|+1(x(v); uj; I\uj)
820
+ x(z) − x(uj)
821
+ .
822
+ Proposition 2.3. The Dyson-Schwinger equations (2.3), (2.4) and (2.5) imply:
823
+ H(g)
824
+ |I|+1(x(v); z; I)
825
+ (2.10a)
826
+
827
+ 10
828
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
829
+ = (x(z) + y(v))U (g)
830
+ |I|+1(v, z; I) + λ
831
+
832
+ I1⊎I2=I
833
+ g1+g2=g
834
+ (g1,I1)̸=(0,∅)
835
+ W (g1)
836
+ |I1|+1(v; I1)U (g2)
837
+ |I2|+1(v, z; I2)
838
+ + λ
839
+
840
+ ∂x(s)U (g−1)
841
+ |I|+2 (v, z; I ∪ s)
842
+ ���
843
+ s=v + λ
844
+ V (g−1)
845
+ |I|+1 (v, z; I)
846
+ x(z) − x(v)
847
+ ,
848
+ P (g)
849
+ |I|+1(x(v), x(z); I)
850
+ (2.10b)
851
+ = (x(v) + y(z))H(g)
852
+ |I|+1(x(v); z; I) + λ
853
+
854
+ I1⊎I2=I
855
+ g1+g1=g
856
+ (g1,I1)̸=(0,∅)
857
+ W (g1)
858
+ |I1|+1(z; I1)H(g)
859
+ |I2|+1(x(v); z; I2)
860
+ + λ
861
+
862
+ ∂x(s)H(g−1)
863
+ |I|+2 (x(v); z; I ∪ s)
864
+ ���
865
+ s=z + λ
866
+ M (g−1)
867
+ |I|+1 (x(v); z; I)
868
+ x(v) − x(z)
869
+ ,
870
+ M (g)
871
+ |I|+1(x(v); z; I)
872
+ (2.10c)
873
+ = (x(v) + y(v))V (g)
874
+ |I|+1(v, z; I) + λ
875
+
876
+ I1⊎I2=I
877
+ g1+g2=g
878
+ (g1,I1)̸=(0,∅)
879
+ W (g1)
880
+ |I1|+1(v; I1)V (g2)
881
+ |I2|+1(v, z; I2)
882
+ + λ
883
+
884
+ ∂x(s)V (g−1)
885
+ |I|+2 (v, z; I ∪ s)
886
+ ���
887
+ s=v + λ
888
+ U (g)
889
+ |I|+1(v, z; I)
890
+ x(z) − x(v) ,
891
+ Q(g)
892
+ |I|+1(x(v), x(z); I)
893
+ (2.10d)
894
+ = (x(z) + y(z))M (g)
895
+ |I|+1(x(v); z; I) + λ
896
+
897
+ I1⊎I2=I
898
+ g1+g2=g
899
+ (g1,I1)̸=(0,∅)
900
+ W (g1)
901
+ |I1|+1(z; I1)M (g2)
902
+ |I2|+1(x(v); z; I2)
903
+ + λ
904
+
905
+ ∂x(s)M (g−1)
906
+ |I|+2 (x(v); z; I ∪ s)
907
+ ���
908
+ s=z + λ
909
+ H(g)
910
+ |I|+1(x(v); z; I)
911
+ x(v) − x(z)
912
+ .
913
+ Proof. Equations (2.10a) and (2.10c) are an obvious rewriting of (2.3) and (2.4),
914
+ respectively. For the proof of (2.10b) one starts from (2.10a) for z �→ εk, multi-
915
+ plied by multiplies by
916
+ λ
917
+ N
918
+ rk
919
+ x(z)−x(εk) and summed over k. A rearrangement taking
920
+ (2.5) into acount gives the assertion. Similarly for (2.10d).
921
+
922
+ Inserting U (0)
923
+ 1 (v, z) given by (2.6) into (2.10a) and the result into (2.10b) gives
924
+ H(0)
925
+ 1 (x(v); z) := H(0)
926
+ 1 (x(v); z; ∅) =
927
+ d
928
+
929
+ k=1
930
+ x(v) + y(ˆzk)
931
+ x(v) − x(εk) ,
932
+ (2.11a)
933
+ P (0)
934
+ 1 (x(v), x(z)) := P (0)
935
+ 1 (x(v); x(z); ∅) =
936
+ �d
937
+ k=0(x(v) + y(ˆzk))
938
+ �d
939
+ k=1(x(v) − x(εk))
940
+ .
941
+ (2.11b)
942
+
943
+ BTR FROM EXTENDED LOOP EQUATIONS
944
+ 11
945
+ Proposition 2.4. The functions P (g)
946
+ n
947
+ and Q(g)
948
+ n
949
+ are symmetric in their first two
950
+ arguments.
951
+ Proof. Expressing all H(g′)
952
+ n′
953
+ and M (g′)
954
+ n′
955
+ in (2.10b) and (2.10d) in terms of U (g′′)
956
+ n′′
957
+ and
958
+ V (g′′)
959
+ n′′
960
+ via (2.10a) and (2.10c) gives a manifestly symmetric expression when taking
961
+ the symmetries U (g)
962
+ |I|+1(z, v; I) = U (g)
963
+ |I|+1(v, z; I) and V (g)
964
+ |I|+1(z, v; I) = V (g)
965
+ |I|+1(v, z; I)
966
+ according to Remark 2.1 into account.
967
+
968
+ The Dyson-Schwinger equations (2.10a), (2.10b), (2.10c) and (2.10d) can be
969
+ disentangled into two separate systems:
970
+ Proposition 2.5. Let (Ch)h∈N be the sequence of Catalan numbers, defined for
971
+ instance by C0 = 1 and Ch+1 = �h
972
+ l=0 ClCh−l. Then the linear combinations
973
+ ˆU (g)
974
+ |I|+1(v, z; I)
975
+ (2.12a)
976
+ := U (g)
977
+ |I|+1(v, z; I) +
978
+ g−1
979
+
980
+ h=0
981
+ (−1)hChλ1+2h
982
+ (x(v) − x(z))2+4hV (g−1−h)
983
+ |I|+1
984
+ (v, z; I) ,
985
+ ˆH(g)
986
+ |I|+1(x(v); z; I)
987
+ (2.12b)
988
+ := H(g)
989
+ |I|+1(x(v); z; I) +
990
+ g−1
991
+
992
+ h=0
993
+ (−1)hChλ1+2h
994
+ (x(v) − x(z))2+4hM (g−1−h)
995
+ |I|+1
996
+ (x(v); z; I) ,
997
+ ˆP (g)
998
+ |I|+1(x(v), x(z); I)
999
+ (2.12c)
1000
+ := P (g)
1001
+ |I|+1(x(v), x(z); I) +
1002
+ g−1
1003
+
1004
+ h=0
1005
+ (−1)hChλ1+2h
1006
+ (x(v) − x(z))2+4hQ(g−1−h)
1007
+ |I|+1
1008
+ (x(v), x(z); I) ,
1009
+ ˆV (g)
1010
+ |I|+1(z, v; I)
1011
+ (2.12d)
1012
+ := V (g)
1013
+ |I|+1(v, z; I) −
1014
+ g
1015
+
1016
+ h=0
1017
+ (−1)hChλ1+2h
1018
+ (x(v) − x(z))2+4hU (g−h)
1019
+ |I|+1 (v, z; I) ,
1020
+ ˆ
1021
+ M (g)
1022
+ |I|+1(x(v); z; I)
1023
+ (2.12e)
1024
+ := M (g)
1025
+ |I|+1(x(v); z; I) −
1026
+ g
1027
+
1028
+ h=0
1029
+ (−1)hChλ1+2h
1030
+ (x(v) − x(z))2+4hH(g−h)
1031
+ |I|+1 (x(v); z; I) ,
1032
+ ˆQ(g)
1033
+ |I|+1(x(v), x(z); I)
1034
+ (2.12f)
1035
+ := Q(g)
1036
+ |I|+1(x(v), x(z); I) −
1037
+ g
1038
+
1039
+ h=0
1040
+ (−1)hChλ1+2h
1041
+ (x(v) − x(z))2+4hP (g−h)
1042
+ |I|+1 (x(v), x(z); I)
1043
+ satisfy
1044
+ ˆH(g)
1045
+ I|+1(x(v); z; I)
1046
+ (2.13a)
1047
+
1048
+ 12
1049
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
1050
+ = (x(z) + y(v)) ˆU (g)
1051
+ |I|+1(v, z; I) + λ
1052
+ ∂ ˆU (g−1)
1053
+ |I|+2 (v, z; I ∪ s)
1054
+ ∂x(s)
1055
+ ���
1056
+ s=v
1057
+ + λ
1058
+
1059
+ I1⊎I2=I
1060
+ g1+g2=g
1061
+ (g1,I1)̸=(0,∅)
1062
+
1063
+ W (g1)
1064
+ |I1|+1(v; I1) − (−1)g1λ2g1−1δI1,∅Cg1−1
1065
+ (x(z) − x(v))4g1−1
1066
+
1067
+ ˆU (g2)
1068
+ |I2|+1(v, z; I2) ,
1069
+ ˆP (g)
1070
+ I|+1(x(v), x(z); I)
1071
+ (2.13b)
1072
+ = (x(v) + y(z)) ˆH(g)
1073
+ |I|+1(x(v); z; I) + λ
1074
+ ∂ ˆH(g−1)
1075
+ |I|+2 (x(v); z; I ∪ s)
1076
+ ∂x(s)
1077
+ ���
1078
+ s=z
1079
+ + λ
1080
+
1081
+ I1⊎I2=I
1082
+ g1+g2=g
1083
+ (g1,I1)̸=(0,∅)
1084
+
1085
+ W (g1)
1086
+ |I1|+1(z; I1) − (−1)g1λ2g1−1δI1,∅Cg1−1
1087
+ (x(v) − x(z))4g1−1
1088
+
1089
+ ˆH(g2)
1090
+ |I2|+1(x(v); z; I2) ,
1091
+ ˆ
1092
+ M (g)
1093
+ I|+1(x(v); z; I)
1094
+ (2.13c)
1095
+ = (x(v) + y(v))ˆV (g)
1096
+ |I|+1(v, z; I) + λ
1097
+ ∂ ˆV (g−1)
1098
+ |I|+2 (v, z; I ∪ s)
1099
+ ∂x(s)
1100
+ ���
1101
+ s=z
1102
+ + λ
1103
+
1104
+ I1⊎I2=I
1105
+ g1+g2=g
1106
+ (g1,I1)̸=(0,∅)
1107
+
1108
+ W (g1)
1109
+ |I1|+1(v; I1) − (−1)g1λ2g1−1δI1,∅Cg1−1
1110
+ (x(v) − x(z))4g1−1
1111
+
1112
+ ˆV (g2)
1113
+ |I2|+1(v, z; I2) ,
1114
+ ˆQ(g)
1115
+ I|+1(x(v), x(z); I)
1116
+ (2.13d)
1117
+ = (x(z) + y(z)) ˆ
1118
+ M (g)
1119
+ |I|+1(x(v); z; I) + λ
1120
+ ∂ ˆ
1121
+ M (g−1)
1122
+ |I|+2 (x(v); z; I ∪ s)
1123
+ ∂x(s)
1124
+ ���
1125
+ s=z
1126
+ + λ
1127
+
1128
+ I1⊎I2=I
1129
+ g1+g2=g
1130
+ (g1,I1)̸=(0,∅)
1131
+
1132
+ W (g1)
1133
+ |I1|+1(z; I1) − (−1)g1λ2g1−1δI1,∅Cg1−1
1134
+ (x(v) − x(z))4g1−1
1135
+
1136
+ ˆ
1137
+ M (g2)
1138
+ |I2|+1(x(v); z; I2) .
1139
+ Proof. Lengthy but straightforward. The Segner recursion Ch+1 = �h
1140
+ l=0 ClCh−l
1141
+ is a key step.
1142
+
1143
+ In the following cases it is safe to omit the hat: ˆU (0)
1144
+ n+1 = U (0)
1145
+ n+1, ˆH(0)
1146
+ n+1 = H(0)
1147
+ n+1,
1148
+ ˆP (0)
1149
+ n+1 = P (0)
1150
+ n+1.
1151
+ 2.3. The loop insertion operator. Let ˜R be the ring of Q-polynomials in the
1152
+ variables
1153
+
1154
+ x, y, W (g)
1155
+ n+1, ˆU (g)
1156
+ n+1, ˆH(g)
1157
+ n+1, ˆP (g)
1158
+ n+1, ˆV (g)
1159
+ n+1, ˆ
1160
+ M (g)
1161
+ n+1, ˆQ(g)
1162
+ n+1
1163
+
1164
+ ,
1165
+ (2.14a)
1166
+
1167
+ BTR FROM EXTENDED LOOP EQUATIONS
1168
+ 13
1169
+ in x( . )-derivatives of them (e.g.
1170
+ ∂2y(z)
1171
+ ∂(x(z))2,
1172
+ ∂2W (0)
1173
+ n+1(z;u1,...,un)
1174
+ ∂x(z)∂x(u1)
1175
+ ,
1176
+ ∂4 ˆP (g)
1177
+ n+1(x(z),x(w);u1,...,un)
1178
+ ∂(x(z))2∂x(w)∂x(un)
1179
+ )
1180
+ as well as reciprocals
1181
+
1182
+ 1
1183
+ x(∗) − x(⋆),
1184
+ 1
1185
+ x(∗) + y(⋆),
1186
+ 1
1187
+ U (0)
1188
+ 1
1189
+ ,
1190
+ 1
1191
+ H(0)
1192
+ 1
1193
+ ,
1194
+ 1
1195
+ P (0)
1196
+ 1
1197
+ ,
1198
+ 1
1199
+ ˆV (0)
1200
+ 1
1201
+ ,
1202
+ 1
1203
+ ˆ
1204
+ M (0)
1205
+ 1
1206
+ ,
1207
+ 1
1208
+ ˆQ(0)
1209
+ 1
1210
+
1211
+ .
1212
+ (2.14b)
1213
+ We let I be the ideal in ˜R generated by
1214
+
1215
+ (x(v) + y(z)) ·
1216
+ 1
1217
+ (x(v) + y(z)) − 1,
1218
+ U (0)
1219
+ 1 (v, z) ·
1220
+ 1
1221
+ U (0)
1222
+ 1 (v, z)
1223
+ − 1,
1224
+ H(0)
1225
+ 1 (x(v); z) ·
1226
+ 1
1227
+ H(0)
1228
+ 1 (x(v); z)
1229
+ − 1,
1230
+ P (0)
1231
+ 1 (x(v), x(z)) ·
1232
+ 1
1233
+ P (0)
1234
+ 1 (x(v), x(z))
1235
+ − 1
1236
+
1237
+ and R = ˜R/I be the quotient. The variables (2.14) belong to the field of mero-
1238
+ morphic functions on several copies of ˆC. For our purpose they are considered
1239
+ as independent variables. We also consider ˆP (g)
1240
+ n+1(x(v), x(z); I) as independent of
1241
+ ˆP (g)
1242
+ n+1(x(v′), x(z′); I′) whenever x(v) ̸= x(v′) or x(z) ̸= x(z′) or I ̸= I′. Simi-
1243
+ larly for ˆQ(g)
1244
+ n+1. We consider ˆH(g)
1245
+ n+1(x(v); z; I) as independent of ˆH(g)
1246
+ n+1(x(v′); z′; I′)
1247
+ whenever x(v) ̸= x(v′) or z ̸= z′ or I ̸= I′. Similarly for ˆ
1248
+ M (g)
1249
+ n+1. We consider
1250
+ ˆU (g)
1251
+ n+1(v, z; I) as independent of ˆH(g)
1252
+ n+1(v′; z′; I′) whenever v ̸= v′ or z ̸= z′ or
1253
+ I ̸= I′. Similarly for ˆV (g)
1254
+ n+1.
1255
+ The Dyson-Schwinger equations (2.13) are polynomial equations fi = 0 for fi ∈
1256
+ R. In addition we have relations between residues which follow from (2.9) and
1257
+ (2.12) as well as from the condition [(x(v))−1]H(g)
1258
+ |I|+1(x(v); z; I) = −λW (g)
1259
+ n+1(z; I).
1260
+ The most decisive tool in our construction is a loop insertion operator.
1261
+ Definition 2.6. For u ∈ ˆC, the loop insertion operator Du : R → R is defined
1262
+ on the variables (2.14a) as
1263
+ Du(x(z)) = 0,
1264
+ Duy(z) = λW (0)
1265
+ 2 (z; u),
1266
+ DuW (g)
1267
+ |I|+1(z; I) = W (g)
1268
+ |I|+2(z; I ∪ u),
1269
+ Du ˆU (g)
1270
+ |I|+1(v, z; I) = ˆU (g)
1271
+ |I|+2(v, z; I ∪ u),
1272
+ Du ˆV (g)
1273
+ |I|+1(v, z; I) = ˆV (g)
1274
+ |I|+2(v, z; I ∪ u),
1275
+ Du ˆH(g)
1276
+ |I|+1(x(v); z; I) = ˆH(g)
1277
+ |I|+2(x(v); z; I ∪ u),
1278
+ Du ˆ
1279
+ M (g)
1280
+ |I|+1(x(v); z; I) = ˆ
1281
+ M (g)
1282
+ |I|+2(x(v); z; I ∪ u),
1283
+ Du ˆP (g)
1284
+ |I|+1(x(v), x(z); I) = ˆP (g)
1285
+ |I|+2(x(v), x(z); I ∪ u),
1286
+ Du ˆQ(g)
1287
+ |I|+1(x(v), x(z); I) = ˆQ(g)
1288
+ |I|+2(x(v), x(z); I ∪ u)
1289
+ and extended to R by linearity, Leibniz rule, the requirement Du : I → I
1290
+ and commutation with any x( . )-derivative. For J = {u1, ..., un} we let DJ :=
1291
+ Du1 · · · Dun : R → R. Moreover, D∅ is defined as the identity operator.
1292
+
1293
+ 14
1294
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
1295
+ The condition Du : I → I just means that the loop insertion operator of the
1296
+ reciprocals in (2.14b) is given by the usual rules of calculus, e.g.
1297
+ I ∋Du
1298
+
1299
+ (x(v) + y(z)) ·
1300
+ 1
1301
+ (x(v) + y(z))
1302
+
1303
+ − Du1
1304
+ = (x(v) + y(z))
1305
+ � λW (0)
1306
+ 2 (z; u)
1307
+ (x(v) + y(z))2 + Du
1308
+
1309
+ 1
1310
+ x(v) + y(z)
1311
+ ��
1312
+ ,
1313
+ i.e. Du
1314
+
1315
+ 1
1316
+ x(v)+y(z)
1317
+
1318
+ = − λW (0)
1319
+ 2
1320
+ (z;u)
1321
+ (x(v)+y(z))2 + I. We can also apply DI to logarithms
1322
+ DI log H(0)
1323
+ 1 (x(v); z) =
1324
+ |I|
1325
+
1326
+ l=1
1327
+ (−1)l−1
1328
+ l
1329
+
1330
+ I1⊎...⊎Il=I
1331
+ I1,...Il̸=∅
1332
+ l�
1333
+ j=1
1334
+ H(0)
1335
+ |Ij|+1(x(v); z; Ij)
1336
+ H(0)
1337
+ 1 (x(v); z)
1338
+ ,
1339
+ (2.15)
1340
+ DI log P (0)
1341
+ 1 (x(v), x(z)) =
1342
+ |I|
1343
+
1344
+ l=1
1345
+ (−1)l−1
1346
+ l
1347
+
1348
+ I1⊎...⊎Il=I
1349
+ I1,...Il̸=∅
1350
+ l�
1351
+ j=1
1352
+ P (0)
1353
+ |Ij|+1(x(v), x(z); Ij)
1354
+ P (0)
1355
+ 1 (x(v), x(z))
1356
+ and similarly for DI log ˆ
1357
+ M (0)
1358
+ 1 (x(x); z) and DI log ˆQ(0)
1359
+ 1 (x(v), x(z)). In the same
1360
+ way one has
1361
+ DI log(x(v) + y(z)) =
1362
+ |I|
1363
+
1364
+ l=1
1365
+ (−1)l−1
1366
+ l
1367
+
1368
+ I1⊎...⊎Il=I
1369
+ I1,...Il̸=∅
1370
+ l�
1371
+ j=1
1372
+ λW (0)
1373
+ |Ij|+1(z; Ij)
1374
+ x(v) + y(z)
1375
+ .
1376
+ (2.16)
1377
+ The loop insertion operator is compatible with the Dyson-Schwinger equations
1378
+ (2.13).
1379
+ 3. Solution for genus g = 0
1380
+ 3.1. Auxiliary functions for genus g = 0. Starting point is the following
1381
+ observation:
1382
+ Lemma 3.1. For any I = {u1, ..., un} one has
1383
+ DI log P (0)
1384
+ 1 (x(v), x(z)) = DI log(x(v) + y(z)) + DI log H(0)
1385
+ 1 (x(v); z) ,
1386
+ (3.1)
1387
+ where the three terms are short-hand notations for (2.15) and (2.16).
1388
+ Proof. This is a combinatorial rewriting of (2.10b), which reads
1389
+ P (0)
1390
+ |I|+1(x(v), x(z); I)
1391
+ P (0)
1392
+ 1 (x(v), x(z))
1393
+ =
1394
+ H(0)
1395
+ |I|+1(x(v); z; I)
1396
+ H(0)
1397
+ 1 (x(v); z)
1398
+ +
1399
+ λW (0)
1400
+ |I|+1(z; I)
1401
+ x(v) + y(z)
1402
+ +
1403
+
1404
+ I′⊎I′′
1405
+ I′,I′′̸=∅
1406
+ λW (0)
1407
+ |I′|+1(z; I′)
1408
+ x(v) + y(z)
1409
+ H(0)
1410
+ |I′′|+1(x(v); z; I′′)
1411
+ H(0)
1412
+ 1 (x(v); z)
1413
+ .
1414
+ (3.2)
1415
+
1416
+ BTR FROM EXTENDED LOOP EQUATIONS
1417
+ 15
1418
+ We arrange products of these expressions into DI log P (0)
1419
+ 1 (x(v), x(z)) in the second
1420
+ line of (2.15). A contribution with h ≥ 1 factors of
1421
+ H(0)
1422
+ |Ii|+1(x(v);z;Ii)
1423
+ H(0)
1424
+ 1
1425
+ (x(v);z)
1426
+ and w ≥ 1
1427
+ factors of
1428
+ λW (0)
1429
+ |Ij|+1(z;Ij)
1430
+ x(v)+y(z)
1431
+ arises via the binomial theorem in
1432
+ (w+h−k)!
1433
+ (w−k)!(h−k)k! different
1434
+ ways if k times a factor of the second line of (3.2) occurs. The prefactor of such
1435
+ a term in DI log P (0)
1436
+ 1 (x(v), x(z)) is (−1)w+h−k−1
1437
+ (w+h−k)
1438
+ . The sum over k equals
1439
+ min(w,h)
1440
+
1441
+ k=0
1442
+ (−1)k (w + h − k − 1)!
1443
+ (w − k)!(h − k)k! = 0 ,
1444
+ which follows e.g. from [Gou10, vol. 4, eq. (10.20)]
1445
+ n
1446
+
1447
+ k=0
1448
+ (−1)k
1449
+ �n
1450
+ k
1451
+ ��x − k
1452
+ j
1453
+
1454
+ =
1455
+ �x − n
1456
+ j − n
1457
+
1458
+ when setting n = h, x = w + h − 1 and j = h − 1. Hence, all contributions with
1459
+ at least one factor
1460
+ H(0)
1461
+ |Ii|+1(x(v);z;Ii)
1462
+ H(0)
1463
+ 1
1464
+ (x(v);z)
1465
+ and at least one factor
1466
+ λW (0)
1467
+ |Ij|+1(z;Ij)
1468
+ x(v)+y(z)
1469
+ cancel, and
1470
+ the assertion follows.
1471
+
1472
+ In complete analogy to Lemma 3.1 one establishes
1473
+ DI log H(0)
1474
+ 1 (x(v); z) = DI log(x(z) + y(v)) + DI log U (0)
1475
+ 1 (v, z) ,
1476
+ (3.3a)
1477
+ DI log ˆ
1478
+ M (0)
1479
+ 1 (x(v), x(z)) = DI log(x(v) + y(v)) + DI log ˆV (0)
1480
+ 1
1481
+ (v, z) ,
1482
+ (3.3b)
1483
+ DI log ˆQ(0)
1484
+ 1 (x(v), x(z)) = DI log(x(z) + y(z)) + DI log ˆ
1485
+ M (0)
1486
+ 1 (x(v); z) .
1487
+ (3.3c)
1488
+ Lemma 3.2.
1489
+ DI
1490
+ U (0)
1491
+ 1 (v, z)
1492
+ H(0)
1493
+ 1 (x(v); z)
1494
+
1495
+ |I|
1496
+
1497
+ l=0
1498
+
1499
+ I0⊎...⊎Il=I\uj
1500
+ I1,...Il̸=∅
1501
+ U (0)
1502
+ |I0|+1(v, z; I0)
1503
+ H(0)
1504
+ 1 (x(v); z)
1505
+ (−1)l
1506
+ l�
1507
+ i=1
1508
+ H(0)
1509
+ |Ii|+1(x(v); z; Ii)
1510
+ H(0)
1511
+ 1 (x(v); z)
1512
+ = DI
1513
+ 1
1514
+ x(z) + y(v) .
1515
+ (3.4)
1516
+ Proof. Clear for I = ∅. For I ̸= ∅, start from the Dyson-Schwinger equation
1517
+ (2.10a)
1518
+ U (0)
1519
+ |I0|+1(v, z; I)
1520
+ H(0)
1521
+ 1 (x(v); z)
1522
+ =
1523
+ H(0)
1524
+ |I|+1(x(v), z; I)
1525
+ (x(z) + y(v))H(0)
1526
+ 1 (x(v); z)
1527
+ +
1528
+ (−λ)W (0)
1529
+ |I|+1(v; I)
1530
+ (x(z) + y(v))2
1531
+ +
1532
+
1533
+ I′⊎I′′=I
1534
+ I′,I′′̸=∅
1535
+ (−λ)W (0)
1536
+ |I′|+1(v; I′)
1537
+ (x(z) + y(v))
1538
+ U (0)
1539
+ |I′′|+1(v, z; I′′)
1540
+ H(0)
1541
+ 1 (x(v); z)
1542
+ ,
1543
+
1544
+ 16
1545
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
1546
+ which iterates to
1547
+ U (0)
1548
+ |I0|+1(v, z; I)
1549
+ H(0)
1550
+ 1 (x(v); z)
1551
+ =
1552
+ H(0)
1553
+ |I|+1(x(v), z; I)
1554
+ (x(z) + y(v))H(0)
1555
+ 1 (x(v); z)
1556
+ +
1557
+ 1
1558
+ (x(z) + y(v))
1559
+ |I|
1560
+
1561
+ l=1
1562
+
1563
+ I1⊎...⊎Il=I
1564
+ I1,...,Il̸=∅
1565
+ l�
1566
+ i=1
1567
+ (−λ)W (0)
1568
+ |Ii|+1(v; Ii)
1569
+ (x(z) + y(v))
1570
+ +
1571
+ |I|
1572
+
1573
+ l=1
1574
+
1575
+ I0⊎I1⊎...⊎Il=I
1576
+ I0,I1,...,Il̸=∅
1577
+ H(0)
1578
+ |I0|+1(x(v), z; I0)
1579
+ (x(z) + y(v))H(0)
1580
+ 1 (x(v); z)
1581
+ l�
1582
+ i=1
1583
+ (−λ)W (0)
1584
+ |Ii|+1(v; Ii)
1585
+ (x(z) + y(v))
1586
+ .
1587
+ The last line is iteratively removed by
1588
+ |I|−1
1589
+
1590
+ l=0
1591
+ (−1)l
1592
+
1593
+ I0⊎I1⊎...⊎Il=I
1594
+ I0,I1,...,Il̸=∅
1595
+ U (0)
1596
+ |I0|+1(v, z; I0)
1597
+ H(0)
1598
+ 1 (x(v); z)
1599
+ l�
1600
+ i=1
1601
+ H(0)
1602
+ |Ii|+1(x(v), z; Ii)
1603
+ H(0)
1604
+ 1 (x(v); z)
1605
+ =
1606
+ I
1607
+
1608
+ l=1
1609
+ (−1)l−1
1610
+ (x(z) + y(v))
1611
+
1612
+ I1⊎...⊎Il=I
1613
+ I1,...,Il̸=∅
1614
+ l�
1615
+ i=1
1616
+ H(0)
1617
+ |Ii|+1(x(v), z; Ii)
1618
+ H(0)
1619
+ 1 (x(v); z)
1620
+ (*)
1621
+ +
1622
+ 1
1623
+ (x(z) + y(v))
1624
+ |I|
1625
+
1626
+ l=1
1627
+
1628
+ I1⊎...⊎Il=I
1629
+ I1,...,Il̸=∅
1630
+ l�
1631
+ i=1
1632
+ (−λ)W (0)
1633
+ |Ii|+1(v; Ii)
1634
+ (x(z) + y(v))
1635
+ .
1636
+ (**)
1637
+ Taking
1638
+ 1
1639
+ x(z)+y(v) =
1640
+ U(0)
1641
+ 1
1642
+ (v,z)
1643
+ H(0)
1644
+ 1
1645
+ (x(v);z) into account, the line (*) corresponds to the case
1646
+ I0 = ∅ of the lhs. The line (**) equals DI
1647
+ 1
1648
+ x(z)+y(v) so that the assertion follows.
1649
+
1650
+ The previous lemmas allow us to solve the genus g = 0 case:
1651
+ Proposition 3.3. The solution of the system (2.10a)+(2.10b) with (2.9a)+(2.9b)
1652
+ is
1653
+ DI log H(0)
1654
+ 1 (x(v); z) =
1655
+ d
1656
+
1657
+ k=1
1658
+ DI log(x(v) + y(ˆzk)) + F (0)
1659
+ |I|+1(x(v); x(z); I) , (3.5)
1660
+ DI log P (0)
1661
+ 1 (x(v), x(z)) =
1662
+ d
1663
+
1664
+ k=0
1665
+ DI log(x(v) + y(ˆzk)) + F (0)
1666
+ |I|+1(x(v); x(z); I)
1667
+ (3.6)
1668
+ [note that the sum starts with k = 1 in (3.5) but k = 0 in (3.5)] where
1669
+ F (0)
1670
+ |I|+1(x(v); x(z); I) =
1671
+ |I|
1672
+
1673
+ j=1
1674
+ DI\uj
1675
+ λ
1676
+ (x(v) − x(uj))(x(z) + y(uj)) .
1677
+ (3.7)
1678
+
1679
+ BTR FROM EXTENDED LOOP EQUATIONS
1680
+ 17
1681
+ Proof. Both sides of (3.1) can only be a function of x(z) if (3.5) and (3.6) hold for
1682
+ some rational function F (0)
1683
+ |I|+1(x(v); x(z); I). Since
1684
+ H(0)
1685
+ |I|+1(x(v);z;I)
1686
+ H(0)
1687
+ 1
1688
+ (x(v);z)
1689
+ is holomorphic at
1690
+ x(v)+y(z) = 0 by (2.9a), the function F (0)
1691
+ |I|+1 cannot have poles at x(v)+y(ˆzj) = 0.
1692
+ Recall from (2.9b) and (2.9a) that the only poles of x(v) �→
1693
+ P (0)
1694
+ |I|+1(x(v),x(z);I)
1695
+ P (0)
1696
+ 1
1697
+ (x(v),x(z))
1698
+ are
1699
+ at x(v) = x(uj) for uj ∈ I and at the z with P (0)
1700
+ 1 (x(v), x(z)) = 0. The latter are
1701
+ already given by the first line of (3.6) so that the only poles of F (0)
1702
+ |I|+1 are located
1703
+ at x(v) = x(uj) for every uj ∈ I. These poles are simple and arise via (2.9a):
1704
+ lim
1705
+ x(v)→x(uj)(x(v) − x(uj))H(0)
1706
+ |I|+1(x(v); z; I) = λU (0)
1707
+ |I| (uj, z; I \ uj) ,
1708
+ if uj ∈ I. This gives for the corresponding limit of (2.15)
1709
+ lim
1710
+ x(v)→x(uj)(x(v) − x(uj))DI log H(0)
1711
+ 1 (x(v); z))
1712
+ = λ
1713
+ |I|−1
1714
+
1715
+ l=0
1716
+
1717
+ I0⊎...⊎Il=I\uj
1718
+ I1,...Il̸=∅
1719
+ U (0)
1720
+ |I0|+1(uj, z; I0)
1721
+ H(0)
1722
+ 1 (x(uj); z)
1723
+ (−1)l
1724
+ l�
1725
+ i=1
1726
+ H(0)
1727
+ |Ii|+1(x(uj); z; Ii)
1728
+ H(0)
1729
+ 1 (x(uj); z)
1730
+ = DI\uj
1731
+ λ
1732
+ x(z) + y(uj)
1733
+ (3.8)
1734
+ where Lemma 3.2 has been used. This finishes the proof.
1735
+
1736
+ 3.2. Linear and quadratic loop equations for genus g = 0. From (2.9b),
1737
+ (2.9a) and the connecting equation (2.5) we read off
1738
+ P (0)
1739
+ 1 (x(v), x(z)) = x(v) + x(z) − λ
1740
+ N
1741
+ d
1742
+
1743
+ k=1
1744
+ rk
1745
+ x(z) − x(εk) + O((x(v))−1) ,
1746
+ H(0)
1747
+ 1 (x(v); z) = 1 +
1748
+ 1
1749
+ x(v)
1750
+
1751
+ x(z) − y(z) − λ
1752
+ N
1753
+ d
1754
+
1755
+ k=1
1756
+ rk
1757
+ x(z) − x(εk)
1758
+
1759
+ + O((x(v))−2) ,
1760
+ P (0)
1761
+ 2 (x(v), x(z); u1) =
1762
+ λ
1763
+ x(z) − x(u1) +
1764
+ λ
1765
+ x(v)
1766
+ � λ
1767
+ N
1768
+ d
1769
+
1770
+ k=1
1771
+ rkW (0)
1772
+ 2 (εk; u1)
1773
+ x(z) − x(εk)
1774
+ +
1775
+ x(z) − y(u1) − λ
1776
+ N
1777
+ �d
1778
+ k=1
1779
+ rk
1780
+ x(z)−x(εk)
1781
+ (x(z) − x(u1))
1782
+
1783
+ + O((x(v))−2)
1784
+ and then for |I| ≥ 2
1785
+ P (0)
1786
+ |I|+1(x(v), x(z); I) =
1787
+ λ
1788
+ x(v)
1789
+ � λ
1790
+ N
1791
+ d
1792
+
1793
+ k=1
1794
+ rkW (0)
1795
+ |I|+1(εk; I)
1796
+ x(z) − x(εk) −
1797
+ |I|
1798
+
1799
+ j=1
1800
+ λW (0)
1801
+ |I| (uj; I \ uj)
1802
+ x(z) − x(uj)
1803
+
1804
+ 18
1805
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
1806
+ +
1807
+ λδ|I|,2
1808
+ (x(z) − x(u1))(x(z) − x(u2))
1809
+
1810
+ + O((x(v))−2) .
1811
+ These expansions combine for I ̸= ∅ to
1812
+ DI log P (0)
1813
+ 1 (x(v), x(z))
1814
+ (3.9)
1815
+ =
1816
+ λ
1817
+ (x(v))2
1818
+ � λ
1819
+ N
1820
+ d
1821
+
1822
+ k=1
1823
+ rkW (0)
1824
+ |I|+1(εk; I)
1825
+ x(z) − x(εk) − λ(1 − δ|I|,1)
1826
+ |I|
1827
+
1828
+ j=1
1829
+ W (0)
1830
+ |I|+1(uj; I \ uj)
1831
+ x(z) − x(uj)
1832
+
1833
+ +
1834
+ λδ|I|,1
1835
+ (x(z) − x(u1))
1836
+ � 1
1837
+ x(v) − y(u1)
1838
+ (x(v))2
1839
+
1840
+ + O((x(v))−3) .
1841
+ Comparison with (3.6) and (3.7) yields the following main result:
1842
+ Proposition 3.4. The functions W (0)
1843
+ |I|+1 satisfy for I ̸= ∅ the linear loop equations
1844
+ d
1845
+
1846
+ k=0
1847
+ W (0)
1848
+ |I|+1(ˆzk; I) =
1849
+ δ|I|,1
1850
+ (x(z) − x(u1)) −
1851
+ |I|
1852
+
1853
+ j=1
1854
+ DI\uj
1855
+
1856
+ 1
1857
+ x(z) + y(uj)
1858
+
1859
+ (3.10)
1860
+ and the quadratic loop equations
1861
+
1862
+ d
1863
+
1864
+ k=0
1865
+ y(ˆzk)W (0)
1866
+ |I|+1(ˆzk; I)
1867
+ (3.11)
1868
+ = λ
1869
+ 2
1870
+
1871
+ I1⊎I2=I
1872
+ I1,I2̸=∅
1873
+ d
1874
+
1875
+ k=0
1876
+ W (0)
1877
+ |I1|+1(ˆzk; I1)W (0)
1878
+ |I2|+1(ˆzk; I2) −
1879
+ |I|
1880
+
1881
+ j=1
1882
+ x(uj)DI\uj
1883
+
1884
+ 1
1885
+ x(z) + y(uj)
1886
+
1887
+ + λ
1888
+ N
1889
+ d
1890
+
1891
+ k=1
1892
+ rkW (0)
1893
+ |I|+1(εk; I)
1894
+ x(z) − x(εk) − λ(1 − δ|I|,1)
1895
+ |I|
1896
+
1897
+ j=1
1898
+ W (0)
1899
+ |I|+1(uj; I \ uj)
1900
+ x(z) − x(uj)
1901
+
1902
+ δ|I|,1y(u1)
1903
+ x(z) − x(u1) .
1904
+ 3.3. Computation of ˆQ(0). Combining (2.7) with (2.12d) and (2.13c)+(2.13d)
1905
+ gives
1906
+ ˆQ(0)
1907
+ 1 (x(v), x(z)) = −λ(x(v) + x(z) − 2x(0)) Æ(x(v)) Æ(x(z))
1908
+ (x(v) − x(z))2
1909
+ (3.12)
1910
+ = −λ(x(v) + x(z) − 2x(0))
1911
+ 2(x(v) − x(z))2
1912
+
1913
+ P (0)
1914
+ 1 (x(v), x(v))P (0)
1915
+ 1 (x(z), x(z))
1916
+ (x(v) − x(0))(x(z) − x(0))
1917
+ ,
1918
+ where (2.8) and (2.10a)+(2.10b) have been used.
1919
+ We take the logarithm of this equation and apply the loop insertion operator.
1920
+ The na¨ıve expectation is actually true if a symbolic expression D0
1921
+ Ix(0) is correctly
1922
+ identified:
1923
+ Proposition 3.5. For I ̸= ∅ one has
1924
+ DI log ˆQ(0)
1925
+ 1 (x(v), x(z))
1926
+ (3.13)
1927
+
1928
+ BTR FROM EXTENDED LOOP EQUATIONS
1929
+ 19
1930
+ = 1
1931
+ 2DI log P (0)
1932
+ 1 (x(v), x(v)) + 1
1933
+ 2DI log P (0)
1934
+ 1 (x(z), x(z))
1935
+ − 1
1936
+ 2
1937
+ |I|
1938
+
1939
+ l=1
1940
+ (−1)l−1
1941
+ l
1942
+
1943
+ 2l+1
1944
+ (x(v) + x(z) − 2x(0))l −
1945
+ 1
1946
+ (x(v) − x(0))l −
1947
+ 1
1948
+ (x(z) − x(0))l
1949
+
1950
+ ×
1951
+
1952
+ I1⊎...⊎Il=I
1953
+ I1,..,Il̸=∅
1954
+ l�
1955
+ i=1
1956
+ D0
1957
+ Iix(0) ,
1958
+ where D0
1959
+ Iix(0) are symbolic expressions uniquely determined by the condition that
1960
+ DI log ˆQ(0)
1961
+ 1 (x(v), x(z)) is holomorphic at v = 0.
1962
+ We provide parts of the proof as separate results. Let v ∈ {0, ±α1, ..., ±αd} be
1963
+ the set of zeros of x(v) + y(v) = 0.
1964
+ Lemma 3.6. U (0)
1965
+ |I|+1(z, z; I) is holomorphic at z = αk.
1966
+ Proof. Set v = z in (3.3a) and insert Proposition (3.3):
1967
+ DI log U (0)
1968
+ 1 (z, z) =
1969
+ d
1970
+
1971
+ l=1
1972
+ DI log(x(ˆzl) + y(ˆzl)) − DI log(x(z) + y(z))
1973
+ +
1974
+ |I|
1975
+
1976
+ j=1
1977
+ DI\uj
1978
+ λ
1979
+ (x(z) − x(uj))(x(z) + y(uj)) .
1980
+ By definition we have 0 = x(αk)+y(αk) = x(αk)−x(−αk) where the key identity
1981
+ (1.2) is used. This means that �
1982
+ αk
1983
+ 1 = −αk is one of the preimages of x(αk). Again
1984
+ with (1.2) we conclude x(ˆz1) + y(ˆz1) = −(x(z) + y(z)) for z = αk. This means
1985
+ that log(−x(ˆz1) − y(ˆz1)) − log(x(z) + y(z)) is holomorphic in a neighbourhood of
1986
+ z = αk and leads to a holomorphic DI log(x(ˆz1)+y(ˆz1))−DI log(x(z)+y(z)).
1987
+
1988
+ The zeros ˆQ(0)
1989
+ 1 (x(v), x(αk)) = 0 and P (0)
1990
+ 1 (x(αk), x(αk)) = 0 produce (higher)
1991
+ poles in DI log ˆQ(0)
1992
+ 1 (x(v), x(z)) and DI log P (0)
1993
+ 1 (x(z), x(z)) at x(z) = x(αk). But
1994
+ these cancel exactly:
1995
+ Lemma 3.7. DI log ˆQ(0)
1996
+ 1 (x(v), x(z)) − 1
1997
+ 2DI log P (0)
1998
+ 1 (x(z), x(z)) is holomorphic at
1999
+ x(z) = x(αk).
2000
+ Proof. Equations (3.1)+(3.3a) for v �→ z and (3.3a)+(3.3c) combine to
2001
+ DI log ˆQ(0)
2002
+ 1 (x(v), x(z)) − 1
2003
+ 2DI log P (0)
2004
+ 1 (x(z), x(z))
2005
+ = DI log ˆV (0)
2006
+ 1
2007
+ (v, z) − 1
2008
+ 2DI log U (0)
2009
+ 1 (z, z) + DI log(x(v) + y(v))
2010
+
2011
+ 20
2012
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
2013
+
2014
+ |I|
2015
+
2016
+ l=1
2017
+ (−1)l−1
2018
+ l
2019
+
2020
+ I1⊎...⊎Il=I
2021
+ I1,...,Il̸=∅
2022
+
2023
+ l�
2024
+ i=1
2025
+ ˆV (0)
2026
+ |Ii|+1(v, z; Il)
2027
+ ˆV (0)
2028
+ 1
2029
+ (v, z)
2030
+ − 1
2031
+ 2
2032
+ l�
2033
+ i=1
2034
+ U (0)
2035
+ |Ii|+1(z, z; Il)
2036
+ U (0)
2037
+ 1 (z, z)
2038
+ +
2039
+ l�
2040
+ i=1
2041
+ W (0)
2042
+ |Ii|+1(v; Il)
2043
+ x(v) + y(v)
2044
+
2045
+ .
2046
+ Here ˆV (0)
2047
+ 1
2048
+ (v, αk) ̸= 0 and U (0)
2049
+ 1 (αk, αk) ̸= 0 from the explicit formulae. Every func-
2050
+ tion V (0)
2051
+ |I|+1(v, z; I) and thus also ˆV (0)
2052
+ |I|+1(v, z; I) is holomorphic at z = αk from the
2053
+ matrix model construction. In fact, this holomorphicity is the key assumption for
2054
+ the recursive solution [BHW22, Prop. E.4] which we reproduce by our algebraic
2055
+ method. The assertion follows with Lemma 3.6.
2056
+
2057
+ By (2.9b) and (2.9d), Q(0)
2058
+ |I|+1(x(v), x(z); I) and P (0)
2059
+ |I|+1(x(v), x(z); I) have simple
2060
+ poles at x(z) = x(uj) which give rise to simple poles of DI log ˆQ(0)
2061
+ 1 (x(v), x(z))
2062
+ and 1
2063
+ 2DI log P (0)
2064
+ 1 (x(v), x(v)) at x(v) = x(uj). But they cancel:
2065
+ Lemma 3.8. DI log ˆQ(0)
2066
+ 1 (x(v), x(z)) − 1
2067
+ 2DI log P (0)
2068
+ 1 (x(v), x(v)) is holomorphic at
2069
+ every x(v) = x(uj) with uj ∈ I.
2070
+ Proof. From Proposition 3.3 at z = v we get
2071
+ lim
2072
+ v→uj(x(v) − x(uj))DI log P (0)
2073
+ 1 (x(v), x(v)) = DI\uj
2074
+
2075
+ x(uj) + y(uj) .
2076
+ Indeed, this term with numerator λ instead of 2λ arises from (3.7) at z = v. A
2077
+ second copy arises from a unique factor W (0)
2078
+ 2 (ˆvk; uj) in the first line of (3.6); its
2079
+ residue is
2080
+ Res
2081
+ x(v)→x(uj) DI\uj
2082
+ d
2083
+
2084
+ k=0
2085
+ λW (0)
2086
+ 2 (ˆvk; uj)dx(v)
2087
+ x(v) + y(ˆvk)
2088
+ =
2089
+ Res
2090
+ x(v)→x(uj) DI\uj
2091
+ d
2092
+
2093
+ k=0
2094
+ λW (0)
2095
+ 2 (ˆvk; uj)dx(v)
2096
+ x(v) + y(uj)
2097
+ = DI\uj
2098
+ λ
2099
+ x(uj) + y(uj)
2100
+ by (3.10). Next, from (2.12f) and (2.9b)+(2.9d), all at v ↔ z, we get
2101
+ lim
2102
+ v→uj(x(v) − x(uj)) ˆQ(0)
2103
+ |I|+1(x(z), x(v); I)
2104
+ = λM (0)
2105
+ |I| (x(z); uj; I \ uj) −
2106
+ λ2H(0)
2107
+ |I| (x(z); uj; I \ uj)
2108
+ (x(uj) − x(z))2
2109
+ ≡ λ ˆ
2110
+ M (0)
2111
+ |I| (x(z); uj; I \ uj) .
2112
+ Therefore, the residue of DI log ˆQ(0)
2113
+ 1
2114
+ is
2115
+ lim
2116
+ v→uj(x(v) − x(uj))DI log ˆQ(0)
2117
+ 1 (x(v), x(z))
2118
+
2119
+ BTR FROM EXTENDED LOOP EQUATIONS
2120
+ 21
2121
+ =
2122
+ |I|−1
2123
+
2124
+ l=0
2125
+ (−1)l
2126
+
2127
+ I0⊎I1⊎...⊎Il=I\uj
2128
+ I1,...,Il̸=∅
2129
+ ˆ
2130
+ M (0)
2131
+ |I0|+1(x(z); uj; I0)
2132
+ ˆQ(0)
2133
+ 1 (x(z), x(uj))
2134
+ l�
2135
+ i=1
2136
+ ˆQ(0)
2137
+ |Ii|+1(x(z); uj; Ii)
2138
+ ˆQ(0)
2139
+ 1 (x(z), x(uj))
2140
+ .
2141
+ In complete analogy to the proof of Lemma 3.2 we have
2142
+ lim
2143
+ v→uj(x(v) − x(uj))DI log ˆQ(0)
2144
+ 1 (x(v), x(z)) = DI\uj
2145
+ λ
2146
+ x(uj) + y(uj) ,
2147
+ which finishes the proof.
2148
+
2149
+ Proof of Proposition 3.5. Observe that (2.12f) implies limv→z
2150
+ ˆQ(0)
2151
+ 1 (x(v),x(z);I)
2152
+ ˆQ(0)
2153
+ 1 (x(v),x(z))
2154
+ =
2155
+ P (0)
2156
+ 1
2157
+ (x(z),x(z);I)
2158
+ P (0)
2159
+ 1
2160
+ (x(z),x(z)) so that DI log ˆQ(0)
2161
+ 1 (x(v), x(z)) is holomorphic at v = z for I ̸= ∅.
2162
+ Together with Lemma 3.7 and Lemma 3.8, the only remaining candidates for poles
2163
+ of DI log ˆQ(0)
2164
+ 1 (x(v), x(z))− 1
2165
+ 2DI log P (0)
2166
+ 1 (x(v), x(v))− 1
2167
+ 2DI log P (0)
2168
+ 1 (x(z), x(z)) are:
2169
+ • The zeros x(v)
2170
+ =
2171
+ x(0) and x(z)
2172
+ =
2173
+ x(0) of P (0)
2174
+ 1 (x(v), x(v)) and
2175
+ P (0)
2176
+ 1 (x(z), x(z)), respectively, which produce higher poles
2177
+ DI log P (0)
2178
+ 1 (x(v), x(v)) =
2179
+ |I|
2180
+
2181
+ l=1
2182
+ fl(I)
2183
+ (x(v) − x(0))l + O((x(v) − x(0))0)
2184
+ and similarly for DI log P (0)
2185
+ 1 (x(z), x(z)).
2186
+ Neither ˆQ(0)
2187
+ 1 (x(v), x(z)) has a
2188
+ zero at x(v) = x(0) or x(z) = x(0) nor ˆQ(0)
2189
+ |I|+1(x(v), x(z); I) has a pole there
2190
+ so that there is no compensation. The coefficients fl(I) only depend on the
2191
+ uj ∈ I but not on v, z; they are the same for DI log P (0)
2192
+ 1 (x(v), x(v)) and
2193
+ DI log P (0)
2194
+ 1 (x(z), x(z)) because of the symmetry of DI log ˆQ(0)
2195
+ 1 (x(v), x(z))
2196
+ in v ↔ z. We can trade the fl(I) for the symbolic expressions D0
2197
+ I′x(0).
2198
+ • The zeros x(v) = 2x(0) − x(z) of ˆQ(0)
2199
+ 1 (x(v), x(z)), which produce higher
2200
+ poles
2201
+ DI log ˆQ(0)
2202
+ 1 (x(v), x(z)) =
2203
+ |I|
2204
+
2205
+ l=1
2206
+ ˜fl(I)
2207
+ (x(v) + x(z) − 2x(0))l
2208
+ + O((x(v) + x(z) − 2x(0))0) .
2209
+ Neither P (0)
2210
+ 1 (x(v), x(v)) has a zero at x(v)
2211
+ =
2212
+ 2x(0) − x(z) nor
2213
+ P (0)
2214
+ |I|+1(x(v), x(z); I) has a pole there so that there is no compensation.
2215
+ Viewed as function x(v) �→ DI log ˆQ(0)
2216
+ 1 (x(v), x(z)), the coefficients ˜fl(I)
2217
+ are independent of x(v) and then by symmetry independent of x(z). The
2218
+ requirement limz→v
2219
+ ˆQ(0)
2220
+ 1 (x(v),x(z);I)
2221
+ ˆQ(0)
2222
+ 1 (x(v),x(z))
2223
+ =
2224
+ P (0)
2225
+ 1
2226
+ (x(v),x(v);I)
2227
+ P (0)
2228
+ 1
2229
+ (x(v),x(v))
2230
+ then fixes the relative
2231
+ factor between fl(I) and ˜fl(I) to be −2l+1 as given in (3.13).
2232
+ This completes the proof.
2233
+
2234
+
2235
+ 22
2236
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
2237
+ Example 3.9. We have
2238
+ lim
2239
+ v→0(x(v) − x(0))P (0)
2240
+ 1 (x(v), x(v); u)
2241
+ P (0)
2242
+ 1 (x(v), x(v))
2243
+ = λ
2244
+ 2W (0)
2245
+ 2 (0; u)
2246
+ from Proposition 3.3. This identifies D0
2247
+ ux(0) = − λ
2248
+ 2W (0)
2249
+ 2 (0; u).
2250
+ 4. Solution for g = 1
2251
+ 4.1. The case I = ∅. We consider the relation between the ‘genus inser-
2252
+ tions’ into log P (0)
2253
+ 1 (x(v), x(z)) and log H(0)
2254
+ 1 (x(v); z). Equation (2.13b), divided
2255
+ by P (0)
2256
+ 1 (x(v), x(z)), reads
2257
+ ˆP (1)
2258
+ 1 (x(v), x(z))
2259
+ P (0)
2260
+ 1 (x(v), x(z))
2261
+
2262
+ ˆH(1)
2263
+ 1 (x(v); z)
2264
+ H(0)
2265
+ 1 (x(v); z)
2266
+ (4.1)
2267
+ =
2268
+ λ
2269
+ x(v) + y(z)
2270
+ �∂Dw log H(0)
2271
+ 1 (x(v); z)
2272
+ ∂x(w)
2273
+ ���
2274
+ w=z + W (1)
2275
+ 1 (z) +
2276
+ λ
2277
+ (x(v) − x(z))3
2278
+
2279
+ =
2280
+ λW (1)
2281
+ 1 (z)
2282
+ x(v) + y(z) +
2283
+ λ2
2284
+ (x(v) − x(z))3(x(v) + y(z))
2285
+ +
2286
+ d
2287
+
2288
+ k=1
2289
+ λ2Ω(0)
2290
+ 2 (z, ˆzk)
2291
+ 2(x(v) + y(z))(x(v) + y(ˆzk)) +
2292
+ d
2293
+
2294
+ j=1
2295
+ λ2Ω(0)
2296
+ 2 (ˆzj, z)
2297
+ 2(x(v) + y(ˆzj))(x(v) + y(z))
2298
+ +
2299
+ λ2
2300
+ (x(v) + y(z))
2301
+
2302
+ ∂x(w)
2303
+ 1
2304
+ (x(v) − x(w))(x(z) + y(w))
2305
+ ���
2306
+ w=z ,
2307
+ where ∂Dw log H(0)
2308
+ 1
2309
+ (x(v);ˆzk)
2310
+ ∂x(w)
2311
+ ��
2312
+ w=z was provided by Proposition 3.3. The differentiation
2313
+ leads to Ω(0)
2314
+ 2
2315
+ (see (2.2)) which we have symmetrised in both arguments.
2316
+ We
2317
+ understand ∂y(w)
2318
+ ∂x(w) ≡ y′(w)
2319
+ x′(w).
2320
+ In order for
2321
+ ˆP (1)
2322
+ 1
2323
+ (x(v),x(z))
2324
+ P (0)
2325
+ 1
2326
+ (x(v),x(z)) to be a rational function of x(z), we need that
2327
+ ˆP (1)
2328
+ 1 (x(v), x(z))
2329
+ P (0)
2330
+ 1 (x(v), x(z))
2331
+ =
2332
+ d
2333
+
2334
+ k=0
2335
+ λW (1)
2336
+ 1 (ˆzk)
2337
+ x(v) + y(ˆzk) + λ2
2338
+ 2
2339
+ d
2340
+
2341
+ k,j=0
2342
+ k̸=j
2343
+ Ω(0)
2344
+ 2 (ˆzj, ˆzk)
2345
+ (x(v) + y(ˆzj))(x(v) + y(ˆzk))
2346
+ +
2347
+ d
2348
+
2349
+ k=0
2350
+ λ2
2351
+ (x(v) + y(ˆzk))
2352
+
2353
+ ∂x(w)
2354
+ 1
2355
+ (x(v) − x(w))(x(z) + y(w))
2356
+ ���
2357
+ w=ˆzk
2358
+ +
2359
+ d
2360
+
2361
+ k=0
2362
+ λ2
2363
+ (x(v) − x(z))3(x(v) + y(ˆzk)) + F (1)
2364
+ 1 (x(v); x(z))
2365
+ (4.2)
2366
+
2367
+ BTR FROM EXTENDED LOOP EQUATIONS
2368
+ 23
2369
+ for some rational function F (1)
2370
+ 1 , and that almost the same formula holds for
2371
+ ˆH(1)
2372
+ 1
2373
+ (x(v);z)
2374
+ H(0)
2375
+ 1
2376
+ (x(v);z), only with preimage sum �d
2377
+ k=1 instead of �d
2378
+ k=0. By (2.12b), (2.9a)
2379
+ and (2.9c), the function
2380
+ ˆH(1)
2381
+ 1
2382
+ (x(v);z)
2383
+ H(0)
2384
+ 1
2385
+ (x(v);z) is holomorphic at x(v) + y(z) = 0 so that F (1)
2386
+ 1
2387
+ must be holomorphic at x(v) + y(ˆzk) = 0. It follows from (2.12c), (2.9b) and
2388
+ (2.9d) that the only other poles of x(v) �→
2389
+ ˆP (1)
2390
+ 1
2391
+ (x(v),x(z))
2392
+ P (0)
2393
+ 1
2394
+ (x(v),x(z)) are at x(v) = x(z) of
2395
+ order at most 2; more precisely
2396
+ ˆP (1)
2397
+ 1 (x(v), x(z))
2398
+ P (0)
2399
+ 1 (x(v), x(z))
2400
+ =
2401
+ λ
2402
+ (x(v) − x(z))2
2403
+ Q(0)
2404
+ 1 (x(z), x(z))
2405
+ P (0)
2406
+ 1 (x(z), x(z))
2407
+ (4.3)
2408
+ +
2409
+ λ
2410
+ (x(v) − x(z))
2411
+
2412
+ ∂x(w)
2413
+ Q(0)
2414
+ 1 (x(w), x(z))
2415
+ P (0)
2416
+ 1 (x(w), x(z))
2417
+ ���
2418
+ w=z + regular,
2419
+ where ‘regular’ means O((x(v) − x(z))0). To achieve this we necessarily need
2420
+ F (1)
2421
+ 1 (x(v); x(z)) =
2422
+ 3
2423
+
2424
+ a=1
2425
+ F (1)a
2426
+ 1
2427
+ (x(z))
2428
+ (x(v) − x(z))a ,
2429
+ F (1)3
2430
+ 1
2431
+ (x(z)) = −
2432
+ d
2433
+
2434
+ k=0
2435
+ λ2
2436
+ x(z) + y(ˆzk) ,
2437
+ F (1)2
2438
+ 1
2439
+ (x(z)) = λQ(0)
2440
+ 1 (x(z), x(z))
2441
+ P (0)
2442
+ 1 (x(z), x(z))
2443
+ ,
2444
+ F (1)1
2445
+ 1
2446
+ (x(z)) =
2447
+
2448
+ ∂x(w)
2449
+ λQ(0)
2450
+ 1 (x(w), x(z))
2451
+ P (0)
2452
+ 1 (x(w), x(z))
2453
+ ���
2454
+ w=z − 1
2455
+ 2
2456
+
2457
+ ∂x(w)
2458
+ d
2459
+
2460
+ k=0
2461
+ λ2
2462
+ (x(z) + y(w))2
2463
+ ���
2464
+ w=ˆzk.
2465
+ From (3.12) and (2.12f) we obtain a representation
2466
+ λQ(0)
2467
+ 1 (x(w), x(z))
2468
+ P (0)
2469
+ 1 (x(w), x(z))
2470
+ =
2471
+ λ2
2472
+ (x(w) − x(z))2
2473
+
2474
+ 1 −
2475
+
2476
+ 1 +
2477
+ (x(z) − x(w))2
2478
+ 4(x(w) − x(0))(x(z) − x(0))
2479
+ × exp
2480
+ �1
2481
+ 2 log P (0)
2482
+ 1 (x(w), x(w))
2483
+ P (0)
2484
+ 1 (x(z), x(z))
2485
+ − log P (0)
2486
+ 1 (x(w), x(z))
2487
+ P (0)
2488
+ 1 (x(z), x(z))
2489
+ ��
2490
+ = −
2491
+ λ2
2492
+ 8(x(z) − x(0))2 − λ2
2493
+ 2
2494
+ ∂2 log P (0)
2495
+ 1 (x(w), x(z))
2496
+ ∂x(w)∂x(z)
2497
+ ���
2498
+ w=z
2499
+ + (x(w) − x(z))
2500
+
2501
+ λ2
2502
+ 8(x(z) − x(0))3 − λ2
2503
+ 2
2504
+ ∂3 log P (0)
2505
+ 1 (x(w), x(z))
2506
+ ∂(x(w))2∂x(z)
2507
+ ���
2508
+ w=z
2509
+
2510
+ + O((x(w) − x(z))2) .
2511
+ Inserting the resulting Taylor expansion into F (1)1
2512
+ 1
2513
+ (x(z)) and F (1)2
2514
+ 1
2515
+ (x(z)) leads to:
2516
+
2517
+ 24
2518
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
2519
+ Proposition 4.1.
2520
+ ˆP (1)
2521
+ 1 (x(v), x(z))
2522
+ P (0)
2523
+ 1 (x(v), x(z))
2524
+ (4.4)
2525
+ =
2526
+ d
2527
+
2528
+ k=0
2529
+ λW (1)
2530
+ 1 (ˆzk)
2531
+ x(v) + y(ˆzk) + λ2
2532
+ 2
2533
+ d
2534
+
2535
+ k,j=0
2536
+ k̸=j
2537
+ Ω(0)
2538
+ 2 (ˆzj, ˆzk)
2539
+ (x(v) + y(ˆzj))(x(v) + y(ˆzk))
2540
+
2541
+ λ2
2542
+ (x(v) − x(z))3
2543
+ �∂ log P (0)
2544
+ 1 (x(v), x(w))
2545
+ ∂x(w)
2546
+ − ∂ log P (0)
2547
+ 1 (x(z), x(w))
2548
+ ∂x(w)
2549
+ ����
2550
+ w=z
2551
+ +
2552
+ λ2
2553
+ 2(x(v) − x(z))2
2554
+ ∂2 log P (0)
2555
+ 1 (x(z), x(w))
2556
+ ∂x(z)∂x(w)
2557
+ ���
2558
+ w=z
2559
+
2560
+ λ2
2561
+ 8(x(v) − x(z))2(x(z) − x(0))2 +
2562
+ λ2
2563
+ 8(x(v) − x(z))(x(z) − x(0))3 .
2564
+ 4.2. The case I ̸= ∅. Our goal is to determine
2565
+ DI
2566
+ ˆP (1)
2567
+ 1 (x(v), x(z))
2568
+ P (0)
2569
+ 1 (x(v), x(z))
2570
+ =
2571
+ |I|
2572
+
2573
+ l=0
2574
+ (−1)l
2575
+
2576
+ I0⊎I1⊎...⊎Il=I
2577
+ I1,...,Il̸=∅
2578
+ ˆP (1)
2579
+ |I0|+1(x(v), x(z); I0)
2580
+ P (0)
2581
+ 1 (x(v), x(z))
2582
+ l�
2583
+ i=1
2584
+ P (0)
2585
+ |Ii|+1(x(v), x(z); Ii)
2586
+ P (0)
2587
+ 1 (x(v), x(z))
2588
+ in parallel with DI
2589
+ ˆH(1)
2590
+ 1
2591
+ (x(v);z)
2592
+ H(0)
2593
+ 1
2594
+ (x(v);z). The Dyson-Schwinger equations (2.13b) for g ≤ 1
2595
+ imply:
2596
+ Lemma 4.2.
2597
+ DI
2598
+ ˆP (1)
2599
+ 1 (x(v), x(z))
2600
+ P (0)
2601
+ 1 (x(v), x(z))
2602
+ − DI
2603
+ ˆH(1)
2604
+ 1 (x(v); z)
2605
+ H(0)
2606
+ 1 (x(v); z)
2607
+ (4.5)
2608
+ =
2609
+
2610
+ I1⊎I2=I
2611
+ DI1
2612
+
2613
+ λ
2614
+ x(v) + y(z)
2615
+ ��∂DI2∪w log H(0)
2616
+ 1 (x(v); z)
2617
+ ∂x(w)
2618
+ ���
2619
+ w=z
2620
+ + W (1)
2621
+ |I2|+1(z; I2) +
2622
+ λδ|I2|,∅
2623
+ (x(v) − x(z))3
2624
+
2625
+ .
2626
+ Proof. By induction in |I|. The case |I| = 0 is (2.13b) for g = 1. Otherwise
2627
+ DI
2628
+ ˆP (1)
2629
+ 1 (x(v), x(z))
2630
+ P (0)
2631
+ 1 (x(v), x(z))
2632
+ − DI
2633
+ ˆH(1)
2634
+ 1 (x(v); z)
2635
+ H(0)
2636
+ 1 (x(v); z)
2637
+ =
2638
+ ˆP (1)
2639
+ |I|+1(x(v), x(z); I)
2640
+ P (0)
2641
+ 1 (x(v), x(z))
2642
+
2643
+
2644
+ I1⊎I2=I
2645
+ I2̸=∅
2646
+ DI1
2647
+ � ˆP (1)
2648
+ 1 (x(v), x(z))
2649
+ P (0)
2650
+ 1 (x(v), x(z))
2651
+
2652
+ ·
2653
+ P (0)
2654
+ |I2|+1(x(v), x(z); I2)
2655
+ P (0)
2656
+ 1 (x(v), x(z))
2657
+
2658
+ BTR FROM EXTENDED LOOP EQUATIONS
2659
+ 25
2660
+
2661
+ ˆH(1)
2662
+ |I|+1(x(v); z; I)
2663
+ H(0)
2664
+ 1 (x(v); z)
2665
+ +
2666
+
2667
+ I1⊎I2=I
2668
+ I2̸=∅
2669
+ DI1
2670
+ � ˆH(1)
2671
+ 1 (x(v); z)
2672
+ H(0)
2673
+ 1 (x(v); z)
2674
+
2675
+ ·
2676
+ H(0)
2677
+ |I2|+1(x(v); z; I2)
2678
+ H(0)
2679
+ 1 (x(v); z)
2680
+ =
2681
+
2682
+ I1⊎I2=I
2683
+ I1̸=∅
2684
+ λW (0)
2685
+ |I1|+1(z; I1)
2686
+ x(v) + y(z)
2687
+ ˆH(1)
2688
+ |I2|+1(x(v); z; I2)
2689
+ H(0)
2690
+ 1 (x(v); z)
2691
+ (*)
2692
+ +
2693
+
2694
+ I1⊎I2=I
2695
+ λ(W (1)
2696
+ |I1|+1(z; I1) +
2697
+ λδ|I1|,0
2698
+ (x(v)−x(z))3)
2699
+ x(v) + y(z)
2700
+ H(0)
2701
+ |I2|+1(x(v); z; I2)
2702
+ H(0)
2703
+ 1 (x(v); z)
2704
+ (‡)
2705
+ +
2706
+ λ
2707
+ x(v) + y(z)
2708
+
2709
+ ∂x(w)
2710
+ H(0)
2711
+ |I|+2(x(v); z; I ∪ w)
2712
+ H(0)
2713
+ 1 (x(v); z)
2714
+ ���
2715
+ w=z
2716
+ (§)
2717
+
2718
+
2719
+ I1⊎I2⊎I3=I
2720
+ I3̸=∅
2721
+ �∂DI2∪w log H(0)
2722
+ 1 (x(v); z)
2723
+ ∂x(w)
2724
+ ���
2725
+ w=z + W (1)
2726
+ |I1|+1(z; I1) +
2727
+ λδ|I1|,0
2728
+ (x(v) − x(z))3
2729
+
2730
+ × DI2
2731
+
2732
+ λ
2733
+ x(v) + y(z)
2734
+
2735
+ ·
2736
+ P (0)
2737
+ |I3|+1(x(v), x(z); I3)
2738
+ P (0)
2739
+ 1 (x(v), x(z))
2740
+ (†)
2741
+
2742
+
2743
+ I1⊎I2⊎I3=I
2744
+ I2̸=∅
2745
+ DI1
2746
+ � ˆH(1)
2747
+ 1 (x(v); z)
2748
+ H(0)
2749
+ 1 (x(v); z)
2750
+
2751
+ ·
2752
+ λW (0)
2753
+ |I2|+1(z; I2)
2754
+ x(v) + y(z)
2755
+ H(0)
2756
+ |I3|+1(x(v); z; I3)
2757
+ H(0)
2758
+ 1 (x(v); z)
2759
+ ,
2760
+ (**)
2761
+ where the induction hypothesis was used. The lines (*) and (**) cancel when
2762
+ distinguishing I3 = ∅ and I3 ̸= ∅. Take (3.2), multiplied by
2763
+ λ
2764
+ x(v)+y(z):
2765
+ λP (0)
2766
+ |˜I|+1(x(v), x(z); ˜I)
2767
+ (x(v) + y(z))P (0)
2768
+ 1 (x(v), x(z))
2769
+ =
2770
+ λH(0)
2771
+ |˜I|+1(x(v); z; ˜I)
2772
+ (x(v) + y(z))H(0)
2773
+ 1 (x(v); z)
2774
+ +
2775
+
2776
+ ˜I′⊎˜I′′=˜I
2777
+ ˜I′̸=∅
2778
+ λW (0)
2779
+ |˜I′|+1(z; ˜I′)
2780
+ x(v) + y(z)
2781
+ λH(0)
2782
+ |˜I′′|+1(x(v); z; ˜I′′)
2783
+ (x(v) + y(z))H(0)
2784
+ 1 (x(v); z)
2785
+ .
2786
+ For ˜I′′ ̸= ∅, take this equation for ˜I �→ ˜I′′, multiply by
2787
+ (−λ)W (0)
2788
+ |˜I′|+1(z;˜I′)
2789
+ x(v)+y(z)
2790
+ and sum
2791
+ over ˜I′ ⊎ ˜I′′ = ˜I with ˜I′ ̸= ∅. Repeat until all products of
2792
+ (−λ)W (0)
2793
+ |Ii|+1(z;Ii)
2794
+ x(v)+y(z)
2795
+ with
2796
+ λH(0)
2797
+ |I0|+1(x(v);z;I0)
2798
+ (x(v)+y(z))H(0)
2799
+ 1
2800
+ (x(v);z) are removed. The result is
2801
+
2802
+ I2⊎I3=I′
2803
+ I3̸=∅
2804
+ DI2
2805
+
2806
+ λ
2807
+ x(v) + y(z)
2808
+ �P (0)
2809
+ |I3|+1(x(v), x(z); I3)
2810
+ P (0)
2811
+ 1 (x(v), x(z))
2812
+ (4.6)
2813
+
2814
+ 26
2815
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
2816
+ =
2817
+ λ
2818
+ x(v) + y(z)
2819
+ H(0)
2820
+ |I′|+1(x(v); z; I′)
2821
+ H(0)
2822
+ 1 (x(v); z)
2823
+ +
2824
+
2825
+ I2⊎I3=I′
2826
+ I3̸=∅
2827
+ λW (0)
2828
+ |I3|+1(z; I3)
2829
+ x(v) + y(z) DI2
2830
+ λ
2831
+ x(v) + y(z) ,
2832
+ which we use in (†). Multiplied with (W (1)
2833
+ |I1|+1(z; I1) +
2834
+ λδ|I1|,0
2835
+ (x(v)−x(z))3), this cancels all
2836
+ terms with I2 ̸= ∅ in (‡) and completes the case I2 = ∅ in (‡) to the last line of
2837
+ the assertion (4.5). Similarly, expanding
2838
+ DI∪w log H(0)
2839
+ 1 (x(v); z)
2840
+ =
2841
+ |I|
2842
+
2843
+ l=0
2844
+ (−1)l
2845
+
2846
+ I0⊎I1⊎...⊎Il=I
2847
+ I1,...,Il̸=∅
2848
+ H(0)
2849
+ |I0|+2(x(v); z; I0 ∪ w)
2850
+ H(0)
2851
+ 1 (x(v); z)
2852
+ l�
2853
+ i=1
2854
+ H(0)
2855
+ |Ii|+1(x(v); z; Ii)
2856
+ H(0)
2857
+ 1 (x(v); z)
2858
+ we get for the product with the first term on the rhs of (4.6)
2859
+
2860
+ I2∪I′=I
2861
+ I′̸=∅
2862
+ DI∪w log H(0)
2863
+ 1 (x(v); z)
2864
+ λ
2865
+ x(v) + y(z)
2866
+ H(0)
2867
+ |I′|+1(x(v); z; I′)
2868
+ H(0)
2869
+ 1 (x(v); z)
2870
+ =
2871
+ λ
2872
+ x(v) + y(z)
2873
+ �H(0)
2874
+ |I|+2(x(v); z; I ∪ w)
2875
+ H(0)
2876
+ 1 (x(v); z)
2877
+ − DI∪w log H(0)
2878
+ 1 (x(v); z)
2879
+
2880
+ .
2881
+ The first term on the rhs cancels the line (§), and the second term completes the
2882
+ final term in (4.6) to the second line of the assertion (4.5).
2883
+
2884
+ We conclude from Proposition 3.3:
2885
+
2886
+ ∂x(w)DI∪w log H(0)
2887
+ 1 (x(v); z)
2888
+ ���
2889
+ w=z
2890
+ (4.7)
2891
+ =
2892
+ d
2893
+
2894
+ k=1
2895
+ |I|
2896
+
2897
+ l=0
2898
+ (−1)l
2899
+
2900
+ I0⊎I1⊎...⊎Il=I
2901
+ I1,...Il̸=∅
2902
+ λDI0Ω(0)
2903
+ 2 (ˆzk, z)
2904
+ x(v) + y(ˆzk)
2905
+ l�
2906
+ j=1
2907
+ λW (0)
2908
+ |Ij|+1(ˆzk; Ij)
2909
+ x(v) + y(ˆzk)
2910
+
2911
+ |I|
2912
+
2913
+ j=1
2914
+ DI\uj
2915
+ λ2Ω(0)
2916
+ 2 (z, uj)
2917
+ (x(v) − x(uj))(x(z) + y(uj))2 + DI
2918
+ λ
2919
+ (x(v) − x(z))2(x(z) + y(z))
2920
+ +
2921
+
2922
+ ∂x(w)DI
2923
+ λ
2924
+ (x(v) − x(z))(x(z) + y(w))
2925
+ ���
2926
+ w=z .
2927
+ We insert (4.7) into (4.5) and get with the derivation property of DI:
2928
+ DI
2929
+ ˆP (1)
2930
+ 1 (x(v), x(z))
2931
+ P (0)
2932
+ 1 (x(v), x(z))
2933
+ − DI
2934
+ ˆH(1)
2935
+ 1 (x(v); z)
2936
+ H(0)
2937
+ 1 (x(v); z)
2938
+ = DI
2939
+ λW (1)
2940
+ 1 (z)
2941
+ x(v) + y(z) +
2942
+ d
2943
+
2944
+ k=1
2945
+ DI
2946
+ λ2Ω(0)
2947
+ 2 (ˆzk, z)
2948
+ (x(v) + y(ˆzk))(x(v) + y(z))
2949
+
2950
+ BTR FROM EXTENDED LOOP EQUATIONS
2951
+ 27
2952
+
2953
+ |I|
2954
+
2955
+ j=1
2956
+ λ3
2957
+ (x(v) − x(uj))DI\uj
2958
+ Ω(0)
2959
+ 2 (z, uj)
2960
+ (x(v) + y(z))(x(z) + y(uj))2
2961
+ +
2962
+ λ2
2963
+ (x(v) − x(z))3DI
2964
+ 1
2965
+ (x(z) + y(z))
2966
+ +
2967
+ λ2
2968
+ (x(v) − x(z))
2969
+
2970
+ ∂x(w)DI
2971
+ 1
2972
+ (x(v) + y(z))(x(z) + y(w))
2973
+ ���
2974
+ w=z .
2975
+ As before, in order for DI
2976
+ ˆP (1)
2977
+ 1
2978
+ (x(v),x(z))
2979
+ P (0)
2980
+ 1
2981
+ (x(v),x(z)) to be a rational function of x(z), we need
2982
+ DI
2983
+ ˆP (1)
2984
+ 1 (x(v), x(z))
2985
+ P (0)
2986
+ 1 (x(v), x(z))
2987
+ = K(1)
2988
+ 0,|I|+1(x(v); z; I) + F (1)
2989
+ |I|+1(x(v); x(z); I) ,
2990
+ (4.8a)
2991
+ DI
2992
+ ˆH(1)
2993
+ 1 (x(v); z)
2994
+ H(0)
2995
+ 1 (x(v); z)
2996
+ = K(1)
2997
+ 1,|I|+1(x(v); z; I) + F (1)
2998
+ |I|+1(x(v); x(z); I) ,
2999
+ (4.8b)
3000
+ where (note the difference in the lower subscript A between (4.8a) and (4.8b))
3001
+ K(1)
3002
+ A,|I|+1(x(v); z; I)
3003
+ (4.9)
3004
+ :=
3005
+ d
3006
+
3007
+ k=A
3008
+ DI
3009
+ λW (1)
3010
+ 1 (ˆzk)
3011
+ x(v) + y(ˆzk) + λ2
3012
+ 2
3013
+ d
3014
+
3015
+ j,k=A
3016
+ j̸=k
3017
+ DI
3018
+ Ω(0)
3019
+ 2 (ˆzj, ˆzk)
3020
+ (x(v) + y(ˆzj))(x(v) + y(ˆzk))
3021
+
3022
+ |I|
3023
+
3024
+ j=1
3025
+ λ3
3026
+ (x(v) − x(uj))
3027
+ d
3028
+
3029
+ k=A
3030
+ DI\uj
3031
+ Ω(0)
3032
+ 2 (ˆzk, uj)
3033
+ (x(v) + y(ˆzk))(x(z) + y(uj))2
3034
+ +
3035
+ λ2
3036
+ (x(v) − x(z))3DI
3037
+ d
3038
+
3039
+ k=A
3040
+ 1
3041
+ (x(z) + y(ˆzk))
3042
+ +
3043
+ λ2
3044
+ (x(v) − x(z))
3045
+ d
3046
+
3047
+ k=A
3048
+
3049
+ ∂x(w)DI
3050
+ 1
3051
+ (x(v) + y(ˆzk))(x(z) + y(w))
3052
+ ���
3053
+ w=ˆzk
3054
+ and F (1)
3055
+ |I|+1(x(v); x(z); I) is some rational function (the same in (4.8a) and (4.8b))
3056
+ in both x(v) and x(z). In fact, also K(1)
3057
+ 0,|I|+1(x(v); z; I) is rational in both x(v)
3058
+ and x(z). Since DI
3059
+ ˆH(1)
3060
+ 1
3061
+ (x(v),x(z))
3062
+ H(0)
3063
+ 1
3064
+ (x(v),x(z)) is holomorphic at x(v) + y(z) = 0, the rational
3065
+ function x(v) �→ F (1)
3066
+ |I|+1 can only have poles at x(v) = x(z) and at x(v) = x(uj)
3067
+ with uj ∈ I. With the behavior resulting from (2.9a) and (2.12b),
3068
+ ˆH(g)
3069
+ |I|+1(x(v); z; I)
3070
+ H(0)
3071
+ 1 (x(v); z)
3072
+ =
3073
+ |I|
3074
+
3075
+ j=1
3076
+ λ
3077
+ (x(v) − x(uj))
3078
+ ˆU (g)(z, uj; I \ uj)
3079
+ H(0)
3080
+ 1 (x(uj); z)
3081
+ + O((x(v) − x(uj))0)
3082
+ (4.10)
3083
+
3084
+ 28
3085
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
3086
+ and (4.7) we see that F (1)
3087
+ |I|+1 has first-order poles at x(v) = x(uj). We determine
3088
+ them in Proposition 4.5 below.
3089
+ Near x(v) = x(z) we have in (4.9)
3090
+ K(1)
3091
+ 0,|I|+1(x(v); z; I)
3092
+ (4.11)
3093
+ =
3094
+ λ2
3095
+ (x(v) − x(z))
3096
+ d
3097
+
3098
+ k=0
3099
+
3100
+ ∂x(w)DI
3101
+ 1
3102
+ (x(z) + y(ˆzk))(x(z) + y(w))
3103
+ ���
3104
+ w=ˆzk
3105
+ +
3106
+ λ2
3107
+ (x(v) − x(z))3DI
3108
+ d
3109
+
3110
+ k=0
3111
+ 1
3112
+ (x(z) + y(ˆzk)) + O((x(v) − x(z))0)
3113
+ = −
3114
+ λ2
3115
+ (x(v) − x(z))
3116
+ 1
3117
+ 2
3118
+ ∂3
3119
+ ∂(x(z))2∂x(w)
3120
+ d
3121
+
3122
+ k=0
3123
+ DI log(x(z) + y( ˆwk))
3124
+ ���
3125
+ w=z
3126
+ +
3127
+ λ2
3128
+ (x(v) − x(z))3DI
3129
+ d
3130
+
3131
+ k=0
3132
+ 1
3133
+ (x(z) + y(ˆzk)) + O((x(v) − x(z))0)
3134
+ = −
3135
+ λ2
3136
+ (x(v) − x(z))
3137
+ �1
3138
+ 2
3139
+ ∂3(DI log P (0)
3140
+ 1 (x(z), x(w)))
3141
+ ∂(x(z))2∂x(w)
3142
+ ���
3143
+ w=z
3144
+ +
3145
+ |I|
3146
+
3147
+ j=1
3148
+ DI\uj
3149
+ λ
3150
+ (x(z) − x(uj))3(x(z) + y(uj))2
3151
+
3152
+ +
3153
+ λ2
3154
+ (x(v) − x(z))3DI
3155
+ d
3156
+
3157
+ k=0
3158
+ 1
3159
+ (x(z) + y(ˆzk)) + O((x(v) − x(z))0) .
3160
+ In the last step we have used Proposition 3.3. On the other hand, from (2.12c)
3161
+ we get
3162
+ DI
3163
+ ˆP (1)
3164
+ 1 (x(v), x(z))
3165
+ P (0)
3166
+ 1 (x(v), x(z))
3167
+ =
3168
+ λ
3169
+ (x(v) − x(z))2DI
3170
+ Q(0)
3171
+ 1 (x(z), x(z))
3172
+ P (0)
3173
+ 1 (x(z), x(z))
3174
+ (4.12)
3175
+ +
3176
+ λ
3177
+ (x(v) − x(z))
3178
+
3179
+ ∂x(w)
3180
+
3181
+ DI
3182
+ Q(0)
3183
+ 1 (x(w), x(z))
3184
+ P (0)
3185
+ 1 (x(w), x(z))
3186
+
3187
+ w=z
3188
+ + O((x(v) − x(z))0) ,
3189
+ where
3190
+ DI
3191
+ Q(0)
3192
+ 1 (x(z), x(z))
3193
+ P (0)
3194
+ 1 (x(z), x(z))
3195
+ :=
3196
+ |I|
3197
+
3198
+ l=0
3199
+ (−1)l
3200
+
3201
+ I0⊎I1⊎...⊎Il=I
3202
+ I1,...,Il̸=∅
3203
+ Q(0)
3204
+ |I0|+1(x(w), x(z); I0)
3205
+ P (0)
3206
+ 1 (x(w), x(z))
3207
+ l�
3208
+ i=1
3209
+ P (0)
3210
+ |Ii|+1(x(w), x(z); Ii)
3211
+ P (0)
3212
+ 1 (x(w), x(z))
3213
+ .
3214
+
3215
+ BTR FROM EXTENDED LOOP EQUATIONS
3216
+ 29
3217
+ These properties lead to an expansion
3218
+ F (1)
3219
+ |I|+1(x(v); x(z); I) =
3220
+ 3
3221
+
3222
+ a=1
3223
+ F (1)a
3224
+ |I|+1(x(z); I)
3225
+ ((x(v) − x(z))a +
3226
+ |I|
3227
+
3228
+ j=1
3229
+ ˆF (1)j
3230
+ |I| (x(z); I \ uj)
3231
+ x(v) − x(uj)
3232
+ (4.13)
3233
+ where (4.11) and (4.12) combine to
3234
+ F (1)3
3235
+ |I|+1(x(z); I) = −
3236
+ d
3237
+
3238
+ k=0
3239
+ DI
3240
+ λ2
3241
+ x(z) + y(ˆzk)
3242
+ (4.14a)
3243
+ F (1)2
3244
+ |I|+1(x(z); I) = λDI
3245
+ Q(0)
3246
+ 1 (x(z), x(z))
3247
+ P (0)
3248
+ 1 (x(z), x(z))
3249
+ (4.14b)
3250
+ F (1)1
3251
+ |I|+1(x(z); I) = λ
3252
+
3253
+
3254
+ DI
3255
+ Q(0)
3256
+ 1 (x(z), x(w))
3257
+ P (0)
3258
+ 1 (x(z), x(w))
3259
+
3260
+ ∂x(w)
3261
+ + λ2
3262
+ 2
3263
+ ∂3(DI log P (0)
3264
+ 1 (x(z), x(w)))
3265
+ ∂(x(z))2∂x(w)
3266
+ +
3267
+ |I|
3268
+
3269
+ j=1
3270
+ DI\uj
3271
+ λ3
3272
+ (x(z) − x(uj))3(x(z) + y(uj))2
3273
+ ���
3274
+ w=z .
3275
+ (4.14c)
3276
+ It remains to determine the functions ˆF (1)j
3277
+ |I| (x(z); I \ uj). We will need two
3278
+ lemmas:
3279
+ Lemma 4.3.
3280
+ ˆU (1)
3281
+ |I|+1(v, z; I)
3282
+ =
3283
+
3284
+ I1⊎I2=I
3285
+ DI1
3286
+ 1
3287
+ (x(z) + y(v)) ·
3288
+
3289
+ ˆH(1)
3290
+ |I2|+1(x(v); z; I2) − λ
3291
+ ∂U (0)
3292
+ |I|+2(v, z; I ∪ s)
3293
+ ∂x(s)
3294
+ ���
3295
+ s=v
3296
+ − λ
3297
+
3298
+ I′
3299
+ 2⊎I′′
3300
+ 2 =I2
3301
+
3302
+ W (1)
3303
+ |I′
3304
+ 2|+1(v; I′
3305
+ 2) +
3306
+ λδI′
3307
+ 2,∅
3308
+ (x(z) − x(v))3
3309
+
3310
+ U (0)
3311
+ |I′′
3312
+ 2 |+1(v, z; I′′
3313
+ 2 )
3314
+
3315
+ .
3316
+ Proof. Resolve (2.13a) at g = 1 for the first term ˆU (1)(v, z; I), which becomes
3317
+ − �
3318
+ I1⊎I2=I,I1̸=∅
3319
+ λW (0)
3320
+ |I1|+1(v;I1)
3321
+ x(z)+y(v)
3322
+ ˆU (1)(v, z; I2) plus other terms. Iterate this procedure
3323
+ for every ˆU (1)(v, z; I2) and so on. The resulting products of
3324
+ λW (0)
3325
+ |Ii|+1(v;Ii)
3326
+ x(z)+y(v)
3327
+ can be
3328
+ collected to DI1
3329
+ 1
3330
+ (x(z)+y(v)).
3331
+
3332
+ Lemma 4.4.
3333
+ DI
3334
+ U (0)
3335
+ 2 (v, z; s)
3336
+ H(0)
3337
+ 1 (x(v); z)
3338
+ = −DI
3339
+ λ
3340
+
3341
+ W (0)
3342
+ 2 (v; s) −
3343
+ 1
3344
+ x(v)−x(s)
3345
+
3346
+ (x(z) + y(v))2
3347
+ (4.15)
3348
+
3349
+ 30
3350
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
3351
+ +
3352
+ d
3353
+
3354
+ k=1
3355
+ DI
3356
+ λW (0)
3357
+ |I0|+2(ˆzk; s)
3358
+ (x(v) + y(ˆzk))(x(z) + y(v))
3359
+
3360
+ |I|
3361
+
3362
+ i=1
3363
+ DI\ui
3364
+ λ2W (0)
3365
+ 2 (ui; s)
3366
+ (x(v) − x(ui))(x(z) + y(ui))2(x(z) + y(v))
3367
+
3368
+
3369
+ I⊎I′′=I
3370
+ DI′
3371
+ λ
3372
+ x(z) + y(v)DI′′
3373
+ 1
3374
+ (x(z)+y(v)) −
3375
+ 1
3376
+ (x(z)+y(s))
3377
+ (x(v) − x(s))
3378
+ .
3379
+ Proof. With (3.4) one has
3380
+ DI
3381
+ U (0)
3382
+ 2 (v, z; s)
3383
+ H(0)
3384
+ 1 (x(v); z)
3385
+ = DI∪s
3386
+ U (0)
3387
+ 2 (v, z)
3388
+ H(0)
3389
+ 1 (x(v); z)
3390
+ + DI
3391
+
3392
+ U (0)
3393
+ 2 (v, z)
3394
+ H(0)
3395
+ 1 (x(v); z)
3396
+ Ds log H(0)
3397
+ 1 (x(v); z)
3398
+
3399
+ = DI∪s
3400
+ 1
3401
+ x(z)+y(v) +
3402
+
3403
+ I⊎I′′=I
3404
+ DI′
3405
+ 1
3406
+ x(z)+y(v)DI′′∪s log H(0)
3407
+ 1 (x(v); z).
3408
+ The first term equals DI∪s
3409
+ 1
3410
+ x(z)+y(v) = −DI
3411
+ λW (0)
3412
+ 2
3413
+ (v;s)
3414
+ (x(z)+y(v))2 and gives partly the first
3415
+ line of (4.15). The other part cancels with a term in the last line of (4.15); we
3416
+ will need this combination. The last term is known from (3.5) and (3.7):
3417
+ DI′′∪s log H(0)
3418
+ 1 (x(v); z)
3419
+ (4.16)
3420
+ =
3421
+ d
3422
+
3423
+ k=1
3424
+ |I′′|+1
3425
+
3426
+ l=1
3427
+ (−1)l−1
3428
+ l
3429
+
3430
+ I1⊎...⊎Il=I′′∪s
3431
+ I1,...Il̸=∅
3432
+ l�
3433
+ i=1
3434
+ λW (0)
3435
+ |Ii|+1(ˆzk; Ii)
3436
+ x(v) + y(ˆzk)
3437
+ +
3438
+ |I′′|
3439
+
3440
+ i=1
3441
+ DI′′\uiDs
3442
+ λ
3443
+ (x(v)−x(ui))(x(z)+y(ui)) + DI′′
3444
+ λ
3445
+ (x(v)−x(s))(x(z)+y(s)) .
3446
+ The middle line of (4.16) equals �d
3447
+ k=1 DI′′ λW (0)
3448
+ 2
3449
+ (ˆzk;s)
3450
+ x(v)+y(ˆzk) and gives with the deriva-
3451
+ tion property of the DI the second line of (4.15). In the last line of (4.16) we
3452
+ have Ds
3453
+ λ
3454
+ (x(v)−x(ui))(x(z)+y(ui)) = −
3455
+ λ2W (0)
3456
+ 2
3457
+ (ui;s)
3458
+ (x(v)−x(ui))(x(z)+y(ui))2 which gives the third line of
3459
+ (4.15). The final term of (4.16) gives the missing part of the last line of (4.15).
3460
+
3461
+ Proposition 4.5. One has
3462
+ ˆF (1)j(x(z); I \ uj)
3463
+ (4.17)
3464
+ = DI\uj
3465
+ �λ3Ω(0)reg
3466
+ 2
3467
+ (uj, uj)
3468
+ (x(z) + y(uj))3 +
3469
+ λ3
3470
+ 2
3471
+ ∂2
3472
+ ∂(x(uj))2
3473
+ 1
3474
+ (x(z)+y(uj)) − λ2�
3475
+ W (1)
3476
+ 1 (uj) +
3477
+ λ
3478
+ (x(z)−x(uj))3
3479
+
3480
+ (x(z) + y(uj))2
3481
+
3482
+ +
3483
+ |I|
3484
+
3485
+ i=1
3486
+ i̸=j
3487
+ DI\{ui,uj}
3488
+ λ4Ω(0)
3489
+ 2 (ui, uj)
3490
+ (x(uj) − x(ui))(x(z) + y(ui))2(x(z) + y(uj))2 ,
3491
+
3492
+ BTR FROM EXTENDED LOOP EQUATIONS
3493
+ 31
3494
+ where Ω(0)reg(u, u) := lims→u
3495
+
3496
+ Ω(0)
3497
+ 2 (u, s) −
3498
+ 1
3499
+ (x(u)−x(s))2
3500
+
3501
+ and DIΩ(0)reg
3502
+ 2
3503
+ (u, u) =
3504
+
3505
+ ∂x(s)W (0)
3506
+ |I|+2(u; I ∪ s)
3507
+ ��
3508
+ s=u for I ̸= ∅.
3509
+ Proof. The residue of (4.13) times dx(v) at x(v) = x(uj) is with (4.8b) and (4.9)
3510
+ given by
3511
+ ˆF (1)j
3512
+ |I| (x(z); I \ uj) = λ3
3513
+ d
3514
+
3515
+ k=1
3516
+ DI\uj
3517
+ Ω(0)
3518
+ 2 (ˆzk, uj)
3519
+ (x(uj) + y(ˆzk))(x(z) + y(uj))2
3520
+ (4.18)
3521
+ + lim
3522
+ v→uj(x(v) − x(uj))DI
3523
+ ˆH(1)
3524
+ 1 (x(v); z)
3525
+ H(0)
3526
+ 1 (x(v); z)
3527
+ .
3528
+ The limit in the last line follows from (4.10) and the derivation property of DI:
3529
+ lim
3530
+ v→uj(x(v) − x(uj))DI
3531
+ ˆH(1)
3532
+ 1 (x(v); z)
3533
+ H(0)
3534
+ 1 (x(v); z)
3535
+ = λ
3536
+ � |I|−1
3537
+
3538
+ l=0
3539
+ (−1)l
3540
+
3541
+ I0⊎I1⊎...⊎Il=I\uj
3542
+ I1,...,Il̸=∅
3543
+ ˆU (1)
3544
+ |I0|+1(z, uj; I0)
3545
+ H(0)
3546
+ 1 (x(uj); z)
3547
+ l�
3548
+ i=1
3549
+ H(0)
3550
+ |Ii|+1(x(uj); z; Ii)
3551
+ H(0)
3552
+ 1 (x(uj); z)
3553
+
3554
+ |I|−1
3555
+
3556
+ l=0
3557
+ (−1)l(l+1)
3558
+
3559
+ I−1⊎I0⊎I1⊎...⊎Il=I\uj
3560
+ I1,...,Il̸=∅
3561
+ U (0)
3562
+ |I−1|+1(z, uj; I−1)
3563
+ H(0)
3564
+ 1 (x(uj); z)
3565
+ ˆH(1)
3566
+ |I0|+1(x(uj); z; I0)
3567
+ H(0)
3568
+ 1 (x(uj); z)
3569
+ ×
3570
+ l�
3571
+ i=1
3572
+ H(0)
3573
+ |Ii|+1(x(uj), x(z); Ii)
3574
+ H(0)
3575
+ 1 (x(uj); z)
3576
+
3577
+ ≡ λDI\uj
3578
+
3579
+ ˆU (1)
3580
+ 1 (uj, z)
3581
+ H(0)
3582
+ 1 (x(uj); z)
3583
+
3584
+ U (0)
3585
+ 1 (uj, z)
3586
+ H(0)
3587
+ 1 (x(uj); z)
3588
+ ˆH(1)
3589
+ 1 (x(uj); z)
3590
+ H(0)
3591
+ 1 (x(uj); z)
3592
+
3593
+ = λDI\uj
3594
+ ˆU (1)
3595
+ 1 (uj, z)
3596
+ H(0)
3597
+ 1 (x(uj); z)
3598
+ − λ
3599
+
3600
+ I1⊎I2=I\uj
3601
+ DI1
3602
+ U (0)
3603
+ 1 (uj, z)
3604
+ H(0)
3605
+ 1 (x(uj); z)
3606
+ DI2
3607
+ ˆH(1)
3608
+ 1 (x(uj); z)
3609
+ H(0)
3610
+ 1 (x(uj); z)
3611
+ .
3612
+ We insert (3.4) and expand the other operations DI\uj and DI2:
3613
+ lim
3614
+ v→uj(x(v) − x(uj))DI
3615
+ ˆH(1)
3616
+ 1 (x(v); z)
3617
+ H(0)
3618
+ 1 (x(v); z)
3619
+ (4.19)
3620
+ = λ
3621
+ |I|−1
3622
+
3623
+ l=0
3624
+ (−1)l
3625
+
3626
+ I0⊎I1⊎...⊎Il=I\uj
3627
+ I1,...,Il̸=∅
3628
+ � ˆU (1)
3629
+ |I0|+1(uj, z; I0)
3630
+ H(0)
3631
+ 1 (x(uj); z)
3632
+
3633
+ 32
3634
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
3635
+
3636
+
3637
+ I′
3638
+ 0⊎I′′
3639
+ 0 =I0
3640
+ DI′
3641
+ 0
3642
+ 1
3643
+ x(z) + y(uj)
3644
+ ˆH(1)
3645
+ |I′′
3646
+ 0 |+1(x(uj); z; I′′
3647
+ 0 )
3648
+ H(0)
3649
+ 1 (x(uj); z)
3650
+
3651
+ l�
3652
+ i=1
3653
+ H(0)
3654
+ |Ii|+1(x(uj); z; Ii)
3655
+ H(0)
3656
+ 1 (x(uj); z)
3657
+ = −λ2
3658
+ |I|−1
3659
+
3660
+ l=0
3661
+ (−1)l
3662
+
3663
+ I0⊎I1⊎...⊎Il=I\uj
3664
+ I1,...,Il̸=∅
3665
+ l�
3666
+ i=1
3667
+ H(0)
3668
+ |Ii|+1(x(uj); z; Ii)
3669
+ H(0)
3670
+ 1 (x(uj); z)
3671
+ ×
3672
+
3673
+
3674
+ I′
3675
+ 0⊎I′′
3676
+ 0 =I0
3677
+ DI′
3678
+ 0
3679
+ 1
3680
+ x(z) + y(uj) ·
3681
+
3682
+ ∂x(s)
3683
+ U (0)
3684
+ |I′′
3685
+ 0 |+2(uj, z; I′′
3686
+ 0 ∪ s)
3687
+ H(0)
3688
+ 1 (x(uj); z)
3689
+ ���
3690
+ s=uj
3691
+ +
3692
+
3693
+ I′
3694
+ 0⊎I′′
3695
+ 0 ⊎I′′′
3696
+ 0 =I0
3697
+ DI′
3698
+ 0
3699
+ 1
3700
+ x(z) + y(uj)
3701
+
3702
+ W (1)
3703
+ |I′′
3704
+ 0 |+1(uj; I′′
3705
+ 0 ) +
3706
+ λδI′′
3707
+ 0 ,∅
3708
+ (x(z)−x(uj))3
3709
+ �U (0)
3710
+ |I′′′
3711
+ 0 |+1(uj, z; I′′′
3712
+ 0 )
3713
+ H(0)
3714
+ 1 (x(uj); z)
3715
+
3716
+ = −λ2
3717
+
3718
+ I′⊎I′′=I\uj
3719
+ DI′
3720
+ 1
3721
+ x(z) + y(uj) ·
3722
+
3723
+ ∂x(s)DI′′ U (0)
3724
+ 2 (uj, z; s)
3725
+ H(0)
3726
+ 1 (x(uj); z)
3727
+ ���
3728
+ s=uj
3729
+ − DI\uj
3730
+ λ2�
3731
+ W (1)
3732
+ 1 (uj) +
3733
+ λ
3734
+ (x(z)−x(uj))3
3735
+
3736
+ (x(z) + y(uj))2
3737
+ .
3738
+ Here Lemma 4.3 was used to get the second equality and (3.4) together with
3739
+ derivation property of DI to get the last equality.
3740
+ Next, in Lemma 4.4 we set v �→ uj and differentiate with respect to x(s) at
3741
+ s = uj. With the definition of Ω(0)reg
3742
+ 2
3743
+ (uj, uj) given in the Proposition we get
3744
+
3745
+ ∂x(s)DI
3746
+ U (0)
3747
+ 2 (uj, z; s)
3748
+ H(0)
3749
+ 1 (x(uj); z)
3750
+ ���
3751
+ s=uj
3752
+ = −DI
3753
+ λΩ(0)reg
3754
+ 2
3755
+ (uj, uj)
3756
+ (x(z) + y(uj))2 +
3757
+ d
3758
+
3759
+ k=1
3760
+ DI
3761
+ λΩ(0)
3762
+ 2 (ˆzk, uj)
3763
+ (x(uj) + y(ˆzk))(x(z) + y(uj))
3764
+
3765
+ |I|
3766
+
3767
+ i=1
3768
+ DI\ui
3769
+ λ2Ω(0)
3770
+ 2 (ui, uj)
3771
+ (x(uj) − x(ui))(x(z) + y(ui))2)(x(z) + y(uj))
3772
+
3773
+
3774
+ I⊎I′′=I
3775
+ DI′
3776
+ λ
3777
+ 2(x(z) + y(uj))
3778
+ ∂2
3779
+ ∂(x(uj))2DI′′
3780
+ 1
3781
+ (x(z) + y(uj)) .
3782
+ This result is inserted into (4.19) and then into (4.18). The sum over preimages
3783
+ cancels, the remainder simplifies to the assertion (4.17).
3784
+
3785
+ We proceed with the terms F (1)a
3786
+ |I|+1 in (4.14) which contribute to ˆP 1
3787
+ |I|+1. For them
3788
+ we need DI
3789
+ Q(0)
3790
+ 1 (x(v),x(z))
3791
+ P (0)
3792
+ 1
3793
+ (x(v),x(z)) which in the case I ̸= ∅ coincide with DI
3794
+ ˆQ(0)
3795
+ 1 (x(v),x(z))
3796
+ P (0)
3797
+ 1
3798
+ (x(v),x(z)). The
3799
+ function ˆQ(0)
3800
+ 1 (x(v), x(z)) was given in (3.12). By Proposition 3.5, the action of
3801
+
3802
+ BTR FROM EXTENDED LOOP EQUATIONS
3803
+ 33
3804
+ DI on functions of log ˆQ(0)
3805
+ 1 (x(v), x(z)) is the same as the combined action of DI
3806
+ on P (0)
3807
+ 1 (x(z), x(z)) and P (0)
3808
+ 1 (x(v), x(v)) and symbolic expressions D0x(0) as given
3809
+ in Proposition 3.5. Understanding D[0]
3810
+ I
3811
+ as DI when acting on P and D0
3812
+ I when
3813
+ acting on x(0), this means
3814
+ DI
3815
+ Q(0)
3816
+ 1 (x(v), x(z))
3817
+ P (0)
3818
+ 1 (x(v), x(z))
3819
+ = −
3820
+ λ
3821
+ (x(v) − x(z))2D[0]
3822
+ I
3823
+ ��
3824
+ 1 +
3825
+ (x(v) − x(z))2
3826
+ 4(x(v) − x(0))(x(z) − x(0))
3827
+ × exp
3828
+ �1
3829
+ 2 log P (0)
3830
+ 1 (x(v), x(v)) + 1
3831
+ 2 log P (0)
3832
+ 1 (x(z), x(z)) − log P (0)
3833
+ 1 (x(v), x(z))
3834
+ ��
3835
+ .
3836
+ We need this expression at and near the diagonal x(v) = x(z). Taylor expansion
3837
+ gives
3838
+ DI
3839
+ Q(0)
3840
+ 1 (x(v), x(z))
3841
+ P (0)
3842
+ 1 (x(v), x(z))
3843
+ (4.20)
3844
+ = −D0
3845
+ I
3846
+ λ
3847
+ 8(x(z) − x(0))2 − λ
3848
+ 2
3849
+ ∂2�
3850
+ DI log P (0)
3851
+ 1 (x(w), x(z))
3852
+
3853
+ ∂x(w)∂x(z)
3854
+ ���
3855
+ w=z
3856
+ + (x(v) − x(z))
3857
+
3858
+ D0
3859
+ I
3860
+ λ
3861
+ 8(x(z) − x(0))3 − λ
3862
+ 2
3863
+ ∂3�
3864
+ DI log P (0)
3865
+ 1 (x(w), x(z))
3866
+
3867
+ ∂x(w)∂(x(z))2
3868
+ ���
3869
+ w=z
3870
+
3871
+ + O((x(v) − x(z))2) .
3872
+ We insert (4.20) into (4.14) and the result together with Proposition 4.5 into
3873
+ (4.13) to get the part F (1)
3874
+ |I|+1(x(v); x(z); I) of DI
3875
+ ˆP (1)
3876
+ 1
3877
+ (x(v),x(z))
3878
+ P (0)
3879
+ 1
3880
+ (x(v),x(z)) in (4.8a). The other
3881
+ part is K0,|I|+1(x(v); z; I) given in (4.9), which for A = 0 is a function of x(z). To
3882
+ simplify the total expression we write the last line of (4.9) for A = 0 as
3883
+ λ2
3884
+ (x(v) − x(z))
3885
+ d
3886
+
3887
+ k=0
3888
+
3889
+ ∂x(w)DI
3890
+ 1
3891
+ (x(v) + y(ˆzk))(x(z) + y(w))
3892
+ ���
3893
+ w=ˆzk
3894
+ = −
3895
+ λ2
3896
+ (x(v) − x(z))
3897
+ d
3898
+
3899
+ k=0
3900
+ DI
3901
+ y′(ˆzk)
3902
+ x′(ˆzk)(x(v) + y(ˆzk))(x(z) + y(ˆzk))2
3903
+ = −
3904
+ λ2
3905
+ (x(v) − x(z))3
3906
+ d
3907
+
3908
+ k=0
3909
+ DI
3910
+
3911
+ y′(ˆzk)
3912
+ x′(ˆzk)(x(v) + y(ˆzk)) −
3913
+ y′(ˆzk)
3914
+ x′(ˆzk)(x(z) + y(ˆzk))2
3915
+
3916
+
3917
+ λ2
3918
+ (x(v) − x(z))2
3919
+ d
3920
+
3921
+ k=0
3922
+ DI
3923
+ y′(ˆzk)
3924
+ x′(ˆzk)(x(z) + y(ˆzk))2
3925
+
3926
+ 34
3927
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
3928
+ = −
3929
+ λ2
3930
+ (x(v) − x(z))3
3931
+
3932
+ ∂x(w)
3933
+ d
3934
+
3935
+ k=0
3936
+ DI
3937
+
3938
+ log(x(v) + y( ˆwk)) − log(x(z) + y( ˆwk))
3939
+
3940
+ w=z
3941
+ +
3942
+ λ2
3943
+ (x(v) − x(z))2
3944
+ ∂2
3945
+ ∂x(z)∂x(w)
3946
+ d
3947
+
3948
+ k=0
3949
+ DI log(x(z) + y( ˆwk))
3950
+ ���
3951
+ w=z
3952
+ = −
3953
+ λ2
3954
+ (x(v) − x(z))3
3955
+
3956
+ ∂x(w)
3957
+
3958
+ DI log P (0)
3959
+ 1 (x(v), x(w)) − DI log P (0)
3960
+ 1 (x(z), x(w))
3961
+
3962
+ w=z
3963
+ +
3964
+ λ2
3965
+ (x(v) − x(z))2
3966
+ ∂2
3967
+ ∂x(z)∂x(w)DI log P (0)
3968
+ 1 (x(z), x(w))
3969
+ ���
3970
+ w=z
3971
+
3972
+ |I|
3973
+
3974
+ j=1
3975
+ DI\uj
3976
+
3977
+ λ3
3978
+ (x(v) − x(z))(x(v) − x(uj))(x(z) − x(uj))2(x(z) + y(uj))2
3979
+
3980
+ ,
3981
+ where Proposition 3.3 has been used. Putting everything together, we arrive (for
3982
+ I ̸= ∅) at
3983
+ Theorem 4.6. The auxiliary functions ˆP (1)
3984
+ |I|+1 of genus g = 1 are determined by
3985
+ DI
3986
+ ˆP (1)
3987
+ 1 (x(v), x(z))
3988
+ P (0)
3989
+ 1 (x(v), x(z))
3990
+ (4.21)
3991
+ =
3992
+ d
3993
+
3994
+ k=0
3995
+ DI
3996
+ λW (1)
3997
+ 1 (ˆzk)
3998
+ x(v) + y(ˆzk) + λ2
3999
+ 2
4000
+ d
4001
+
4002
+ j,k=0
4003
+ j̸=k
4004
+ DI
4005
+ Ω(0)
4006
+ 2 (ˆzj, ˆzk)
4007
+ (x(v) + y(ˆzj))(x(v) + y(ˆzk))
4008
+
4009
+ |I|
4010
+
4011
+ j=1
4012
+ 1
4013
+ (x(v) − x(uj))
4014
+ d
4015
+
4016
+ k=0
4017
+ DI\uj
4018
+ λ3Ω(0)
4019
+ 2 (ˆzk, uj)
4020
+ (x(v) + y(ˆzk))(x(z) + y(uj))2
4021
+
4022
+ λ2
4023
+ (x(v) − x(z))3
4024
+
4025
+ ∂x(w)
4026
+
4027
+ DI log P (0)
4028
+ 1 (x(v), x(w)) − DI log P (0)
4029
+ 1 (x(z), x(w))
4030
+
4031
+ w=z
4032
+ +
4033
+ λ2
4034
+ (x(v) − x(z))2
4035
+
4036
+ − D0
4037
+ I
4038
+ 1
4039
+ 8(x(z) − x(0))2 + 1
4040
+ 2
4041
+ ∂2(DI log P (0)
4042
+ 1 (x(w), x(z)))
4043
+ ∂x(w)∂x(z)
4044
+ ���
4045
+ w=z
4046
+
4047
+ +
4048
+ 1
4049
+ x(v) − x(z)D0
4050
+ I
4051
+ λ2
4052
+ 8(x(z) − x(0))3
4053
+ +
4054
+ |I|
4055
+
4056
+ j=1
4057
+ 1
4058
+ (x(v) − x(uj))DI\uj
4059
+ �λ3Ω(0)reg
4060
+ 2
4061
+ (uj, uj)
4062
+ (x(z) + y(uj))3 −
4063
+ λ2W (1)
4064
+ 1 (uj)
4065
+ (x(z) + y(uj))2
4066
+ +
4067
+ λ3
4068
+ 2(x(z) + y(uj))2
4069
+ ∂2
4070
+ ∂(x(uj))2
4071
+ 1
4072
+ (x(z) + y(uj))
4073
+
4074
+
4075
+ BTR FROM EXTENDED LOOP EQUATIONS
4076
+ 35
4077
+ +
4078
+ |I|
4079
+
4080
+ i,j=1
4081
+ i<j
4082
+ DI\{ui,uj}
4083
+ λ4Ω(0)
4084
+ 2 (ui, uj)
4085
+ (x(v) − x(uj))(x(v) − x(ui))(x(z) + y(ui))2(x(z) + y(uj))2 .
4086
+ The Theorem also holds for I = ∅ where it specifies to Proposition 4.1.
4087
+ 4.3. Loop equations for genus g = 1. From (4.21) we extract
4088
+ [(x(v))−1]DI
4089
+ ˆP (1)
4090
+ 1 (x(v), x(z))
4091
+ P (0)
4092
+ 1 (x(v), x(z))
4093
+ = λ
4094
+ d
4095
+
4096
+ k=0
4097
+ W (1)
4098
+ |I|+1(ˆzk; I) + D0
4099
+ I
4100
+ λ2
4101
+ 8(x(z) − x(0))3
4102
+ (4.22)
4103
+ +
4104
+ |I|
4105
+
4106
+ j=1
4107
+ DI\uj
4108
+ �λ3Ω(0)reg
4109
+ 2
4110
+ (uj, uj)
4111
+ (x(z) + y(uj))3 −
4112
+ λ2W (1)
4113
+ 1 (uj)
4114
+ (x(z) + y(uj))2
4115
+
4116
+ λ3
4117
+ 2(x(z) + y(uj))2
4118
+ ∂2
4119
+ ∂(x(uj))2
4120
+ 1
4121
+ (x(z) + y(uj))
4122
+
4123
+ and
4124
+ [(x(v))−2]DI
4125
+ ˆP (1)
4126
+ 1 (x(v), x(z))
4127
+ P (0)
4128
+ 1 (x(v), x(z))
4129
+ (4.23)
4130
+ = −λ
4131
+ d
4132
+
4133
+ k=0
4134
+ y(ˆzk)W (1)
4135
+ |I|+1(ˆzk; I) − λ2
4136
+
4137
+ I1⊎I2=I
4138
+ I2̸=∅
4139
+ d
4140
+
4141
+ k=0
4142
+ W (1)
4143
+ |I|+1(ˆzk; I1)W (0)
4144
+ |I|+1(ˆzk; I2)
4145
+
4146
+ |I|
4147
+
4148
+ j=1
4149
+ d
4150
+
4151
+ k=0
4152
+ DI\uj
4153
+ λ3Ω(0)
4154
+ 2 (ˆzk, uj)
4155
+ (x(z) + y(uj))2
4156
+ (*)
4157
+ + λ2
4158
+ 2
4159
+ d
4160
+
4161
+ j,k=0
4162
+ j̸=k
4163
+ DIΩ(0)
4164
+ 2 (ˆzj, ˆzk) + λ2
4165
+ 2
4166
+ ∂2(DI log P (0)
4167
+ 1 (x(w), x(z)))
4168
+ ∂x(w)∂x(z)
4169
+ ���
4170
+ w=z
4171
+ (†)
4172
+ − D0
4173
+ I
4174
+ λ2
4175
+ 8(x(z) − x(0))2 + x(z)D0
4176
+ I
4177
+ λ2
4178
+ 8(x(z) − x(0))3
4179
+ +
4180
+ |I|
4181
+
4182
+ j=1
4183
+ x(uj)DI\uj
4184
+ �λ3Ω(0)reg
4185
+ 2
4186
+ (uj, uj)
4187
+ (x(z) + y(uj))3 −
4188
+ λ2W (1)
4189
+ 1 (uj)
4190
+ (x(z) + y(uj))2
4191
+ +
4192
+ λ3
4193
+ 2(x(z) + y(uj))2
4194
+ ∂2
4195
+ ∂(x(uj))2
4196
+ 1
4197
+ (x(z) + y(uj))
4198
+
4199
+ + 1
4200
+ 2
4201
+ |I|
4202
+
4203
+ i,j=1
4204
+ i̸=j
4205
+ DI\{ui,uj}
4206
+ λ4Ω(0)
4207
+ 2 (ui, uj)
4208
+ (x(z) + y(ui))2(x(z) + y(uj))2 .
4209
+ (**)
4210
+ The combination of terms in the lines (*), (†) and (**) simplifies considerably:
4211
+
4212
+ 36
4213
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
4214
+ Lemma 4.7.
4215
+
4216
+ |I|
4217
+
4218
+ j=1
4219
+ d
4220
+
4221
+ k=0
4222
+ DI\uj
4223
+ λ3Ω(0)
4224
+ 2 (ˆzk, uj)
4225
+ (x(z) + y(uj))2 + λ2
4226
+ 2
4227
+ ∂2(DI log P (0)
4228
+ 1 (x(w), x(z)))
4229
+ ∂x(w)∂x(z)
4230
+ ���
4231
+ w=z
4232
+ + λ2
4233
+ 2
4234
+ d
4235
+
4236
+ j,k=0
4237
+ j̸=k
4238
+ DIΩ(0)
4239
+ 2 (ˆzj, ˆzk) + 1
4240
+ 2
4241
+ |I|
4242
+
4243
+ i,j=1
4244
+ i̸=j
4245
+ DI\{ui,uj}
4246
+ λ4Ω(0)
4247
+ 2 (ui, uj)
4248
+ (x(z) + y(ui))2(x(z) + y(uj))2
4249
+ = −λ2
4250
+ 2
4251
+ d
4252
+
4253
+ k=0
4254
+ DIΩ(0)reg
4255
+ 2
4256
+ (ˆzk, ˆzk) + λ3
4257
+ 6
4258
+ |I|
4259
+
4260
+ j=1
4261
+
4262
+ ∂x(uj)
4263
+
4264
+ DI\uj
4265
+ 1
4266
+ (x(z) + y(uj))3
4267
+
4268
+ .
4269
+ (4.24)
4270
+ Proof. We start from (3.6) for v �→ z �→ w and differentiate with respect to x(z).
4271
+ In the second step we take (3.10) for I �→ I ∪ ˆwl into account:
4272
+ ∂(DI log P (0)
4273
+ 1 (x(z), x(w)))
4274
+ ∂x(z)
4275
+ (4.25)
4276
+ =
4277
+ d
4278
+
4279
+ l=0
4280
+ DI
4281
+ 1
4282
+ x(z) + y( ˆwl) −
4283
+ |I|
4284
+
4285
+ j=1
4286
+ DI\uj
4287
+ λ
4288
+ (x(z) − x(uj))2(x(w) + y(uj))
4289
+ = −DI
4290
+ d
4291
+
4292
+ k,l=0
4293
+
4294
+ W (0)
4295
+ 2 (ˆzk; ˆwl) −
4296
+ δk,l
4297
+ (x(ˆzk) − x( ˆwl))
4298
+
4299
+
4300
+ |I|
4301
+
4302
+ j=1
4303
+ DI\uj
4304
+
4305
+ λ
4306
+ (x(z) − x(uj))2(x(w) + y(uj)) +
4307
+ d
4308
+
4309
+ l=0
4310
+ D ˆwl
4311
+ 1
4312
+ x(z) + y(uj)
4313
+
4314
+ .
4315
+ We have �d
4316
+ l=0 D ˆwl
4317
+ 1
4318
+ x(z)+y(uj) = − �d
4319
+ l=0
4320
+ λW (0)
4321
+ 2
4322
+ (uj; ˆwl)
4323
+ (x(z)+y(uj))2.
4324
+ When differentiating with
4325
+ respect to x(w) at w = z, this term is the first one in (4.24), but with relative
4326
+ factor −2. Moreover, in the limit w → z the contributions with l ̸= k in the third
4327
+ line of (4.25) cancel the first term of the second line of (4.24), whereas for k = l
4328
+ the regularised Ω(0)reg
4329
+ 2
4330
+ (ˆzk, ˆzk) appears. We thus have
4331
+
4332
+ |I|
4333
+
4334
+ j=1
4335
+ d
4336
+
4337
+ k=0
4338
+ DI\uj
4339
+ λ3Ω(0)
4340
+ 2 (ˆzk, uj)
4341
+ (x(z) + y(uj))2 + λ2
4342
+ 2
4343
+ ∂2(DI log P (0)
4344
+ 1 (x(w), x(z)))
4345
+ ∂x(w)∂x(z)
4346
+ ���
4347
+ w=z
4348
+ + λ2
4349
+ 2
4350
+ d
4351
+
4352
+ j,k=0
4353
+ j̸=k
4354
+ DIΩ(0)
4355
+ 2 (ˆzj, ˆzk)
4356
+ = −λ2
4357
+ 2
4358
+ d
4359
+
4360
+ k=0
4361
+ DIΩ(0)reg
4362
+ 2
4363
+ (ˆzk, ˆzk)
4364
+
4365
+ BTR FROM EXTENDED LOOP EQUATIONS
4366
+ 37
4367
+ + λ2
4368
+ 2
4369
+ |I|
4370
+
4371
+ j=1
4372
+
4373
+ I1⊎I2=I\uj
4374
+ DI1
4375
+ λ
4376
+ (x(z) + y(uj))2DI2
4377
+
4378
+ δI2,∅
4379
+ (x(z) − x(uj))2 −
4380
+ d
4381
+
4382
+ l=0
4383
+ Ω(0)
4384
+ 2 (uj, ˆzl)
4385
+
4386
+ .
4387
+ In the DI2 operation we use the symmetry under uj ↔ ˆzl to take a x(uj) derivative
4388
+ out. Using (3.10) we then get
4389
+ DI2
4390
+
4391
+ δI2,∅
4392
+ (x(z) − x(uj)2) −
4393
+ d
4394
+
4395
+ l=0
4396
+ Ω(0)
4397
+ 2 (uj, ˆzl)
4398
+
4399
+ =
4400
+
4401
+ ∂x(uj)
4402
+
4403
+ δI2,∅
4404
+ (x(z) − x(uj)) −
4405
+ d
4406
+
4407
+ l=0
4408
+ W (0)
4409
+ 2 (zl; I2 ∪ uj)
4410
+
4411
+ =
4412
+
4413
+ ∂x(uj)
4414
+
4415
+ DI2
4416
+ 1
4417
+ x(z) + y(uj) +
4418
+ |I2|
4419
+
4420
+ i=1
4421
+ DI2\uiDuj
4422
+ 1
4423
+ x(z) + y(ui)
4424
+
4425
+ =
4426
+
4427
+ ∂x(uj)DI2
4428
+ 1
4429
+ x(z) + y(uj) −
4430
+ |I2|
4431
+
4432
+ i=1
4433
+ DI2\ui
4434
+ λΩ(0)
4435
+ 2 (ui, uj)
4436
+ (x(z) + y(ui))2 .
4437
+ Inserted back, the last term leads to a double sum over pairs i ̸= j and a DI\{ui,uj}
4438
+ operation, which exactly cancels the second term of the second line of (4.24). An
4439
+ obvious rearrangement of �
4440
+ I1⊎I2=I\uj DI1
4441
+ 1
4442
+ (x(z)+y(uj))2
4443
+
4444
+ ∂x(uj)DI2
4445
+ 1
4446
+ x(z)+y(uj) confirms
4447
+ the assertion.
4448
+
4449
+ On the other hand, comparing (2.9a) with (2.5) we get as leading coefficient
4450
+ [(x(v))−1]H(g)
4451
+ |I|+1(x(v); q; I) = −λW (g)
4452
+ |I|+1(q; I) for any g ≥ 1.
4453
+ Then (2.9b) and
4454
+ (2.12c) show
4455
+ [(x(v))−2]
4456
+ ˆP (g)
4457
+ |I|+1(x(v), x(z); I)
4458
+ P (0)
4459
+ 1 (x(v), x(z))
4460
+ (4.26)
4461
+ = λ
4462
+ N
4463
+ d
4464
+
4465
+ l=1
4466
+ λW (g)
4467
+ |I|+1(εl; I)
4468
+ x(z) − x(εl) − λ2
4469
+ |I|
4470
+
4471
+ j=1
4472
+ W (g)
4473
+ |I| (uj; I \ uj)
4474
+ x(z) − x(uj)
4475
+ for g ≥ 1
4476
+ as leading coefficient. Comparison with (4.22), (4.23) and Lemma 4.7 gives:
4477
+ Proposition 4.8. The genus-1 meromorphic functions W (1)
4478
+ |I|+1(z; I) satisfy the
4479
+ linear loop equation
4480
+ d
4481
+
4482
+ k=0
4483
+ W (1)
4484
+ |I|+1(ˆzk; I) = −D0
4485
+ I
4486
+ λ
4487
+ 8(x(z) − x(0))3
4488
+ (4.27)
4489
+
4490
+ |I|
4491
+
4492
+ j=1
4493
+ DI\uj
4494
+ �λ2Ω(0)reg
4495
+ 2
4496
+ (uj, uj)
4497
+ (x(z) + y(uj))3 −
4498
+ λW (1)
4499
+ 1 (uj)
4500
+ (x(z) + y(uj))2
4501
+
4502
+ 38
4503
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
4504
+
4505
+ λ2
4506
+ 2(x(z) + y(uj))2
4507
+ ∂2
4508
+ ∂(x(uj))2
4509
+ 1
4510
+ (x(z) + y(uj))
4511
+
4512
+ and the quadratic loop equation
4513
+
4514
+ d
4515
+
4516
+ k=0
4517
+ y(ˆzk)W (1)
4518
+ |I|+1(ˆzk; I)
4519
+ (4.28)
4520
+ = λ
4521
+
4522
+ I1⊎I2=I
4523
+ I2̸=∅
4524
+ d
4525
+
4526
+ k=0
4527
+ W (1)
4528
+ |I|+1(ˆzk; I1)W (0)
4529
+ |I|+1(ˆzk; I2) + λ
4530
+ 2
4531
+ d
4532
+
4533
+ k=0
4534
+ DIΩ(0)reg
4535
+ 2
4536
+ (ˆzk, ˆzk)
4537
+ (*)
4538
+ − λ2
4539
+ 6
4540
+ |I|
4541
+
4542
+ j=1
4543
+
4544
+ ∂x(uj)
4545
+
4546
+ DI\uj
4547
+ 1
4548
+ (x(z) + y(uj))3
4549
+
4550
+ (†)
4551
+ + D0
4552
+ I
4553
+ λ
4554
+ 8(x(z) − x(0))2 − x(z)D0
4555
+ I
4556
+ λ
4557
+ 8(x(z) − x(0))3
4558
+ (§)
4559
+
4560
+ |I|
4561
+
4562
+ j=1
4563
+ x(uj)DI\uj
4564
+ �λ2Ω(0)reg
4565
+ 2
4566
+ (uj, uj)
4567
+ (x(z) + y(uj))3 −
4568
+ λW (1)
4569
+ 1 (uj)
4570
+ (x(z) + y(uj))2
4571
+ (‡)
4572
+
4573
+ λ2
4574
+ 2(x(z) + y(uj))2
4575
+ ∂2
4576
+ ∂(x(uj))2
4577
+ 1
4578
+ (x(z) + y(uj))
4579
+
4580
+ (‡)
4581
+ + λ
4582
+ N
4583
+ d
4584
+
4585
+ l=1
4586
+ W (1)
4587
+ |I|+1(εl; I)
4588
+ x(z) − x(εl) − λ
4589
+ |I|
4590
+
4591
+ j=1
4592
+ W (1)
4593
+ |I| (uj; I \ uj)
4594
+ x(z) − x(uj)
4595
+ .
4596
+ 5. The recursion formula
4597
+ We learn from the loop equations (3.11)+(3.10) for g = 0, the loop equations
4598
+ (4.28)+(4.27) for g = 1 and the identity y(u) = −x(−u), see (1.2):
4599
+ • The only poles of z �→ x′(z)W (g)
4600
+ |I|+1(z; I) for 2g + |I| > 1 are located at
4601
+ the ramification points z = βi of x, at the opposite diagonals z = −ui for
4602
+ ui ∈ I and (for g ≥ 1) at z = 0. The pole at z = ui in the last line of
4603
+ (4.28) is due to the particular case I2 = {ui} in the first term of the line
4604
+ (*) of (4.28); it is not a pole of x′(z)W (1)
4605
+ |I|+1(z; I). Similarly, the pole at
4606
+ z = εk in the last line of (4.28) is due to the pole of y(z) at z = εk in the
4607
+ first line of (4.28). The same discussion applies to (3.11).
4608
+ Moving the integration contour to the complementary poles, we get
4609
+ x′(z)W (g)
4610
+ |I|+1(z; I)dz = Res
4611
+ q→z
4612
+ x′(q)W (g)
4613
+ |I|+1(q; I)dqdz
4614
+ q − z
4615
+ (5.1)
4616
+ =
4617
+ 2d
4618
+
4619
+ i=1
4620
+ Res
4621
+ q→βi
4622
+ dz
4623
+ z − qx′(q)W (g)
4624
+ |I|+1(q; I)dq
4625
+
4626
+ BTR FROM EXTENDED LOOP EQUATIONS
4627
+ 39
4628
+ +
4629
+ |I|
4630
+
4631
+ i=1
4632
+ Res
4633
+ q→−ui
4634
+ dz
4635
+ z − qx′(q)W (g)
4636
+ |I|+1(q; I)dq + Res
4637
+ q→0
4638
+ dz
4639
+ z − qx′(q)W (g)
4640
+ |I|+1(q; I)dq .
4641
+ • The linear loop equations (3.10) and (4.27) imply that the poles of z �→
4642
+ x′(z)W (g)
4643
+ |I|+1(I; z)dz at z = −ui and z = 0 have vanishing residue,
4644
+ Res
4645
+ z→ui x′(z)W (g)
4646
+ |I|+1(I; z)dz = 0 ,
4647
+ Res
4648
+ z→0 x′(z)W (g)
4649
+ |I|+1(I; z)dz = 0
4650
+ (since the integrands are total differentials in z). Renaming z �→ q, we
4651
+ thus have
4652
+ 0 = −
4653
+ |I|
4654
+
4655
+ i=1
4656
+ Res
4657
+ q→−ui
4658
+ dz
4659
+ z + ui
4660
+ x′(q)W (g)
4661
+ |I|+1(I; q)dq − Res
4662
+ q→0
4663
+ dz
4664
+ z x′(q)W (g)
4665
+ |I|+1(I; q)dq ,
4666
+ which we can safely add to (5.1).
4667
+ • Let σi(z) be the local Galois conjugate near βi, i.e. the unique preim-
4668
+ age ˆzji = σi(z) ∈ x−1(x(z)) with limz→βi σi(z) = βi and σi(z) ̸≡ z.
4669
+ A residue at βi is invariant under Galois involution, Resq→βi f(q)dq =
4670
+ Resq→βi f(σi(q))dσi(q), so that
4671
+ Res
4672
+ q→βi
4673
+ dz
4674
+ z − qx′(q)W (g)
4675
+ |I|+1(q; I)dq
4676
+ = 1
4677
+ 2 Res
4678
+ q→βi
4679
+ � dz
4680
+ z − qx′(q)W (g)
4681
+ |I|+1(q; I)dq +
4682
+ dz
4683
+ z − σi(q)x′(σi(q))W (1)
4684
+ |I|+1(σi(q); I)dσi(q)
4685
+
4686
+ = 1
4687
+ 2 Res
4688
+ q→βi
4689
+ � dz
4690
+ z − q −
4691
+ dz
4692
+ z − σi(q)
4693
+
4694
+ x′(q)W (g)
4695
+ |I|+1(q; I)dq
4696
+ + 1
4697
+ 2 Res
4698
+ q→βi
4699
+ dz
4700
+ z − σi(q)
4701
+
4702
+ x′(q)W (g)
4703
+ |I|+1(q; I)dq + x′(σi(q))W (g)
4704
+ |I|+1(σi(q); I)dσi(q)
4705
+
4706
+ .
4707
+ The last line vanishes because x′(q)dq = x′(σi)dσi(q) and W (g)
4708
+ |I|+1(q; I) +
4709
+ W (g)
4710
+ |I|+1(σi(q); I) is holomorphic at q = βi as consequence of the linear loop
4711
+ equations (3.10) and (4.27).
4712
+ In summary,
4713
+ x′(z)W (g)
4714
+ |I|+1(z; I)dz =
4715
+ 2d
4716
+
4717
+ i=1
4718
+ Res
4719
+ q→βi
4720
+ �1
4721
+ 2
4722
+ � dz
4723
+ z − q −
4724
+ dz
4725
+ z − σi(q)
4726
+
4727
+ x′(q)W (g)
4728
+ |I|+1(q; I)dq
4729
+
4730
+ +
4731
+ |I|
4732
+
4733
+ i=1
4734
+ Res
4735
+ q→−ui
4736
+ �� dz
4737
+ z − q −
4738
+ dz
4739
+ z + ui
4740
+
4741
+ x′(q)W (g)
4742
+ |I|+1(q; I)dq
4743
+
4744
+ + Res
4745
+ q→0
4746
+ �� dz
4747
+ z − q − dz
4748
+ z
4749
+
4750
+ x′(q)W (g)
4751
+ |I|+1(q; I)dq
4752
+
4753
+ ,
4754
+ (5.2)
4755
+ where the last line vanishes identically for g = 0.
4756
+ We can eventually complete the
4757
+
4758
+ 40
4759
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
4760
+ Proof of Theorem 1.1 for g ≤ 1. We identify the three contributions on the rhs
4761
+ of (5.2).
4762
+ (a) (poles at z = βi) For g = 1, only the lhs and the line (*) of the rhs of
4763
+ (4.28) have poles at z = βi; more precisely only the principal preimage
4764
+ k = 0 and the Galois preimage k = ki ∈ {1, ..., d} with ˆzki = σi(z). All
4765
+ terms with other k and all other lines in (4.28) are holomorphic at z = βi.
4766
+ Similarly, only the lhs and the first term of the rhs of (4.28) have poles at
4767
+ z = βi; more precisely only the principal preimage k = 0 and the Galois
4768
+ preimage k = ki ∈ {1, ..., d} with ˆzki = σi(z). The lhs of the quadratic
4769
+ loop equations for z �→ q, multiplied by x′(q)dq = x′(σi)dσi(q), is written
4770
+ as
4771
+
4772
+ d
4773
+
4774
+ k=0
4775
+ y(ˆqk)W (g)
4776
+ |I|+1(ˆqk; I)x′(q)dq
4777
+ =
4778
+
4779
+ − y(q)W (g)
4780
+ |I|+1(q; I) − y(σi(q))W (g)
4781
+ |I|+1(σi(q); I)
4782
+
4783
+ x′(q)dq + O((q − βi)1)dq
4784
+ = −(y(q) − y(σi(q)))W (1)
4785
+ |I|+1(q; I)x′(q)dq
4786
+ − y(σi(q))(W (g)
4787
+ |I|+1(q; I) + W (g)
4788
+ |I|+1(σi(q); I))x′(q)dq + O((q − βi)1)dq
4789
+ (**)
4790
+ = −(y(q) − y(σi(q)))W (g)
4791
+ |I|+1(q; I)x′(q)dq + O((q − βi)1)dq ,
4792
+ where we used that (3.10) and (4.27) make the whole line (**) of or-
4793
+ der O((q − βi)1)dq.
4794
+ Thus, the quadratic loop equations multiplied by
4795
+ x′(q)dq
4796
+ −(y(q)−y(σi(q))) (which is of order O((q − βi)0)dq), takes the form
4797
+ W (g)
4798
+ |I|+1(q; I)x′(q)dq
4799
+ (5.3)
4800
+ = −
4801
+ λx′(q)dq
4802
+ (y(q) − y(σi(q)))
4803
+
4804
+ q′∈{q,σi(q)}
4805
+ �1
4806
+ 2
4807
+
4808
+ I1⊎I2=I
4809
+ g1+g1=g
4810
+ (gi,Ii)̸=(0,∅)
4811
+ W (g1)
4812
+ |I1|+1(q′; I1)W (g2)
4813
+ |I2|+1(q′; I2)
4814
+ + 1
4815
+ 2DIΩ(g−1)reg
4816
+ 2
4817
+ (q′, q′)
4818
+
4819
+ + O((q − βi)0)dq .
4820
+ When inserting this into (5.2), both cases q′ = q and q′ = σi(q)
4821
+ give the same contribution since the residue at βi is invariant un-
4822
+ der local Galois involution.
4823
+ When translating to ω(g)
4824
+ n+1(z, u1, ..., un) =
4825
+ λ2g+n−1du1 · · · dunW (g)
4826
+ n+1(z; u1, ..., un)dx(z) the first line of (1.3) with recur-
4827
+ sion kernel Ki(z; q) results.
4828
+ (b) (poles at z = −uj) For g = 1, these are present on the lhs and the line (*)
4829
+ of the rhs of (4.28), there only in the principal preimage k = 0, and in the
4830
+ lines (†) and (‡) of (4.28), there only in the j-summands. The line (†), by
4831
+
4832
+ BTR FROM EXTENDED LOOP EQUATIONS
4833
+ 41
4834
+ the linear loop equation (3.10) at genus g = 0, takes the form
4835
+ (4.28)† = −λ2
4836
+ 12
4837
+ ∂3
4838
+ ∂x(uj)∂(x(z))2
4839
+
4840
+ DI\uj
4841
+ 1
4842
+ (x(z) + y(uj))
4843
+
4844
+ = λ2
4845
+ 12
4846
+ ∂3W (0)
4847
+ |I|+1(z; I)
4848
+ ∂x(uj)∂(x(z))2 + O((z + uj)0)
4849
+ = λ2
4850
+ 12
4851
+ ∂2DI\ujΩ(0)
4852
+ 2 (z, uj)
4853
+ ∂(x(z))2
4854
+ + O((z + uj)0) .
4855
+ By the linear loop equation (4.27), the j-summand of the lines (‡) can be
4856
+ written as
4857
+ (4.28)‡ = x(u)W (1)
4858
+ |I|+1(z; I) + O((z + uj)0) .
4859
+ Similarly, only the first two lines of (3.11) have poles at z = −uj, again
4860
+ only the principal preimages k = 0 and the j-summand of the last term
4861
+ of the second line of (3.11). The latter is with (3.10) also of the form
4862
+ x(u)W (0)
4863
+ |I|+1(z; I) + O((z + uj)0) that we noticed for g = 1. We bring these
4864
+ terms x(u)W (g)
4865
+ I|+1(z; I) to the lhs of the quadratic loop equations, rename
4866
+ z �→ q and multiply by
4867
+ x′(q)dq
4868
+ −(y(q)+x(uj)) to get
4869
+ W (g)
4870
+ |I|+1(q; I)x′(q)dq
4871
+ (5.4)
4872
+ =
4873
+ λx′(q)dq
4874
+ −(y(q) + x(uj))
4875
+ �1
4876
+ 2
4877
+
4878
+ I1⊎I2=I
4879
+ g1+g1=g
4880
+ (gi,Ii)̸=(0,∅)
4881
+ W (g1)
4882
+ |I1|+1(q; I1)W (g2)
4883
+ |I2|+1(q; I2) + 1
4884
+ 2DIΩ(g−1)reg
4885
+ 2
4886
+ (q, q)
4887
+ + λ
4888
+ 12
4889
+ ∂2DI\ujΩ(g−1)
4890
+ 2
4891
+ (q, uj)
4892
+ ∂(x(q))2
4893
+
4894
+ + O((q + uj)−1)dq .
4895
+ Note that the undetermined residue poses no problem since it is
4896
+ projected away in (5.2).
4897
+ When translating to ω(g)
4898
+ n+1(z, u1, ..., un)
4899
+ =
4900
+ λ2g+n−1du1 · · · dunW (g)
4901
+ n+1(z; u1, ..., un)dx(z), the second and third lines of
4902
+ (1.3) with recursion kernel Kuj(z; q) result. Here one has take into ac-
4903
+ count that the differential duj does not commute with Kuj(z; q).
4904
+ This
4905
+ makes it necessary to keep the primitive d−1
4906
+ uj inside the residue.
4907
+ (c) (pole at z = 0) There is no such pole for g = 0. For g = 1, this pole is
4908
+ present on the lhs and the line (*) of the rhs of (4.28), there only in the
4909
+ principal preimage k = 0, and in both terms of the line (§). We write the
4910
+ first term as
4911
+ D0
4912
+ I
4913
+ λ
4914
+ 8(x(z) − x(0))2 = −λ
4915
+ 8
4916
+ ∂2(D0
4917
+ I log(x(z) − x(0)))
4918
+ ∂(x(z))2
4919
+
4920
+ 42
4921
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
4922
+ = −λ
4923
+ 8
4924
+ ∂2(DI log P (0)
4925
+ 1 (x(z), x(z)))
4926
+ ∂(x(z))2
4927
+ + O(z0)
4928
+ by Proposition 3.5. Next, from (4.25) we know
4929
+ ∂(DI log P (0)
4930
+ 1 (x(z), x(z)))
4931
+ ∂(x(z))
4932
+ = lim
4933
+ w→z 2∂(DI log P (0)
4934
+ 1 (x(z), x(w)))
4935
+ ∂(x(z))
4936
+ ���
4937
+ w=z
4938
+ = −2DIW (0)reg
4939
+ 2
4940
+ (z; z) + O(z0) .
4941
+ By the linear loop equation (4.27), the second term in the line (§) of (4.28)
4942
+ can be written as
4943
+ −x(z)D0
4944
+ I
4945
+ λ
4946
+ 8(x(z) − x(0))3 = x(z)W (1)
4947
+ |I|+1(z; I) + O(z0) .
4948
+ We bring this term to the lhs, rename z �→ q and multiply by
4949
+ x′(q)dq
4950
+ −(y(q)+x(q))
4951
+ to get with the previous considerations
4952
+ W (1)
4953
+ |I|+1(q; I)x′(q)dq
4954
+ (5.5)
4955
+ =
4956
+ λx′(q)dq
4957
+ −(y(q) + x(q))
4958
+ � �
4959
+ I1⊎I2=I
4960
+ I2̸=∅
4961
+ W (1)
4962
+ |I1|+1(q; I1)W (0)
4963
+ |I2|+1(q; I2) + 1
4964
+ 2DIΩ(0)reg
4965
+ 2
4966
+ (q, q)
4967
+ + 1
4968
+ 4
4969
+ ∂(DIW (0)reg
4970
+ 2
4971
+ (q; q))
4972
+ ∂x(q)
4973
+
4974
+ + O(q−1) .
4975
+ Again the undetermined residue poses no problem since it is pro-
4976
+ jected away in (5.2).
4977
+ When translating to ω(g)
4978
+ n+1(z, u1, ..., un)
4979
+ =
4980
+ λ2g+n−1du1 · · · dunW (g)
4981
+ n+1(z; u1, ..., un)dx(z), the last two lines of (1.3) with
4982
+ recursion kernel K0(z; q) result.
4983
+
4984
+ 6. Outlook
4985
+ We continued the work of [GHW19, SW22, BHW22, HW21] and pushed
4986
+ the proof of the conjecture [BHW22, Conj. 6.1] that the quartic Kontsevich
4987
+ model obeys blobbed topological recursion [BS17] to genus g ≤ 1. The method
4988
+ that we developed in this paper is general and powerful enough to achieve the
4989
+ proof to any g. The Dyson-Schwinger equations (2.13b) provide the difference
4990
+ DIDg log P (0)
4991
+ 1 (x(v), x(z)) − DIDg log H(0)
4992
+ 1 (x(v); z), where DI is the loop inser-
4993
+ tion operator of Definition 2.6 and Dg a ‘genus insertion’ still to make precise.
4994
+ Then DIDg log P (0)
4995
+ 1 (x(v), x(z)) is this difference symmetrised in all preimages ˆzk
4996
+ plus a function of (x(v), x(z); I) with simple poles at x(v) = x(ui) and poles at
4997
+ x(v) = x(z) up to order 4g + 3. The principal part of the corresponding Laurent
4998
+ series is uniquely determined by (2.9a), (2.9b), (2.12b) and (2.12c) in terms of
4999
+ Taylor expansions of Q(h)
5000
+ 1 (x(v), x(z); I) and P (h)
5001
+ 1 (x(v), x(z); I) for h < g at the
5002
+
5003
+ BTR FROM EXTENDED LOOP EQUATIONS
5004
+ 43
5005
+ diagonal x(v) = x(z). The required functions Q are determined before via a sim-
5006
+ ilar analysis of DIDg−1 log ˆQ(0)
5007
+ 1 (x(v), x(z)) − DIDg−1 log ˆ
5008
+ M (0)
5009
+ 1 (x(v); z). After all,
5010
+ there is no principal obstacle to produce the solution DIDg log P (0)
5011
+ 1 (x(v), x(z))
5012
+ from which by expansion about x(v) = ∞ we get global linear and quadratic
5013
+ loop equations for W (g)
5014
+ |I|+1. This shows that the quartic Kontsevich model satisfies
5015
+ blobbed TR. Working out the details will be challenging, however, as the for-
5016
+ mulae become increasingly lengthy with larger g and Taylor expansions to order
5017
+ 4g + 1 are necessary.
5018
+ References
5019
+ [ABDB+22] A. Alexandrov, B. Bychkov, P. Dunin-Barkowski, M. Kazarian, and S. Shadrin. A
5020
+ universal formula for the x − y swap in topological recursion. 2022, 2212.00320.
5021
+ [BCEGF21] R. Belliard, S. Charbonnier, B. Eynard, and E. Garcia-Failde. Topological re-
5022
+ cursion for generalised Kontsevich graphs and r-spin intersection numbers. 2021,
5023
+ 2105.08035.
5024
+ [BEO15]
5025
+ G. Borot, B. Eynard, and N. Orantin. Abstract loop equations, topological re-
5026
+ cursion and new applications. Commun. Num. Theor. Phys., 09:51–187, 2015,
5027
+ 1303.5808. doi:10.4310/CNTP.2015.v9.n1.a2.
5028
+ [BGHW22]
5029
+ J. Branahl, H. Grosse, A. Hock, and R. Wulkenhaar. From scalar fields on quan-
5030
+ tum spaces to blobbed topological recursion. J. Phys. A, 55(42):423001, 2022,
5031
+ 2110.11789. doi:10.1088/1751-8121/ac9260.
5032
+ [BH22]
5033
+ J. Branahl and A. Hock. Complete solution of the LSZ Model via Topological
5034
+ Recursion. 2022, 2205.12166.
5035
+ [BHW21]
5036
+ J. Branahl, A. Hock, and R. Wulkenhaar. Perturbative and Geometric Anal-
5037
+ ysis of the Quartic Kontsevich Model. SIGMA, 17:085,
5038
+ 2021,
5039
+ 2012.02622.
5040
+ doi:10.3842/SIGMA.2021.085.
5041
+ [BHW22]
5042
+ J. Branahl, A. Hock, and R. Wulkenhaar. Blobbed Topological Recursion of the
5043
+ Quartic Kontsevich Model I: Loop Equations and Conjectures. Commun. Math.
5044
+ Phys., 393(3):1529–1582, 2022, 2008.12201. doi:10.1007/s00220-022-04392-z.
5045
+ [Bor14]
5046
+ G. Borot. Formal multidimensional integrals, stuffed maps, and topological re-
5047
+ cursion. Ann. Inst. H. Poincare D Comb. Phys. Interact., 1(2):225–264, 2014,
5048
+ 1307.4957. doi:10.4171/aihpd/7.
5049
+ [BS17]
5050
+ G. Borot and S. Shadrin. Blobbed topological recursion:
5051
+ properties and ap-
5052
+ plications. Math. Proc. Cambridge Phil. Soc., 162(1):39–87, 2017, 1502.00981.
5053
+ doi:10.1017/S0305004116000323.
5054
+ [CEO06]
5055
+ L. Chekhov, B. Eynard, and N. Orantin. Free energy topological expansion for
5056
+ the 2-matrix model. JHEP, 12:053, 2006, math-ph/0603003. doi:10.1088/1126-
5057
+ 6708/2006/12/053.
5058
+ [EO07]
5059
+ B. Eynard and N. Orantin. Invariants of algebraic curves and topological ex-
5060
+ pansion. Commun. Num. Theor. Phys., 1:347–452, 2007, math-ph/0702045.
5061
+ doi:10.4310/CNTP.2007.v1.n2.a4.
5062
+ [Eyn05]
5063
+ B. Eynard. Topological expansion for the 1-hermitian matrix model correlation
5064
+ functions. Journal of High Energy Physics, 2004(11):031, 2005. doi:10.1088/1126-
5065
+ 6708/2004/11/031.
5066
+ [Eyn16]
5067
+ B. Eynard. Counting Surfaces, volume 70 of Prog. Math. Phys. Birkh¨auser/
5068
+ Springer, 2016. doi:10.1007/978-3-7643-8797-6.
5069
+
5070
+ 44
5071
+ ALEXANDER HOCK AND RAIMAR WULKENHAAR
5072
+ [GHW19]
5073
+ H. Grosse, A. Hock, and R. Wulkenhaar. Solution of all quartic matrix models.
5074
+ 2019, 1906.04600.
5075
+ [GHW23]
5076
+ H. Grosse, A. Hock, and R. Wulkenhaar. A Laplacian to compute intersection
5077
+ numbers on Mg,n and correlation functions in NCQFT. Commun. Math. Phys.,
5078
+ online first, 2023, 1903.12526. doi:10.1007/s00220-022-04557-w.
5079
+ [Gou10]
5080
+ H. W. Gould. Tables of Combinatorial Identities. edited by Jocelyn Quain-
5081
+ tance,
5082
+ 2010. URL https://web.archive.org/web/20190629193344/http://
5083
+ www.math.wvu.edu/~gould/.
5084
+ [GSW17]
5085
+ H.
5086
+ Grosse,
5087
+ A.
5088
+ Sako,
5089
+ and
5090
+ R.
5091
+ Wulkenhaar.
5092
+ Exact
5093
+ solution
5094
+ of
5095
+ matricial
5096
+ Φ3
5097
+ 2
5098
+ quantum field theory. Nucl. Phys. B, 925:319–347,
5099
+ 2017,
5100
+ 1610.00526.
5101
+ doi:10.1016/j.nuclphysb.2017.10.010.
5102
+ [GSW18]
5103
+ H. Grosse, A. Sako, and R. Wulkenhaar. The Φ3
5104
+ 4 and Φ3
5105
+ 6 matricial QFT mod-
5106
+ els have reflection positive two-point function. Nucl. Phys. B, 926:20–48, 2018,
5107
+ 1612.07584. doi:10.1016/j.nuclphysb.2017.10.022.
5108
+ [GW09]
5109
+ H. Grosse and R. Wulkenhaar. Progress in solving a noncommutative quantum
5110
+ field theory in four dimensions. 2009, 0909.1389.
5111
+ [GW14]
5112
+ H. Grosse and R. Wulkenhaar. Self-dual noncommutative φ4-theory in four di-
5113
+ mensions is a non-perturbatively solvable and non-trivial quantum field theory.
5114
+ Commun. Math. Phys., 329:1069–1130, 2014, 1205.0465. doi:10.1007/s00220-014-
5115
+ 1906-3.
5116
+ [Hoc22a]
5117
+ A. Hock. A simple formula for the x-y symplectic transformation in topological
5118
+ recursion. 2022, 2211.08917.
5119
+ [Hoc22b]
5120
+ A. Hock. On the x-y Symmetry of Correlators in Topological Recursion via Loop
5121
+ Insertion Operator. 2022, 2201.05357.
5122
+ [Hoc23]
5123
+ A. Hock. Computing intermediate correlators W (g)
5124
+ n,m in Topological Recursion via
5125
+ Blobbed Topological Recursion. 2023. in preparation.
5126
+ [HW21]
5127
+ A. Hock and R. Wulkenhaar. Blobbed topological recursion of the quartic Kontse-
5128
+ vich model II: Genus=0. 2021, 2103.13271.
5129
+ [Kon92]
5130
+ M. Kontsevich. Intersection theory on the moduli space of curves and the matrix
5131
+ Airy function. Commun. Math. Phys., 147:1–23, 1992. doi:10.1007/BF02099526.
5132
+ [MS91]
5133
+ Yu.
5134
+ Makeenko
5135
+ and
5136
+ G.
5137
+ W.
5138
+ Semenoff.
5139
+ Properties
5140
+ of
5141
+ Hermitean
5142
+ matrix
5143
+ models
5144
+ in
5145
+ an
5146
+ external
5147
+ field.
5148
+ Mod.
5149
+ Phys.
5150
+ Lett.,
5151
+ A6:3455–3466,
5152
+ 1991.
5153
+ doi:10.1142/S0217732391003985.
5154
+ [SW22]
5155
+ J. Sch¨urmann and R. Wulkenhaar. An algebraic approach to a quartic analogue of
5156
+ the Kontsevich model. Math. Proc. Camb. Phil. Soc, online first, 2022, 1912.03979.
5157
+ doi:10.1017/S0305004122000366.
5158
+ [Wit91]
5159
+ E. Witten. Two-dimensional gravity and intersection theory on moduli space. In
5160
+ Surveys in differential geometry (Cambridge, MA, 1990), pages 243–310. Lehigh
5161
+ Univ., Bethlehem, PA, 1991. doi:10.4310/SDG.1990.v1.n1.a5.
5162
+ Mathematical Institute,
5163
+ University of Oxford,
5164
+ Andrew Wiles Building,
5165
+ Woodstock Road, OX2 6GG, Oxford, UK, e-mail: alexander.hock@maths.ox.ac.uk
5166
+ Mathematisches Institut der Westf¨alischen Wilhelms-Universit¨at,
5167
+ Einsteinstr. 62, 48149 M¨unster, Germany, e-mail: raimar@math.uni-muenster.de
5168
+
T9E2T4oBgHgl3EQftQh4/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
TdA0T4oBgHgl3EQfEP9z/content/tmp_files/2301.02015v1.pdf.txt ADDED
@@ -0,0 +1,1981 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Scaling transition for singular linear random fields on Z2:
2
+ spectral approach
3
+ Donatas Surgailis
4
+ January 6, 2023
5
+ Vilnius University, Faculty of Mathematics and Informatics, Naugarduko 24, 03225 Vilnius, Lithuania
6
+ Abstract
7
+ We study partial sums limits of linear random fields X on Z2 with spectral density f(x) tending to ∞, 0
8
+ or to both (along different subsequences) as x → (0, 0). The above behaviors are termed (spectrum) long-
9
+ range dependence, negative dependence, and long-range negative dependence, respectively, and assume an
10
+ anisotropic power-law form of f(x) near the origin. The partial sums are taken over rectangles whose sides
11
+ increase as λ and λγ, for any fixed γ > 0. We prove that for above X the partial sums or scaling limits
12
+ exist for any γ > 0 and exhibit a scaling transition at some γ = γ0 > 0; moreover, the ‘unbalanced’ scaling
13
+ limits (γ ̸= γ0) are Fractional Brownian Sheet with Hurst parameters taking values from [0, 1]. The paper
14
+ extends [24, 19, 28] to the above spectrum dependence conditions and/or more general values of Hurst
15
+ parameters.
16
+ Keywords: Linear random field, Gaussian random field, spectral density, long-range dependence, negative
17
+ dependence, long-range negative dependence, hyperbolic dependence, self-similarity, anisotropic scaling,
18
+ scaling transition, fractional Brownian sheet,
19
+ 1
20
+ Introduction
21
+ A stationary random field (RF) X = {X(t); t ∈ Zd} with finite second moment and covariance r(t) =
22
+ Cov(X(0), X(t)) is said (i) long-range dependent (LRD) if �
23
+ t∈Zd |r(t)| = ∞;
24
+ (ii) short-range depen-
25
+ dent (SRD) if �
26
+ t∈Zd |r(t)| < ∞,
27
+
28
+ t∈Zd r(t) ̸= 0, and (iii) negatively dependent (ND) if �
29
+ t∈Zd |r(t)| <
30
+ ∞, �
31
+ t∈Zd r(t) = 0 (for RF indexed by continuous argument, the above concepts are analogously defined
32
+ with sums replaced by integrals over Rd). The above classification, albeit not completely satisfactory, is
33
+ important in limit theorems, see [13, 16, 28]. Related but not equivalent definitions of LRD, SRD and ND
34
+ are given in terms of spectrum or moving-average coefficients of RF. In the sequel we refer to the above co-
35
+ variance or moving-average characterizations as spatial domain properties, to be distinguished from frequency
36
+ domain (spectrum) characterizations used in the literature. A very general form of such frequency domain
37
+ concepts assumes the existence of spectral density f(u), u ∈ Πd := [−π, π]d which is bounded outside of the
38
+ 1
39
+ arXiv:2301.02015v1 [math.PR] 5 Jan 2023
40
+
41
+ origin and such that (i’) limu→0 f(u) = ∞ (spectrum LRD), (ii’) limu→0 f(u) > 0 (spectrum SRD), and
42
+ (iii’) limu→0 f(u) = 0 (spectrum ND). Moreover, in cases (i’) and (iii’) it is often additionally assumed that
43
+ spectral density varies regularly as u → 0 [13, 22, 25]. It is clear that in dimensions d ≥ 2 a function can
44
+ increase regularly to ∞ along one direction and vanish along another one. Such a behavior is not reflected in
45
+ (i’)-(iii’), calling for a new concept (iv’) 0 = lim infu→0 f(u) < lim supu→0 f(u) = ∞ which we term spectrum
46
+ long-range negative dependence (spectrum LRND). Properties (i’), (iii’) and (iv’) can be jointly dubbed as
47
+ singular behaviors, the singularity limited to the origin and including both infinite and/or zero limits.
48
+ The present paper studies anisotropic partial sums limits for a class of stationary linear RFs X on Z2 with
49
+ spectrum LRD/ND/LRND properties. The partial sums
50
+ Sλ,γ(x) :=
51
+
52
+ t∈K[λx1,λγx2]
53
+ X(t),
54
+ x = (x1, x2) ∈ R2
55
+ +
56
+ (1.1)
57
+ are taken over rectangles K[λx1,λγx2] := {t ∈ Z2 : 1 ≤ t1 ≤ λx1, 1 ≤ t2 ≤ λγx2} ⊂ Z2 whose sides increase as
58
+ O(λ) and O(λγ) as λ → ∞, for a fixed γ > 0. Our main object are anisotropic scaling limits
59
+ d−1
60
+ λ,γSλ,γ(x)
61
+ fdd
62
+ −→ Vγ(x),
63
+ x ∈ R2
64
+ +,
65
+ λ → ∞,
66
+ (1.2)
67
+ where dλ,γ → ∞ is a normalization. It was proved in [24, 23, 18, 19, 4, 20, 28] and other works that for
68
+ a large class of RFs nontrivial scaling limits Vγ(x) in (1.2) exist for any γ > 0. Moreover, the limit family
69
+ {Vγ; γ > 0} may exhibit a trichotomy called scaling transition at some (nonrandom) point γ = γ0 > 0
70
+ meaning that Vγ = V± (γ ≶ γ0) do not depend on γ and are different; see Definition 1. For linear RFs, these
71
+ works can be summarized as saying that a large class of RFs on Z2 parade two types of the limit behavior
72
+ in (1.2): either (I) Vγ is a standard Brownian Sheet B1/2,1/2 for any γ > 0, or (II) scaling transition at some
73
+ γ0 > 0 exists such that all unbalanced limits Vγ, γ ̸= γ0 are Fractional Brownian Sheet (FBS) BH1,H2 with
74
+ Hurst parameters (H1, H2) = (H+
75
+ 1 , H+
76
+ 2 ) (γ > γ0), = (H−
77
+ 1 , H−
78
+ 2 ) (γ < γ0), (H+
79
+ 1 , H+
80
+ 2 ) ̸= (H−
81
+ 1 , H−
82
+ 2 ). Behavior
83
+ (I) is rather standard under SRD conditions, see [6, 7, 23] and the references therein. Behavior (II) is more
84
+ interesting and was established under LRD and ND conditions in [24, 28]. However, these results exclude
85
+ LRND singularity and some particular (boundary) values of parameters.
86
+ The present paper extends [24, 19, 28] and some other work on type (II) behavior for linear RFs X under
87
+ spectrum LRD, ND, and LRND conditions. It appears that the spectral approach used in this work is more
88
+ efficient and simple compared to ‘time-domain’ approach in [19, 28] and other papers; moreover, it can be
89
+ used to obtain the covariance structure of the scaling limits in a non-Gaussian context as well. It is assumed
90
+ that X have a moving-average (MA) representation
91
+ X(t) =
92
+
93
+ s∈Z2
94
+ a(t − s)ε(s),
95
+ t ∈ Z2,
96
+ (1.3)
97
+ with standardized i.i.d.
98
+ innovations {ε(s); s ∈ Z2}, Eε(s) = 0, Eε(s)2 = 1 and deterministic coefficients
99
+ a(t), t ∈ Z2. The latter coefficients satisfy the necessary condition �
100
+ t∈Z2 a(t)2 < ∞ for the convergence of
101
+ (1.3) but all other conditions for various behaviors in (1.2) in this paper refer to the spectral density of X
102
+ 2
103
+
104
+ Figure 1. Level graphs of spectral density under LRD, ND, LRND and ‘hyperbolic dependence’ conditions. a) f(x) =
105
+ ρ−1
106
+ 2 (x), υ1 = 0.8, υ2 = 1.2; b) f(x) = ρ(x), υ1 = 0.5, υ2 = 1; c) f(x) = ρ−1(x)|x2/ρ(x)1/υ2|0.5, υ1 = 0.5, υ2 = 1; d)
107
+ f(x) = |x1|−0.2|x2|−.5
108
+ written as the squared Fourier transform f(x) = (2π)2|�a(x)|2, �a(x) := (2π)−2 �
109
+ t∈Z2 eit·xa(t), x ∈ Π2 of
110
+ MA coeffiecients. For identification of the covariance structure of the limit Gaussian RF in (1.2), the basic
111
+ assumption is that f(x) behaves like a power function
112
+ ρ(x) := |x1|υ1 + |x2|υ2,
113
+ x = (x1, x2) ∈ R2
114
+ (1.4)
115
+ at the origin, where υi > 0, i = 1, 2 are given parameters, or, more precisely,
116
+ f(x) ∼ L(x)/ρ(x)
117
+ (‘spectrum LRD’)
118
+ and
119
+ f(x) ∼ L(x)ρ(x)
120
+ (‘spectrum ND’),
121
+ (1.5)
122
+ where L(x) is an ‘angular function’ satisfying some boundedness conditions. Clearly, the first relation in
123
+ (1.5) implies spectrum LRD provided L(x) is bounded from below, while the second relation relation in (1.5)
124
+ 3
125
+
126
+ a)b)c)d)implies spectrum ND provided L(x) is bounded from above. The above choice of ρ(x) as the ‘radial function’
127
+ is rather flexible: we can use other forms of (1.4), e.g. with
128
+ ρp(x) := (|x1|pυ1 + |x2|pυ2)1/p,
129
+ p > 0,
130
+ x = (x1, x2) ∈ R2
131
+ (1.6)
132
+ instead of (1.4) as well, since the change of ρ(x) to ρp(x) amounts to a change of the angular function which
133
+ does not affect the limit results. Spectrum LRND may arise if these boundedness conditions on L(x) are
134
+ violated, e.g. when L(x) decreases as |xi|µi with exponent µi > 0 when xi → 0 in the first relation in (1.5).
135
+ We show that in this case the limits in (1.2) may exist, together with a scaling transition and the limit
136
+ Gaussian RF depends on parameters υi, µi, i = 1, 2. We also consider the case when the spectral density
137
+ asymptotically factorizes into a product of power functions |xi|−υi, i = 1, 2. This form of singularity (studied
138
+ in [24, 6] and termed hyperbolic dependence in the present paper) allows for spectrum LRD, ND, or LRND
139
+ depending on the sign of υi, but leads to very different limit results (the absence of scaling transition). Fig. 1
140
+ illustrates different types of singular spectral densities studied in this paper. We also expect that our results
141
+ can be extended to RFs on d-dimensional lattice, d ≥ 3 arbitrary although the description of the limit RFs
142
+ is more complicated, see [4, 27].
143
+ The rest of the paper is organized as follows. Sec. 2 provides rigorous assumptions on f(x) and some
144
+ preliminary facts used in the subsequent text. Sections 3-5 contain the main Theorems 1, 2, 3 and 4, under
145
+ respective LRD, ND, LRND and hyperbolic dependence premises.
146
+ Notation. In this paper,
147
+ fdd
148
+ −→ ,
149
+ fdd
150
+ = , and
151
+ fdd
152
+ ̸= , denote respectively the weak convergence, equality, and
153
+ inequality, of finite dimensional distributions. C stands for a generic positive constant which may assume
154
+ different values at various locations and whose precise value has no importance. R+ := (0, ∞), R2
155
+ 0 := R2 \
156
+ {(0, 0)}, Π := [−π, π], |x| := |x1| + |x2|, x · y := x1y1 + x2y2, x = (x1, x2), y = (y1, y2), 1 := (1, 1), 0 := (0, 0).
157
+ I(A) denotes the indicator function of a set A.
158
+ 2
159
+ Preliminaries and assumptions
160
+ Definition 1. [24, 23, 19] We say that a stationary RF X = {X(t); t ∈ Z2} exhibits scaling transition if
161
+ non-trivial limits in (1.2) exist for any γ > 0 and there exists a γ0 > 0 such that
162
+
163
+ fdd
164
+ = V+ (γ > γ0),
165
+
166
+ fdd
167
+ = V− (0 < γ < γ0),
168
+ V+
169
+ fdd
170
+ ̸= aV− (∀ a > 0).
171
+ (2.1)
172
+ In such a case, RFs V± will be called the unbalanced limits, and RF V0 the well-balanced limit of X.
173
+ A weaker version of Definition 1 in [7] requires that Vγ is independent of γ for γ > γ0 and γ < γ0 in a small
174
+ neighborhood of γ0. There exist RFs which exhibit scaling transition in the above weaker sense at several
175
+ (possibly, infinite) number of points γ > 0 [18, 7]. However, for RFs discussed in this paper Definition 1
176
+ suffices.
177
+ Recall that the covariance function of stationary zero-mean RF X = {X(t); t ∈ Z2} is the Fourier transform
178
+ of its spectral density, viz., r(t) := EX(0)X(t) =
179
+
180
+ Π2 eit·uf(u)du, t ∈ Z2. Whence, the covariance function
181
+ 4
182
+
183
+ of the normalized partial sums RF in (1.1) writes as
184
+ Rλ,γ(x, y)
185
+ :=
186
+ d−2
187
+ λ,γESλ,γ(x)Sλ,γ(y) = d−2
188
+ λ,γ
189
+
190
+ Π2
191
+ 2
192
+
193
+ i=1
194
+ D[λγixi](ui)D[λγiyi](ui)f(u)du,
195
+ x, y ∈ R2
196
+ +, (2.2)
197
+ where (γ1, γ2) := (1, γ) and Dn(u) := �n
198
+ t=1 eitu = (eiu − ei(n+1)u)/(1 − eiu), u ∈ Π (the real-valued function
199
+ Dn(u)e−i(n+1)u/2 is the Dirichlet kernel).
200
+ Proposition 1. Let X be a linear RF in (1.3) with spectral density f and Sλ,γ be the partial sum RF in
201
+ (1.1), γ > 0. Assume that
202
+ Rλ,γ(x, y) → Rγ(x, y),
203
+ λ → ∞
204
+ (∀ x, y ∈ R2
205
+ +)
206
+ (2.3)
207
+ and
208
+ |gλ|∞ := sup
209
+ u∈Z2
210
+ ���
211
+
212
+ t∈K[λx1,λγx2]
213
+ a(t − u)
214
+ ��� = o(dλ,γ),
215
+ λ → ∞.
216
+ (2.4)
217
+ Then (1.2) holds, where Vγ is a Gaussian RF on R2
218
+ + with zero mean EVγ(x) = 0 and covariance EVγ(x)Vγ(y) =
219
+ Rγ(x, y), x, y ∈ R2
220
+ +.
221
+ Proof. The finite-dimensional convergence in (1.2) is equivalent to one-dimensional convergence Sλ,γ :=
222
+ d−1
223
+ λ,γ
224
+ �m
225
+ j=1 θj Sλ,γ(xj)
226
+ d
227
+ −→ �m
228
+ j=1 θjVγ(xj) =: Vγ for any θj ∈ R, xj ∈ R2
229
+ +, j = 1, · · · , m, m ≥ 1. From (2.3)
230
+ we have E(Sλ,γ)2 → �m
231
+ j,j′=1 θjθj′Rγ(xj, xj′) =: Rγ ≥ 0. Let Rγ > 0 then Sλ,γ = d−1
232
+ λ,γ
233
+
234
+ u∈Z2 gλ(u)ε(u) is a
235
+ normalized weighted linear form in i.i.d. r.v.s with weights gλ(u) := �m
236
+ j=1 θj
237
+
238
+ t∈K[λx1j,λγx2j] a(t − u), xj =
239
+ (x1j, x2j). Relation (2.4) implies the Lindeberg condition, viz., for any τ > 0
240
+
241
+ u∈Z2
242
+ g2
243
+ λ(t)E[ε(u)2I(|gλ(u)ε(u)| > τdλ,γ)] = o(d2
244
+ λ,γ),
245
+ λ → ∞
246
+ (2.5)
247
+ since Eε(u)2I(|gλ(u)ε(u)| > τdλ,γ) ≤ Eε(u)2I
248
+
249
+ |ε(u)| > τdλ,γ/|gλ|∞
250
+
251
+ → 0 in view of (2.4) and Eε(0)2 < ∞.
252
+ Thus, Sλ,γ
253
+ d
254
+ −→ N(0, Rγ), in other words, the distribution of (d−1
255
+ λ,γSλ,γ(xj); j = 1, · · · , m) tends to a Gaussian
256
+ distribution on Rm with mean zero and covariance matrix (Rγ(xj, xj′))j,j=1,··· ,m, proving the proposition. □
257
+ Let υi > 0, i = 1, 2, Υ :=
258
+ 1
259
+ υ1 + 1
260
+ υ2 and ρ(x), ρp(x) be as in (1.4), (1.6). We note the elementary inequality:
261
+ for any p > 0 there exist constants 0 < C1 ≤ C2 < ∞ such that
262
+ C−ρ(x) ≤ ρp(x) ≤ C+ρ(x),
263
+ (∀ x ∈ R2).
264
+ (2.6)
265
+ We also use the fact [19] that for any δ > 0, w > 0
266
+
267
+ |x|<δ
268
+ ρ(x)−wdx < ∞ ⇐⇒ w < Υ,
269
+
270
+ |x|>δ
271
+ ρ(x)−wdx < ∞ ⇐⇒ w > Υ.
272
+ (2.7)
273
+ Following [20], we say that a measurable function L : R2
274
+ 0 → R is generalized invariant if the function
275
+ x �→ L(λ1/υ1x1, λ1/υ2x2) does not depend on λ > 0. Every generalized invariant function L can be represented
276
+ as
277
+ L(x) = ˜L(x1/ρ(x)1/υ1, x2/ρ(x)1/υ2),
278
+ x ∈ R2
279
+ 0,
280
+ (2.8)
281
+ 5
282
+
283
+ where ˜L is the restriction of L to S1 := {x ∈ R2
284
+ 0 : ρ(x) = 1}.
285
+ Assumption (F)LRD The spectral density f satisfies
286
+ f(x) = ρ(x)−1L(x)
287
+
288
+ 1 + o(1)
289
+
290
+ ,
291
+ |x| → 0,
292
+ (2.9)
293
+ where ρ(x) as in (1.4), Υ > 1 and L(x) = L(−x), x ∈ R2
294
+ 0 is a strictly positive continuous generalized invariant
295
+ function. Moreover, f is bounded on {x ∈ Π2 : |x| > δ}, for any δ > 0.
296
+ Assumption (F)LRND,i
297
+ (i = 1, 2) The spectral density f satisfies Assumption (F)LRD except that strict
298
+ positivity of L(x) is replaced by
299
+ ˜L(x) = ℓi|xi|µi(1 + o(1))
300
+ (xi → 0, x ∈ S1)
301
+ (2.10)
302
+ for some µi > 0, ℓi > 0.
303
+ Assumption (F)ND The spectral density f(x), x ∈ Π2 is a bounded continuous function such that
304
+ f(x) = ρ(x)L(x)
305
+
306
+ 1 + o(1)
307
+
308
+ ,
309
+ |x| → 0,
310
+ where
311
+ (2.11)
312
+ where ρ(x) as in (1.4), and L(x), x ∈ R2
313
+ 0 is as in (2.9).
314
+ With generalized homogeneous functions ρ(x)−1L(x), ρ(x)L(x) in (2.9), (2.11) we associate Gaussian RFs
315
+ V0,1/ρ, V0,ρ by
316
+ V0,1/ρ(x) :=
317
+
318
+ R2
319
+ �2
320
+ j=1
321
+ 1−eiujxj
322
+ iuj
323
+
324
+ L(u)/ρ(u) Z(du),
325
+ V0,ρ(x) :=
326
+
327
+ R2
328
+ �2
329
+ j=1
330
+ 1−eiujxj
331
+ iuj
332
+
333
+ L(u)ρ(u) Z(du),(2.12)
334
+ x ∈ R2
335
+ +, where {Z(du)} is a complex-valued Gaussian noise with zero mean and E|Z(du)|2 = du. The
336
+ existence of V0,1/ρ, V0,ρ will be established in the following sections.
337
+ Definition 2. Let V = {V (x); x ∈ R2
338
+ +} be a RF and γ > 0, H ≥ 0, Hi ≥ 0, i = 1, 2. We say that
339
+ (i) V is (γ, H)-self-similar (SS) if
340
+ {V (λx1, λγx2); x ∈ R2
341
+ +} fdd
342
+ = {λHV (x); x ∈ R2
343
+ +},
344
+ ∀λ > 0.
345
+ (2.13)
346
+ (ii) V is (H1, H2)-multi-self-similar (MSS) is
347
+ {V (λ1x1, λ2x2); x ∈ R2
348
+ +} fdd
349
+ = {λH1
350
+ 1 λH2
351
+ 2 V (x); x ∈ R2
352
+ +},
353
+ ∀λ1, λ2 > 0.
354
+ (2.14)
355
+ (γ, H)-SS property is a particular case of the operator self-similarity property introduced in [2] and corre-
356
+ sponding to scaling x → λEx with diagonal matrix E = diag(1, γ). Particularly, (1, H)-SS property coincides
357
+ with the usual H-SS property for RFs on R2
358
+ + [25]. (H1, H2)-MSS property was introduced in [12]. It implies
359
+ (γ, H)-SS property with any γ > 0 and H = H1 + γH2. Under mild additional assumptions, scaling limits
360
+ V X
361
+ γ
362
+ in (1.2) are (γ, H)-SS RFs and the normalization dλ,γ is regularly varying at infinity with index H [23].
363
+ 6
364
+
365
+ Definition 3. (Standard) Fractional Brownian Sheet (FBS) BH1,H2 = {BH1,H2(x); x ∈ R2
366
+ +} with (H1, H2) ∈
367
+ [0, 1]2 is defined as a Gaussian process with zero-mean and covariance function EBH1,H2(x)BH1,H2(y) =
368
+ �2
369
+ i=1 rHi(xi, yi), x, y ∈ R2
370
+ +, where for x, y ∈ R+,
371
+ rH(x, y)
372
+ :=
373
+ 1
374
+ 2
375
+
376
+
377
+
378
+
379
+
380
+
381
+
382
+
383
+
384
+
385
+
386
+ x2H + y2H − |x − y|2H,
387
+ 0 < H ≤ 1,
388
+ 2,
389
+ H = 0, x = y,
390
+ 1,
391
+ H = 0, x ̸= y.
392
+ (2.15)
393
+ Usually, FBS is defined for (H1, H2) ∈ (0, 1]2 or even (H1, H2) ∈ (0, 1)2, see [1]. Extension of FBS to
394
+ H1 ∧ H2 = 0 was introduced in [28]. (2.15) implies that the restriction of FBS BH1,H2 to horizontal/vertical
395
+ line agrees with fractional Brownian motion (FBM) BH = {BH(x); x ∈ R+} with covariance function rH(x, y)
396
+ and the corresponding Hurst parameter H = Hi ∈ [0, 1], i = 1, 2. Note r0(x, y) = limH↓0 rH(x, y). FBM B0
397
+ is 0-SS and an extremely singular (non-measurable) process, see [25, pp.256–257]. It can be represented as
398
+ B0
399
+ fdd
400
+ = { 1
401
+
402
+ 2(W(x)−W(0)); x ∈ R+}, where W(x), x ∈ [0, ∞), is (uncountable) family of independent N(0, 1)
403
+ r.v.s. The above B0 is different from the ’regularized’ FBM with H = 0 defined in [10, p.2985]. FBS BH1,H2
404
+ with H1 ∧ H2 = 0 and their α-stable extensions appeared in limit theorems for RFs [28, 21]. We also recall
405
+ spectral representation of FBS with (H1, H2) ∈ (0, 1)2:
406
+ BH1,H2(x)
407
+ =
408
+ κ−1
409
+
410
+ R2
411
+ 2
412
+
413
+ j=1
414
+ 1 − eixjuj
415
+ iuj|uj|Hj− 1
416
+ 2
417
+ Z(du)
418
+ (2.16)
419
+ where
420
+ κ2
421
+ :=
422
+
423
+ R2
424
+ 2
425
+
426
+ j=1
427
+ |1 − eiuj|2
428
+ |uj|2Hj+1 du =
429
+ 2
430
+
431
+ j=1
432
+ π
433
+ HjΓ(2Hj) sin(Hjπ)
434
+ (2.17)
435
+ and Z(du) is the same Gaussian white noise as in (2.12); see [1]. For H1 ∨ H2 = 1, FBS is a line RF of the
436
+ form B1,H2(x) = x1BH2(x2) (0 < H2 < 1), BH1,1(x) = x2BH1(x1) (0 < H1 < 1), B1,1(x) = x1x2Z, where BH
437
+ is FBM and Z ∼ N(0, 1). For H1 ∧ H2 = 0, FBS allow a construction of finite-dimensional distributions via
438
+ independent FBM or uncountable family of independent Gaussian variables [28].
439
+ 3
440
+ Long-range dependence
441
+ Define
442
+ H+
443
+ 1
444
+ :=
445
+ 1
446
+ 2(1 + (υ1 ∧ 1)),
447
+ H+
448
+ 2 := 1
449
+ 2(1 + υ2 −
450
+ υ2
451
+ υ1 ∨ 1),
452
+ (3.1)
453
+ H−
454
+ 1
455
+ :=
456
+ 1
457
+ 2(1 + υ1 −
458
+ υ1
459
+ υ2 ∨ 1),
460
+ H−
461
+ 2 := 1
462
+ 2(1 + (υ2 ∧ 1)),
463
+ Note H+
464
+ 1 = 1 for υ1 ≥ 1, H+
465
+ 2 = 1/2 for υ1 ≤ 1. Analogous relations are satisfied by H−
466
+ i , i = 1, 2.
467
+ Theorem 1. Let X in (1.1) be a stationary linear RF on Z2 in (1.3) with spectral density f satisfying
468
+ Assumption (F)LRD. Then:
469
+ 7
470
+
471
+ • The scaling limits in (1.2) exist for any γ > 0 and satisfy (2.1) with γ0 = υ1
472
+ υ2 and the unbalanced limits
473
+ are given by
474
+ V+
475
+ :=
476
+ κ+
477
+
478
+
479
+
480
+
481
+
482
+ BH+
483
+ 1 ,1/2
484
+ υ1 ≤ 1,
485
+ B1,H+
486
+ 2 ,
487
+ υ1 ≥ 1,
488
+ V− := κ−
489
+
490
+
491
+
492
+
493
+
494
+ B1/2,H−
495
+ 2
496
+ υ2 ≤ 1,
497
+ BH−
498
+ 1 ,1,
499
+ υ2 ≥ 1,
500
+ (3.2)
501
+ where H±
502
+ i , i = 1, 2 as in (3.1) and the constants κ± > 0 in (3.2) are defined in (3.13), (3.14) (or can
503
+ be determined from these expressions by symmetry).
504
+ • The well-balanced limit V0 = V0,1/ρ is given in (2.12).
505
+ • The normalization in (1.2) is given by dλ,γ := λH(γ)(log+ λ)1/2 if γ > γ0 and υ1 = 1 or γ < γ0 and
506
+ υ2 = 1, and dλ,γ := λH(γ) in the remaining cases, where H(γ) is the weighted linear combination with
507
+ weights (1, γ) of the Hurst indices of FBM on the r.h.s. (3.2), viz.,
508
+ H(γ) =
509
+
510
+
511
+
512
+
513
+
514
+ H+
515
+ 1 + γH+
516
+ 2 ,
517
+ γ ≥ γ0,
518
+ H−
519
+ 1 + γH−
520
+ 2 ,
521
+ γ ≤ γ0.
522
+ (3.3)
523
+ • The RF X exhibits scaling transition at γ0.
524
+ Proof. The simple bound
525
+ �� �
526
+ t∈K[λx1,λγx2] a(t − u)
527
+ �� ≤ Cλ(1+γ)/2� �
528
+ t∈Z2 a(t)2�1/2 ≤ Cλ(1+γ)/2 reduces the
529
+ criterion (2.4) to H(γ) > (1 + γ)/2. This follows from (3.3) since H±
530
+ i
531
+ ≥ 1/2, i = 1, 2. Whence, for (1.2) it
532
+ suffices to prove the convergence of covariance functions in (2.3). The latter relation is essentially proved in
533
+ [24] except for the cases υ1 = 1 or υ2 = 1. The subsequent proof is limited to these cases. Note that the
534
+ definitions in (3.2), (3.3) given by different expressions agree for υi = 1, i = 1, 2 and/or γ = γ0. By symmetry,
535
+ it suffices to discuss the case υ1 = 1 only and consider the limits in (2.3) for γ > γ0, γ < γ0, and γ = γ0
536
+ separately. W.l.g. λ > 1.
537
+ Case γ > γ0 = 1/υ2 (υ1 = 1). In this case, V+ = κ+B1,1/2 and d2
538
+ λ,γ = λ2+γ log λ, according to (3.2). In the
539
+ integral in (2.2), change the variables: u1 → u1/λγυ2, u2 → u2/λγ and note that
540
+ λ−1D[λx1](u1/λγυ2) → x1,
541
+ λ−γD[λγx2](u2/λγ) → 1 − eix2u2
542
+ −iu2
543
+ ,
544
+ (3.4)
545
+ fλ(u) := λ−γυ2f(u1/λγυ2, u2/λγ) → L(u)/ρ(u) =: f0(u)
546
+ point-wise for any u ∈ R2
547
+ 0 as λ → ∞, due to γ > γ0. Moreover, (2.9) implies
548
+ |fλ(u) − f0(u)| ≤
549
+ 1
550
+ |u1| + |u2|υ2 δ
551
+ � u1
552
+ λγυ2 , u2
553
+ λγ
554
+
555
+ ,
556
+ (3.5)
557
+ where δ(u) is a bounded function tending to 0 as u → 0.
558
+ Thus, Rλ,γ(x, y) = �2
559
+ i=1 R(i)
560
+ λ,γ(x, y), where
561
+ R(i)
562
+ λ,γ(x, y) :=
563
+ 1
564
+ log λ
565
+
566
+ λγυ2Π×λγΠ G(i)
567
+ λ (u)du, i = 1, 2 with
568
+ G(1)
569
+ λ (u)
570
+ :=
571
+ D[λx1](u1/λγυ2)D[λy1](u1/λγυ2)
572
+ λ2
573
+ · D[λγx2](u2/λγ)D[λγy2](u2/λγ)
574
+ λ2γ
575
+ · f0(u)
576
+ (3.6)
577
+
578
+ x1y1 · (1 − eix2u2)(1 − e−iy2u2)
579
+ |u2|2
580
+ · f0(u) =: G(u),
581
+ 8
582
+
583
+ and G(2)
584
+ λ (u) analogously defined with f0(u) replaced by fλ(u) − f0(u).
585
+ Let us prove R(2)
586
+ λ,γ(x, y) → 0. From elementary bound |Dn(x)| ≤ Cn/(1 + n|x|), x ∈ Π and (3.5) we get
587
+ |R(2)
588
+ λ,γ(x, y)|
589
+
590
+ C
591
+ log λ
592
+
593
+ λγυ2Π×λγΠ
594
+ du
595
+ (1 + u2
596
+ 2)(|u1| + |u2|υ2)δ
597
+ � u1
598
+ λγυ2 , u2
599
+ λγ
600
+
601
+ =
602
+ C
603
+ log λ
604
+
605
+ R
606
+ du2
607
+ 1 + u2
608
+ 2
609
+
610
+ |u1|≤λγυ2/|u2|υ2
611
+ du1
612
+ |u1| + 1δ
613
+ �u1|u2|υ2
614
+ λγυ2
615
+ , u2
616
+ λγ
617
+
618
+ .
619
+ (3.7)
620
+ Split the integration region on the r.h.s.
621
+ of (3.7) into three parts corresponding to {|u2|
622
+ λγ
623
+ ≥ ϵ}, {|u2|
624
+ λγ
625
+ <
626
+ ϵ, |u1|( |u2|
627
+ λγ )υ2 < ϵ}, and {|u2|
628
+ λγ < ϵ, |u1|( |u2|
629
+ λγ )υ2 ≥ ϵ}, T1, T2 and T3, say, so that |R(2)
630
+ λ,γ(x, y)| ≤ C(log λ)−1 �3
631
+ i=1
632
+
633
+ Ti . . .
634
+ =: (log λ)−1 �3
635
+ i=1 Ri. The inner integral in (3.7) on T1 is bounded by C log(1/ϵ)υ2) implying R1 → 0 (λ →
636
+ ∞, ∀ϵ > 0). Next, by the property δ(u) → 0, for any δ′ > 0 there exists ϵ > 0 s.t. R2 ≤ δ′(log λ)−1 �
637
+ R(1 +
638
+ u2
639
+ 2)−1 log(λγ/|u2|)υ2du2 ≤ Cδ′ vanishes with ϵ → 0 uniformly in λ ≥ 2.
640
+ Finally, for any ϵ > 0 fixed,
641
+ R3 ≤ C(log λ)−1 �
642
+ R(1 + u2
643
+ 2)−1�
644
+ log(λγ/|u2|)υ2 − log ϵ(λγ/|u2|)υ2�
645
+ = O(1/ log λ) → 0 as λ → ∞, thus ending
646
+ the proof of R(2)
647
+ λ,γ(x, y) → 0.
648
+ Next, we prove the limits
649
+ lim
650
+ λ→∞ R(1)
651
+ λ,γ(x, y) = lim
652
+ λ→∞
653
+ ˜Rλ,γ(x, y) = κ2
654
+ +E[B1,1/2(x)B1,1/2(y)] = κ2
655
+ +x1y1(x2 ∧ y2)
656
+ (3.8)
657
+ where
658
+ ˜Rλ,γ(x, y)
659
+ :=
660
+ 1
661
+ log λ
662
+
663
+ λγυ2Π×λγΠ
664
+ G(u)du
665
+ =
666
+ x1y1
667
+
668
+ |u2|≤λγπ
669
+ (1 − eix2u2)(1 − e−iy2u2)
670
+ |u2|2
671
+ du2 ×
672
+ 1
673
+ log λ
674
+
675
+ |u1|≤λγυ2π
676
+ f0(u)du1.
677
+ (3.9)
678
+ Consider the second limit in (3.8).
679
+ By definition, f0(u) = L
680
+
681
+ u1
682
+ |u1|+|u2|υ2 ,
683
+ u2
684
+ (|u1|+|u2|υ2)1/υ2
685
+
686
+ (|u1| + |u2|υ2)−1
687
+ behaves as L(1, 0)|u1|−1 when |u1| → ∞, indeed, for any u2 ̸= 0
688
+ |u2|υ2(|u1| + 1)f0(u1|u2|υ2, u2) = L
689
+ � u1 + 1
690
+ |u1| + 1, sgn(u2)
691
+ |u1| + 1
692
+
693
+ → L(1, 0),
694
+ |u1| → ∞
695
+ (3.10)
696
+ by the conditions on L(·) in the theorem. Then, the last term on the r.h.s. of (3.9) can be rewritten as
697
+ the sum of two terms: the first term L(1,0)
698
+ log λ
699
+
700
+ |u1|≤(λγ/|u2|)υ2π(|u1| + 1)−1du1 → 2L(1, 0)γυ2 (∀u2 ̸= 0), and the
701
+ second term
702
+ 1
703
+ log λ
704
+
705
+ |u1|≤(λγ/|u2|)υ2π
706
+ du1
707
+ |u1| + 1
708
+
709
+ L
710
+ � u1 + 1
711
+ |u1| + 1, sgn(u2)
712
+ |u1| + 1
713
+
714
+ − L(1, 0)
715
+
716
+ → 0
717
+ (∀ u2 ̸= 0)
718
+ (3.11)
719
+ in view of (3.10) and boundedness of L(·). The proof of the second limit in (3.8) follows from (3.9)-(3.11)
720
+ and the DCT using the bound (log λ)−1�� �
721
+ |u1|≤λγυ2π g+(u)du1
722
+ �� ≤ C(log λ)−1 �
723
+ |u1|≤λγυ2π(|u1| + |u2|υ2)−1du1 ≤
724
+ C(1 + | log(|u2|)|) and the integrability of (1 + | log(|u2|)|)(1 + u2
725
+ 2)−1 on R.
726
+ It remains to prove that R(1)
727
+ λ,γ(x, y) − ˜Rλ,γ(x, y) → 0.
728
+ W.l.g., take x = y = 1 and let R(1)
729
+ λ,γ(1, 1) −
730
+ ˜Rλ,γ(1, 1) =: R′
731
+ λ,γ. Then
732
+ |R′
733
+ λ,γ|
734
+
735
+ 1
736
+ log λ
737
+
738
+ λγυ2Π×λγΠ
739
+ ���
740
+ |D[λ](u1/λγυ2)D[λγ](u2/λγ)|2
741
+ λ2+2γ
742
+ − |1 − eiu2|2
743
+ u2
744
+ 2
745
+ ���
746
+ du
747
+ |u1| + |u2|υ2
748
+ =
749
+ 1
750
+ log λ
751
+
752
+ λγυ2Π×λγΠ
753
+ δλ(u)du
754
+ (1 + u2
755
+ 2)(|u1| + |u2|υ2) = o(1)
756
+ (3.12)
757
+ 9
758
+
759
+ since δλ(u) → 0 uniformly in u in the last integral, see (4.6), and the r.h.s. of (3.12) with δλ(u) replaced by
760
+ 1 is bounded. This ends the proof of (3.8) with
761
+ κ2
762
+ + := 2L(1, 0)γυ2
763
+
764
+ R
765
+ |(1 − eiu)/u|2du = 4πL(1, 0)γυ2
766
+ (3.13)
767
+ and completes the proof of (2.3) when γ > γ0.
768
+ Case γ < γ0 = 1/υ2 (υ1 = 1). There are three subcases: (a) υ2 < 1, (b) υ2 > 1, and (c) υ2 = 1. Accordingly,
769
+ V− = κ−B1/2,H−
770
+ 2 in subcase (a), V− = κ−BH−
771
+ 1 ,1 in subcase (b), and V− = κ−B1/2,1 in subcase (c). Subcases
772
+ (a) and (b) do not involve logarithmic normalization and are essentially treated in [24, Thm.3.1]. Subcase
773
+ (c) is symmetric to the above case γ > γX
774
+ 0 , υ1 = 1 with (x1, υ1) and (x2, υ2) exchanged, by noting that the
775
+ latter discussion applies to any υ2 > 0 including υ2 = 1.
776
+ Case γ = γ0 = 1/υ2 (υ1 = 1). In this case, V0 = V0,1/ρ is given in (2.12) and the result follows from [24] with
777
+ small changes, thus ending the proof of Theorem 1.
778
+
779
+ Remark 1. The squared asymptotic constant in (3.2) for υ1 ̸= 1 takes the form
780
+ κ2
781
+ +
782
+ =
783
+
784
+
785
+
786
+
787
+
788
+ L(1, 0)
789
+
790
+ R2
791
+ �2
792
+ j=1
793
+ ��(1 − eiuj)/uj
794
+ ��2|u1|−υ1du,
795
+ υ1 < 1,
796
+
797
+ R2
798
+ ��(1 − eiu2)/u2
799
+ ��2L(u)ρ(u)−1du,
800
+ υ1 > 1,
801
+ (3.14)
802
+ (κ2
803
+ − is defined symmetrically with u1, u2, υ1, υ2 exchanged by u2, u1, υ2, υ1). For υ1 < 1 the integral in (3.14)
804
+ can be explicitly evaluated as κ2
805
+ + = L(1, 0)(2π)2/Γ(2 + υ1) sin((1 + υ1)π/2), see (2.17).
806
+ 4
807
+ Negative dependence
808
+ This sec. describes anisotropic scaling limits in (1.2) of linear RFs satisfying Assumption (F)ND. Define
809
+ H+
810
+ 1 := 1
811
+ 2(1 − (υ1 ∧ 1)),
812
+ H−
813
+ 2 := 1
814
+ 2(1 − (υ2 ∧ 1)),
815
+ γ0 := υ1 ∧ 1
816
+ υ2 ∧ 1.
817
+ (4.1)
818
+ Note H+
819
+ 1 , H−
820
+ 2 ∈ [0, 1/2) and H+
821
+ 1 = 0 (respectively, H−
822
+ 2 = 0) is equivalent to υ1 ≥ 1 (respectively, υ2 ≥ 1).
823
+ We also set H+
824
+ 2 = H−
825
+ 1 := 1/2.
826
+ Theorem 2. Let X in (1.1) be a stationary linear RF on Z2 in (1.3) with spectral density f satisfying
827
+ Assumption (F)ND. Then:
828
+ • The scaling limits in (1.2) exist for any γ > 0 and satisfy (2.1) with γ0, H±
829
+ i in (4.1) and the unbalanced
830
+ limits given by
831
+ V+
832
+ :=
833
+ κ+BH+
834
+ 1 ,1/2,
835
+ V− := κ−B1/2,H−
836
+ 2 .
837
+ (4.2)
838
+ The asymptotic constants κ± > 0 are written in (4.9), (4.14), (4.19), (4.21), and (??) (or can be
839
+ determined from these expressions by symmetry).
840
+ 10
841
+
842
+ • The well-balanced limit is given by
843
+ V0
844
+ :=
845
+
846
+
847
+
848
+
849
+
850
+ V0,ρ,
851
+ H+
852
+ 1 ∧ H−
853
+ 2 > 0,
854
+ κ+BH+
855
+ 1 ,1/2 + κ−B1/2,H−
856
+ 2 ,
857
+ H+
858
+ 1 ∧ H−
859
+ 2 = 0,
860
+ (4.3)
861
+ where BH+
862
+ 1 ,1/2 and B1/2,H−
863
+ 2 are independent and V0,ρ is defined in (2.12).
864
+ • The normalization in (1.2) is given by dλ,γ := λH(γ)(log+ λ)1/2 in the cases γ ≥ γ0, H+
865
+ 1 = 0 or γ ≤
866
+ γ0, H−
867
+ 2 = 0, and dλ,γ := λH(γ) in the remaining cases, with
868
+ H(γ)
869
+ :=
870
+
871
+
872
+
873
+
874
+
875
+ H+
876
+ 1 + (γ/2),
877
+ γ ≥ γ0,
878
+ (1/2) + γH−
879
+ 2 ,
880
+ γ ≤ γ0.
881
+ (4.4)
882
+ • The RF X exhibits scaling transition at γ0.
883
+ Proof.
884
+ Similarly as in the proof of Theorem 1 we check the asymptotic gaussianity criterion in (2.4),
885
+ reducing the proof to the convergence in (2.3) of covariance functions. Relation f(u) = (2π)2|�a(u)|2 and
886
+ the assumptions on f imply that |�a(u)| ≤ C is bounded.
887
+ Whence, with (γ1, γ2) := (1, γ), we get that
888
+ sups∈Z2
889
+ �� �
890
+ t∈K[λx1,λγx2] a(t−s)
891
+ �� ≤
892
+
893
+ Π2
894
+ �2
895
+ i=1
896
+ ��D[λγixi](ui)
897
+ �� |�a(u)|du ≤ C
898
+
899
+ Π2
900
+ �2
901
+ i=1
902
+ ��D[λγixi](ui)
903
+ �� du ≤ C(log λ)2
904
+ so that (2.4) holds since H(γ) in (4.4) satisfy H(γ) > 0.
905
+ The subsequent proof of (2.3) is split into parts I and II according to whether υi ̸= 1, i = 1, 2, or υi =
906
+ 1 (∃i = 1, 2). In turn, each part is split into several cases and subcases which are treated separately. Part II
907
+ involves the logarithmic factor in the normalization and is more delicate.
908
+ Part I. Case υ1 ∨ υ2 < 1, γ0 = υ1/υ2.
909
+ Then H+
910
+ 1 = (1 − υ1)/2 ∈ (0, 1/2), H−
911
+ 2 = (1 − υ2)/2 ∈ (0, 1/2). By
912
+ change of variables: u1 → u1/λ, u2 → u2/λγ we rewrite (2.2) as Rλ,γ(x, y) =
913
+
914
+ λΠ×λγΠ Gλ(u)du where
915
+ Gλ(u)
916
+ :=
917
+ D[λx1](u1/λ)D[λy1](u1/λ)
918
+ λ2
919
+ · D[λγx2](u2/λγ)D[λγy2](u2/λγ)
920
+ λ2γ
921
+ · fλ(u),
922
+ (4.5)
923
+ fλ(u)
924
+ :=
925
+ f(u1/λ, u2/λγ)/λ2H(γ)−1−γ.
926
+ By (2.9) and the definition of H(γ) in (4.4),
927
+ λ−1D[λx1](u1/λ) → 1 − eix1u1
928
+ −iu1
929
+ ,
930
+ λ−γD[λγx2](u2/λγ) → 1 − eix2u2
931
+ −iu2
932
+ ,
933
+ (4.6)
934
+ fλ(u) → gγ(u) :=
935
+
936
+
937
+
938
+
939
+
940
+
941
+
942
+
943
+
944
+
945
+
946
+ f0(u),
947
+ γ = γ0,
948
+ f0(u1, 0),
949
+ γ > γ0,
950
+ f0(0, u2),
951
+ γ < γ0,
952
+ point-wise for any u ∈ R2
953
+ 0, where f0(u) := L(u)ρ(u), see (2.11). Particularly, f0(u1, 0) = |u1|−υ1L(1, 0),
954
+ f0(0, u2) = |u2|−υ2L(0, 1), u = (u1, u2) ∈ R2
955
+ 0 since L(1, 0) = L(−1, 0), L(0, 1) = L(0, −1).
956
+ We also see
957
+ 11
958
+
959
+ from (2.16), (2.12) that the covariance function of the limit RF with appropriately chosen κ± writes as
960
+ EVγ(x)Vγ(y) =
961
+
962
+ R2 G(u)du, where
963
+ G(u) :=
964
+ 2
965
+
966
+ j=1
967
+ �1 − eixjuj
968
+ iuj
969
+ ��1 − e−iyjuj
970
+ −iuj
971
+
972
+ gγ(u)
973
+ (4.7)
974
+ is the product of the corresponding limit functions in (4.6) and Gλ(u) → G(u), u ∈ R2
975
+ 0 according to (4.6).
976
+ The proof of
977
+
978
+ R2 Gλ(u)du →
979
+
980
+ R2 G(u)du in all three cases γ > γ0, γ < γ0, and γ = γ0 now follows from the
981
+ dominating convergence theorem (DCT) using the bound
982
+ |λ−1D[λx]( u
983
+ λ)| ≤ Cx(1 + |([λx]/λ)u|) ≤ C/(1 + |u|),
984
+ |u| < λπ,
985
+ for any fixed x ∈ R, x ̸= 0, implying
986
+ |Gλ(u)| ≤ C �2
987
+ i=1(1 + u2
988
+ i )−1 ×
989
+
990
+
991
+
992
+
993
+
994
+
995
+
996
+
997
+
998
+
999
+
1000
+ ρ(u),
1001
+ γ = γ0,
1002
+ ρ(u1, 0),
1003
+ γ > γ0,
1004
+ ρ(0, u2),
1005
+ γ < γ0.
1006
+ (4.8)
1007
+ Hence, the r.h.s. of (4.8) denoted by ¯G(u) is integrable:
1008
+
1009
+ R2 ¯G(u)du < ∞. The asymptotic constants κ± in
1010
+ (4.2) are determined by
1011
+
1012
+ R2 |G(u)|2du = 1/κ2
1013
+ +(γ > γ0), = 1/κ2
1014
+ −(γ < γ0) for x = y = 1 in (4.7) and take a
1015
+ similar form as in (3.14); particularly,
1016
+ κ2
1017
+ +
1018
+ :=
1019
+ L(1, 0)
1020
+
1021
+ R2 |u1|υ1
1022
+ 2
1023
+
1024
+ j=1
1025
+ |(1 − eiuj)/uj|2du.
1026
+ (4.9)
1027
+ Case υ1 < 1 < υ2, γ0 = υ1.
1028
+ Subcase γ > υ1. We prove (2.3) with the same V+ = κ+BH+
1029
+ 1 ,1/2 as in the previous case using a modified
1030
+ argument, as follows. Split f(u) = f1(u) + ˜f1(u), where f1(u) = f(u1, 0), ˜f1(u) := f(u) − f(u1, 0) and
1031
+ accordingly, Rλ,γ(x, y) = R1(x, y) + ˜R1(x, y). The convergence R1(x, y) → κ2
1032
+ +EBH+
1033
+ 1 ,1/2(x)BH+
1034
+ 1 ,1/2(y) for
1035
+ any γ > 0 follows as in the case υ1 ∨ υ2 < 1 above. Whence, it suffices to prove ˜R1(x, x) → 0, or
1036
+ Jλ :=
1037
+
1038
+ Π2 | ˜f1(u)| |D[λx1](u1)D[λγx2](u2)|2du
1039
+ =
1040
+ o(λ1−υ1+γ).
1041
+ (4.10)
1042
+ We have | ˜f1(u)| ≤ C �3
1043
+ k=1 |h1,k(u)|, where h1,1(u) := ρ(u) − ρ(u1, 0) = |u2|υ2, h1,2(u) := ρ(u1, 0)(L(u) −
1044
+ L(u1, 0)) and h1,3(u) = ρ(u)δ(u), where δ(u) is a bounded function tending to 0 as |u| → 0. Accordingly,
1045
+ Jλ ≤ C �3
1046
+ k=1 Jλ,k where Jλ,k is defined as in (4.10) with | ˜f1(u)| replaced by |h1,k(u)|. Here,
1047
+ Jλ,1
1048
+
1049
+
1050
+ Π2 |u2|υ2 |1 − ei[λx1]u1|2
1051
+ |u1|2|u2|2
1052
+ du ≤ λ
1053
+
1054
+ R×Π
1055
+ |1 − eiu1x1|2
1056
+ |u1|2
1057
+ |u2|υ2−2du ≤ Cλ = o(λ1−υ1+γ)
1058
+ (4.11)
1059
+ since γ > υ1. Next,
1060
+ Jλ,2
1061
+
1062
+
1063
+ Π
1064
+ |u1|υ1 |1 − ei[λx1]u1|2
1065
+ |u1|2
1066
+ du1 ×
1067
+
1068
+ Π
1069
+ |L(u1, u2) − L(u1, 0)||1 − ei[λγx2]u2|2
1070
+ |u2|2
1071
+ du2
1072
+ (4.12)
1073
+
1074
+ Cλ1−υ1+γ
1075
+
1076
+ R
1077
+ |u1|υ1 |1 − eiu1x1|2
1078
+ |u1|2
1079
+ du1 ×
1080
+
1081
+ R
1082
+ ��L
1083
+ �u1
1084
+ λ , u2
1085
+ λγ
1086
+
1087
+ − L
1088
+ �u1
1089
+ λ , 0
1090
+ ���|1 − eiu2x2|2
1091
+ |u2|2
1092
+ du2
1093
+ =
1094
+ o(λ1−υ1+γ)
1095
+ 12
1096
+
1097
+ by the DCT and the fact that
1098
+ ��L
1099
+ � u1
1100
+ λ , u2
1101
+ λγ
1102
+
1103
+ − L
1104
+ � u1
1105
+ λ , 0
1106
+ ��� is bounded and tends to 0 for any u ∈ R2
1107
+ 0 by the
1108
+ continuity of ˜L in (2.8) and the fact that γ > υ1 > υ1/υ2. A similar argument also entails Jλ,3 = o(λ1−υ1+γ),
1109
+ proving (4.10) and the convergence (1.2) in the subcase γ > υ1.
1110
+ Subcase γ < υ1. We prove (2.3) with V− = κ−B1/2,0, d2
1111
+ λ,γ = λ and κ− in (4.14). Split f(u) = f2(u) +
1112
+ ˜f2(u), ˜f2(u) := f(u) − f(0, u2) and accordingly, Rλ,γ(x, y) = R2(x, y) + ˜R2(x, y). It suffices to prove
1113
+ R2(x, y) → κ2
1114
+ −(x1 ∧ y1) × 1
1115
+ 2(1 + I(x2 = y2))
1116
+ and
1117
+ ˜R2(x, x) → 0.
1118
+ (4.13)
1119
+ To show the first relation in (4.13), note that
1120
+ R2(x, y)
1121
+ =
1122
+ 1
1123
+ λ
1124
+
1125
+ Π
1126
+ D[λx1](u)D[λy1](u)du ×
1127
+
1128
+ Π
1129
+ f(0, v)D[λγx2](v)D[λγy2](v)dv =: J1 × J2,
1130
+ where
1131
+ J1
1132
+
1133
+
1134
+ R
1135
+ (1 − eix1u)(1 − e−iy1u)
1136
+ |u|2
1137
+ du = (x1 ∧ y1)
1138
+
1139
+ R
1140
+ |1 − eiu|2
1141
+ |u|2
1142
+ du
1143
+ follows by the DCT, and
1144
+ J2
1145
+ =
1146
+
1147
+ Π
1148
+ f(0, v)
1149
+ |1 − eiv|2
1150
+
1151
+ 1 − ei[λγx2]v − e−i[λγy2]v + ei([λγx2]−[λγy2])v�
1152
+ dv
1153
+
1154
+
1155
+ Π
1156
+ f(0, v)
1157
+ |1 − eiv|2 dv ×
1158
+
1159
+
1160
+
1161
+
1162
+
1163
+ 2,
1164
+ x2 = y2,
1165
+ 1,
1166
+ x2 ̸= y2,
1167
+ by the Lebesgue-Riemann theorem [26, Thm.1.2] and the integrability of f(0, v)|1 − eiv|−2. Whence, the
1168
+ asymptotic constant in (4.13) equals
1169
+ κ2
1170
+ − := 2
1171
+
1172
+ R
1173
+ |(1 − eiu)/u|2du ×
1174
+
1175
+ Π
1176
+ f(0, v)|1 − eiv|−2dv.
1177
+ (4.14)
1178
+ To show the second relation in (4.13), similarly as in the subcase γ > υ1 above write | ˜f2(u)| ≤ C �3
1179
+ k=1 |h2,k(u)|
1180
+ and ˜R2(x, x) ≤ C �3
1181
+ k=1 ˜R3,k(x, x) accordingly, where h2,1(u) := ρ(u) − ρ(0, u2) = |u1|υ1, h2,2(u) :=
1182
+ ρ(0, u2)(L(u) − L(0, u2)) and h2,3(u) := ρ(u)δ(u), where δ(u) is a bounded function tending to 0 as |u| → 0.
1183
+ Then ˜R2,3(x, x) = o(1),
1184
+ | ˜R2,1(x, x)|
1185
+
1186
+ Cλ−1
1187
+
1188
+ Π2 |u1|υ1 |1 − ei[λx1]u1|2|1 − ei[λγx2]u2|2
1189
+ |1 − eiu1|2|1 − eiu2|2
1190
+ du
1191
+
1192
+ Cλγ−υ1
1193
+
1194
+ R
1195
+ |u1|υ1−2|1 − eix1u1|2du1 ×
1196
+
1197
+ R
1198
+ |u2|−2|1 − eix2u2|2du2
1199
+ =
1200
+ O(λγ−υ1) = o(1)
1201
+ (4.15)
1202
+ and
1203
+ | ˜R2,2(x, x)|
1204
+
1205
+ C
1206
+
1207
+ Π
1208
+ |u2|υ2−2du2
1209
+
1210
+ R
1211
+ ��L
1212
+ �u1
1213
+ λ , u2) − L(0, u2)
1214
+ ��|(1 − eix1u1)/u1|2du1, = o(1),
1215
+ (4.16)
1216
+ proving (4.13).
1217
+ 13
1218
+
1219
+ Subcase γ = υ1. We prove (2.3) with V0 = κ+BH+
1220
+ 1 ,1/2 + κ−B1/2,0 a sum of independent limits in subcases
1221
+ γ > υ1 and γ < υ1. Split f(u) = f1(u)+f2(u)+ ˜f12(u), ˜f12(u) := f(u)−f(u1, 0)−f(0, u2) and, accordingly,
1222
+ Rλ,γ(x, y) = R1(x, y) + R2(x, y) + ˜R12(x, y). Note H+
1223
+ 1 + (υ1/2) = 1/2. The convergences R1(x, y) →
1224
+ κ2
1225
+ +EBH+
1226
+ 1 ,1/2(x, y)BH+
1227
+ 1 ,1/2(x, y) and R2(x, y) → κ2
1228
+ −EB1/2,0(x, y)B1/2,0(x, y) were proved in subcases γ >
1229
+ γ0, υi < 1, i = 1, 2 and γ < γ0, υ1 < 1 < υ2, respectively. It remains to show
1230
+ ˜R12(x, x) → 0.
1231
+ (4.17)
1232
+ Write f(u) = f0(u)T(u), where f0(u) = ρ(u)L(u) and T(u) := f(u)
1233
+ f0(u), u ∈ Π2 are a continuous functions, see
1234
+ Assumption (F)ND. Then, ˜f12(u) = �2
1235
+ i=1 ˜f12,i(u), where
1236
+ ˜f12,1(u) := |u1|υ1( �T(u) − �T(u1, 0)),
1237
+ ˜f12,2(u) := |u2|υ2( �T(u) − �T(0, u2)),
1238
+ �T(u) := L(u)T(u).
1239
+ Accordingly, ˜R12(x, x) = �2
1240
+ i=1 ˜R12,i(x, x). Then
1241
+ ˜R12,1(x, x)
1242
+
1243
+
1244
+ R2 |u1|υ1δλ,1(u)
1245
+ 2
1246
+
1247
+ j=1
1248
+ |(1 − eiujxj)/uj|2du,
1249
+ (4.18)
1250
+ where δλ,1(u) :=
1251
+ �� �T
1252
+ � u1
1253
+ λ , u2
1254
+ λυ1
1255
+
1256
+ − �T
1257
+ � u1
1258
+ λ , 0
1259
+ ���I(u ∈ λΠ × λυ1Π) is bounded uniformly in λ > 1 and the integral on
1260
+ the r.h.s. with δλ,1(u) replaced by 1 converges due to υ1 < 1. Also note that δλ,1(u) → 0 as T(u), u ∈ Π2
1261
+ is continuous and L
1262
+ � u1
1263
+ λ , u2
1264
+ λυ1
1265
+
1266
+ − L
1267
+ � u1
1268
+ λ , 0
1269
+
1270
+ → 0 due to υ2 > 1. This proves (4.17) for ˜R12,1(x, x) instead of
1271
+ ˜R12(x, x). The proof of ˜R12,2(x, x) → 0 is similar to (4.16), with
1272
+ ��L
1273
+ � u1
1274
+ λ , u2) − L(0, u2)
1275
+ �� there replaced by
1276
+ δλ,2(u) :=
1277
+ �� �T
1278
+ � u1
1279
+ λ , u2) − �T(0, u2)
1280
+ �� → 0 in view of the above mentioned properties of L(u) and T(u).
1281
+ Case υi > 1, i = 1, 2, γ0 = 1.
1282
+ By symmetry, it suffices to consider the case γ ≤ 1. Split f(u) = f1(u) +
1283
+ f2(u) + ˜f12(u), Rλ,γ(x, y) = R1(x, y) + R2(x, y) + ˜R12(x, y) as in the previous case. Then R2(x, y) →
1284
+ κ2
1285
+ −EB1/2,0(x)B1/2,0(y) as in (4.13) while R1(x, x) ≤ Cλγ−1 �
1286
+ Π |u1|υ1−2du1 ×
1287
+
1288
+ R |(1 − eiu2x2)/u2|2du2 =
1289
+ O(λγ−1) = o(1) is negligible when γ < 1; for γ = 1 we have R1(x, y) → κ2
1290
+ +EB0,1/2(x)B0,1/2(y) analogously
1291
+ to (4.13) with
1292
+ κ2
1293
+ + := 2
1294
+
1295
+ R
1296
+ |(1 − eiu)/u|2du ×
1297
+
1298
+ Π
1299
+ f(v, 0)|1 − eiv|−2dv.
1300
+ (4.19)
1301
+ The proof of ˜R12(x, x) → 0 for γ ≤ 1 follows as in (4.16). This ends the proof of Part I.
1302
+ Part II. Case υ1 = 1, υ2 < 1, γ0 = 1/υ2.
1303
+ Let first γ > 1/υ2, d2
1304
+ λ,γ = λγ log+ λ. Split f(u) = f1(u) + ˜f1(u)
1305
+ and Rλ,γ(x, y) = R1(x, y) + ˜R1(x, y) as in the case υ1 < 1 < υ2 above. Then
1306
+ R1(x, y)
1307
+
1308
+ 1
1309
+ log λ
1310
+
1311
+ λΠ
1312
+ (1 − eix1u1)(1 − e−iy1u1)
1313
+ |u1|2
1314
+ λf(u1/λ, 0)du1
1315
+ ×
1316
+
1317
+ λγΠ
1318
+ (1 − eix2u2)(1 − e−iy2u2)
1319
+ |u2|2
1320
+ du2 =: J1 × J2,
1321
+ (4.20)
1322
+ where J2 → κ2(x2 ∧y2), κ2 :=
1323
+
1324
+ R |(1−eiu)/u|2du and we need to show the limit of J1. We have λf(u1/λ, 0) =
1325
+ λL(1, 0)|u1/λ|(1+δ(u1/λ)) = L(1, 0)|u1|(1+δ(u1/λ)) where δ(u) is a bounded function tending to 0 as u → 0.
1326
+ 14
1327
+
1328
+ Therefore, J1 = J′
1329
+ 1 + J′′
1330
+ 1 , where
1331
+ J′
1332
+ 1 := 2L(1, 0)
1333
+ log λ Re
1334
+ � λπ
1335
+ 0
1336
+ (1 − eix1u)(1 − e−iy1u)u−1du → 2L(1, 0)
1337
+
1338
+
1339
+
1340
+
1341
+
1342
+ 2,
1343
+ x1 = y1,
1344
+ 1,
1345
+ x1 ̸= y1
1346
+ as limλ→∞
1347
+ � λ
1348
+ 1 eixuu−1du exists for any x ̸= 0, and |J′′
1349
+ 1 | ≤ C(log λ)−1‘
1350
+ � λπ
1351
+ 0 (u ∧ 1)2u−1δ(u/λ)du → 0 fol-
1352
+ lows as in (3.7) by splitting the last integral over sets u/λ < ϵ and ϵ ≤ u/λ ≤ π. Whence, R1(x, y) →
1353
+ κ2
1354
+ +EB0,1/2(x)B0,1/2(y), where
1355
+ κ2
1356
+ + = 4L(1, 0)κ2 = 4L(1, 0)
1357
+
1358
+ R
1359
+ |(1 − eiu)/u|2du.
1360
+ (4.21)
1361
+ To show that ˜R1(x, x) is negligible, use the decomposition of ˜f1(u) following (4.10) so that | ˜R1(x, x)| ≤
1362
+ C �3
1363
+ k=1 ˜R1,k(x, x) where ˜R1,k(x, x) = Jλ,k/ log λ and Jλ,k are as in the aforementioned proof. Then ˜R1,1(x, x) =
1364
+ o(1) as in (4.11) while ˜R1,2(x, x) ≤ C
1365
+
1366
+ R(1 + u2
1367
+ 2)−1δλ(u2)du2, c.f. (4.12), with
1368
+ δλ(u2) :=
1369
+ 1
1370
+ log λ
1371
+
1372
+ |u1|≤λπ
1373
+ (1 + |u1|)−1��L
1374
+ �u1
1375
+ λ , u2
1376
+ λγ
1377
+
1378
+ − L
1379
+ �u1
1380
+ λ , 0
1381
+ ���du1
1382
+ bounded and tending to 0 as λ → ∞ for any fixed u2 ̸= 0 by continuity of ˜L due to γυ2 > 1. Finally,
1383
+ ˜R1,3(x, x) ≤ C(log λ)−1 �
1384
+ λΠ(1 + |u1|)−1du1
1385
+
1386
+ R δ(u1/λ, u2/λγ)(1 + u2
1387
+ 2)−1du2 + o(1) = o(1) follows by splitting
1388
+ the last integral over |u1| < ϵλ and |u1| > ϵλ, using
1389
+ � πλ
1390
+ ϵλ u−1
1391
+ 1 du1 = log(1/ϵ) < ∞ for any small ϵ > 0.
1392
+ Next, let γ < 1/υ2, d2
1393
+ λ,γ = λ2H(γ), 2H(γ) = 1 + γ(1 − υ2).
1394
+ Note λγυ2f(u1/λ, u2/λγ) → f0(0, u2) =
1395
+ L(0, 1)|u2|υ2. Then Rλ,γ(x, y) → κ2
1396
+ −EB1/2,H−
1397
+ 2 (x)B1/2,H−
1398
+ 2 (y) follows as in the case υ1 ∧ υ2 < 1, γ < γ0, with
1399
+ κ2
1400
+ − = L(0, 1)
1401
+
1402
+ R2 |u2|υ2 �2
1403
+ j=1 |(1 − eiuj)/uj|2du, c.f. (4.9).
1404
+ Let γ = 1/υ2, then d2
1405
+ λ,γ = λ2H(γ) log λ, 2H(γ) =
1406
+ 1
1407
+ υ2 . We have
1408
+ Rλ,γ(x, y)
1409
+
1410
+ 1
1411
+ log λ
1412
+
1413
+ λΠ×λ1/υ2Π
1414
+ 2
1415
+
1416
+ i=1
1417
+ (1 − eixiui)(1 − e−iyiui)
1418
+ |ui|2
1419
+ λf(u1/λ, u2/λ1/υ2)du
1420
+ (4.22)
1421
+ where λf(u1/λ, u2/λ1/υ2) = f0(u)(1+δ(u1/λ, u2/λ1/υ2)) = �3
1422
+ k=1 fk(u), f1(u) := L(u)|u1|, f2(u) := L(u)|u2|υ2,
1423
+ f3(u) := f0(u)δ(u1/λ, u2/λ1/υ2), and limu→0 δ(u) = 0. Accordingly, Rλ,γ(x, y) ∼ �3
1424
+ k=1 Rk(x, y); we will
1425
+ show that R1(x, y) is the main term and Rk(x, y) → 0, k = 2, 3. Indeed,
1426
+ R1(x, y)
1427
+ =
1428
+ 1
1429
+ log λ
1430
+ � λπ
1431
+ −λπ
1432
+ (1 − eix1u)(1 − e−iy1u)
1433
+ |u|
1434
+ hλ(u)du,
1435
+ where hλ(u) :=
1436
+
1437
+ |w|≤λ1/υ2π(1 − eix2w)(1 − e−iy2w)|w|−2L(u, w)dw → h(u) (λ → ∞) and where
1438
+ h(u)
1439
+ :=
1440
+
1441
+ R
1442
+ (1 − eix2w)(1 − e−iy2w)
1443
+ |w|2
1444
+ L(u, w)dw
1445
+ (4.23)
1446
+
1447
+ L(1, 0)
1448
+
1449
+ R
1450
+ (1 − eix2w)(1 − e−iy2w)
1451
+ |w|2
1452
+ dw = L(1, 0)κ2(x2 ∧ y2)
1453
+ as |u| → ∞, see (2.16), (2.17) for the last equality. Whence, the convergence
1454
+ R′
1455
+ 1(x, y) :=
1456
+ 1
1457
+ log λ
1458
+ � λπ
1459
+ −λπ
1460
+ (1 − eix1u)(1 − e−iy1u)
1461
+ |u|
1462
+ h(u)du → κ2
1463
+ +EB0,1/2(x)B0,1/2(y)
1464
+ 15
1465
+
1466
+ with κ2
1467
+ + in (4.21) follows as in (4.20) and the same limit for R1(x, y) requires few changes. Next, |R2(x, x)| ≤
1468
+ C(log λ)−1 �
1469
+ R2(1+u2
1470
+ 1)−1(1+u2
1471
+ 2)−1|u2|υ2du = O(1/ log λ). Finally, |R3(x, x)| ≤ C(log λ)−1� � λπ
1472
+ 0 (1+u)−1 �
1473
+ R(1+
1474
+ w2)−1δ(u/λ, w/λ1/υ2)dw + O(1)
1475
+
1476
+ → 0 follows as in the case γ > 1/υ2.
1477
+ Case υ1 = 1, υ2 > 1, γ0 = 1. Let γ ≥ 1, d2
1478
+ λ,γ = λγ log+ λ. Then
1479
+ Rλ,γ(x, y)
1480
+
1481
+ 1
1482
+ log λ
1483
+
1484
+ λΠ×λγΠ
1485
+ 2
1486
+
1487
+ i=1
1488
+ (1 − eixiui)(1 − e−iyiui)
1489
+ |ui|2
1490
+ λf(u1/λ, u2/λγ)du
1491
+ where λf(u1/λ, u2/λγ) → f0(u1, 0) = L(1, 0)|u| due to γυ2 > 1. Then Rλ,γ(x, y) → κ2
1492
+ +EB0,1/2(x)B0,1/2(y)
1493
+ as in (4.20) with κ2
1494
+ + in (4.21). Next, let γ < 1. Then dλ,γ = λ1/2 and
1495
+ Rλ,γ(x, y)
1496
+
1497
+ � πλ
1498
+ −πλ
1499
+ (1 − eix1u)(1 − e−iy1u)
1500
+ |u|2
1501
+ du ×
1502
+
1503
+ Π
1504
+ (1 − eiλγx2w)(1 − e−iλγy2w)
1505
+ |1 − eiv|2
1506
+ f(u/λ, w)dw
1507
+
1508
+ κ2
1509
+ − EB1/2,0(x)B1/2,0(y)
1510
+ as in (4.13) with κ2
1511
+ − in (4.14).
1512
+ Case υ1 = υ2 = γ0 = 1.
1513
+ The convergence in (2.3) for γ ̸= 1 leading to limits V+ = κ+B0,1/2 and V− =
1514
+ κ−B1/2,0 by symmetry follow as in the case υ1 = 1, υ2 > 1, with with κ2
1515
+ + in (4.21) and κ2
1516
+ − = 4L(0, 1)κ2. Let
1517
+ us prove that for γ = 1 (2.3) tends to the sum of the latter limits leading to
1518
+ V0
1519
+ =
1520
+ κ+B0,1/2 + κ−B1/2,0,
1521
+ with
1522
+ d2
1523
+ λ,γ = λ log+ λ,
1524
+ where B0,1/2 and B1/2,0 are mutually independent. Proceeding as in (4.22) we see that fλ(u) := λf(u1/λ, u2/λ)
1525
+ → f0(u) = L(u)(|u1| + |u2|) and Rλ,γ(x, y) behaves asymptotically as the sum of two terms
1526
+ Rk(x, y)
1527
+ :=
1528
+ 1
1529
+ log λ
1530
+
1531
+ (λΠ)2 L(u1, u2)|uk|
1532
+ 2
1533
+
1534
+ j=1
1535
+ (1 − eixjuj)(1 − e−iyjuj)
1536
+ |uj|2
1537
+ du,
1538
+ k = 1, 2
1539
+ which tend to κ2
1540
+ +EB0,1/2(x)B0,1/2(y) and κ2
1541
+ −EB1/2,0(x)B1/2,0(y), respectively. We also need to check that
1542
+ the term R3(x, y) corresponding to fλ(u)−f0(u) is negligible, viz., |R3(x, y)| ≤ C(log λ)−1 �
1543
+ (λΠ)2(|u1|+|u2|)
1544
+ δ(u1/λ, u2/λ) �2
1545
+ j=1(1 + |uj|2)−1du → 0. We omit these details being similar to (4.22). This ends the proof
1546
+ of Part II and Theorem 2, too.
1547
+
1548
+ Remark 2. Following the terminology in time series [13], the asymptotic constants κ2
1549
+ ± may be dubbed
1550
+ ‘long-range variances’. It is notable that the only case when κ2
1551
+ ± depend on the spectral density outside of
1552
+ the origin are (4.14), (4.19) corresponding to H±
1553
+ i
1554
+ = 0 or spectrum ND under ‘edge effects’, see Remark 3.
1555
+ Obviously, these expressions for κ2
1556
+ ± make sense for continuous f (the continuity can be relaxed) but not for
1557
+ an arbitrary integrable or bounded f. On the other hand, continuity of f is a consequence of summability of
1558
+ covariance function hence occurs under covariance SRD and ND.
1559
+ 5
1560
+ Long-range negative and hyperbolic dependence
1561
+ Recall the asymptotic form of the spectral density under Assumption (F)LRND,2:
1562
+ f(u) ∼ f0(u) =
1563
+ L(u)
1564
+ |u1|υ1 + |u2|υ2
1565
+ |u| → 0,
1566
+ (5.1)
1567
+ 16
1568
+
1569
+ where L(u) = L(−u), u ∈ R2
1570
+ + is a continuous generalized invariant function such that
1571
+ L(u) ∼ ℓ
1572
+
1573
+ |u2|
1574
+ ρ(u)1/υ2
1575
+ �µ
1576
+ (u2 → 0)
1577
+ (5.2)
1578
+ for some µ ∈ (0, 1), ℓ > 0. Define
1579
+ H+
1580
+ 1
1581
+ :=
1582
+ 1
1583
+ 2
1584
+
1585
+ 1 +
1586
+
1587
+ (υ1 + µυ1
1588
+ υ2
1589
+ ) ∧ 1
1590
+ ��
1591
+ ,
1592
+ H+
1593
+ 2 := 1
1594
+ 2
1595
+
1596
+ 1 −
1597
+
1598
+ µ ∧ (υ2
1599
+ υ1
1600
+ − υ2)
1601
+ ��
1602
+ ,
1603
+ (5.3)
1604
+ H−
1605
+ 1
1606
+ :=
1607
+ 1
1608
+ 2
1609
+
1610
+ 1 + υ1 −
1611
+ υ1
1612
+ υ2 ∨ 1
1613
+
1614
+ ,
1615
+ H−
1616
+ 2 := 1
1617
+ 2
1618
+
1619
+ 1 + (υ2 ∧ 1)
1620
+
1621
+ Note H−
1622
+ i , i = 1, 2 in (5.3) are the same as in Theorem 1, (3.1), whereas the expressions for H+
1623
+ i , i = 1, 2 in (5.3)
1624
+ and (3.1) agree if and only if µ = 0. Also note that H±
1625
+ 1 , H−
1626
+ 2 ∈ [1/2, 1] while H+
1627
+ 2 = 1
1628
+ 2(1+υ2 − υ2
1629
+ υ1 ) ∈ [1/2, 1] for
1630
+ υ1 ≥ 1; for υ1 ≤ 1 we have H+
1631
+ 2 ∈ (0, 1/2]. Finally, H(γ) in (5.5) is a continuous function of γ, υi, i = 1, 2, µ,
1632
+ its value H(γ0) = 1
1633
+ 2(1 + υ1 + υ1
1634
+ υ2 ) at γ = γ0 := υ1
1635
+ υ2 being the same as in Theorem 1 independently of µ.
1636
+ The following theorem excludes some particular cases of parameters µ, υi, i = 1, 2 which may require
1637
+ extra logarithmic normalizing factor. It also leaves open the question about the scaling limits when both
1638
+ Assumptions (F)LRND,1 and (F)LRND,2 are satisfied.
1639
+ Theorem 3. Let X in (1.1) be a stationary linear RF on Z2 in (1.3) with spectral density f satisfying
1640
+ Assumption (F)LRND,2. In addition, let υ1 ̸= 1, υ1 + υ2
1641
+ υ1 ̸= 1. Then:
1642
+ • The scaling limits in (1.2) exist for any γ > 0 and satisfy (2.1) with γ0 = υ1
1643
+ υ2 and the unbalanced limits
1644
+ are given by
1645
+ V+
1646
+ :=
1647
+ κ+BH+
1648
+ 1 ,H+
1649
+ 2 ,
1650
+ V− := κ−BH−
1651
+ 1 ,H−
1652
+ 2 ,
1653
+ (5.4)
1654
+ where H±
1655
+ i , i = 1, 2 as in (5.3). The asymptotic constant κ+ is defined in (5.6), (5.7), whereas κ− is the
1656
+ same as in Theorem 1.
1657
+ • The well-balanced limit V0 := V0,1/ρ is given in (2.12).
1658
+ • The normalization in (1.2) is given by dλ,γ := λH(γ) with
1659
+ H(γ)
1660
+ :=
1661
+
1662
+
1663
+
1664
+
1665
+
1666
+ H+
1667
+ 1 + γH+
1668
+ 2 ,
1669
+ γ ≥ γ0,
1670
+ H−
1671
+ 1 + γH−
1672
+ 2 ,
1673
+ γ ≤ γ0.
1674
+ (5.5)
1675
+ • The RF X exhibits scaling transition at γ0 = υ1
1676
+ υ2 .
1677
+ Proof. Since (5.5) satisfy H(γ) > 1/2 (∀ γ > 0), the Lindeberg criterion in (2.4) is satisfied as in Theorem
1678
+ 1. For γ ≤ γ0 the results of Theorem 3 and their proof completely agree with those of Theorem 1. The
1679
+ subsequent proof on (2.3) is limited to γ > γ0 and split into three cases as follows.
1680
+ Case υ1 + υ1
1681
+ υ2 < 1. According to (5.1)-(5.2) for γ > υ1/υ2 we have that
1682
+ f(u1/λ, u2/λγ)
1683
+
1684
+ λυ1
1685
+ |u1|υ1 × ℓ
1686
+ ���
1687
+ u2
1688
+ λγ
1689
+ | u1
1690
+ λ |υ1/υ2
1691
+ ���
1692
+ µ
1693
+ = ℓλ2H(γ)−1−γ
1694
+ 2
1695
+
1696
+ j=1
1697
+ |uj|1−2H+
1698
+ j
1699
+ 17
1700
+
1701
+ point-wise in u = (u1, u2) ∈ R2
1702
+ 0. Then, the limit in (2.3) with Vγ = V+ = κ+BH+
1703
+ 1 ,H+
1704
+ 2 follows similarly as in
1705
+ Theorem 1 with
1706
+ κ2
1707
+ +
1708
+ =
1709
+
1710
+ 2
1711
+
1712
+ j=1
1713
+
1714
+ R
1715
+ |(1 − eiu)/u|2|u|1−2H+
1716
+ j du.
1717
+ (5.6)
1718
+ Case υ1 < 1 < υ1 + υ1
1719
+ υ2 . Then (2H+
1720
+ 1 , 2H+
1721
+ 2 ) = (1+υ1+ µυ1
1722
+ υ2 , 1−µ) if µ < υ2
1723
+ υ1 −υ2, = (2, 1+υ2− υ2
1724
+ υ1 ) if µ > υ2
1725
+ υ1 −υ2.
1726
+ The proof of the limit V+ = κ+BH+
1727
+ 1 ,H+
1728
+ 2 for µ < υ2
1729
+ υ1 − υ2 is the same as in the case υ1 + υ1
1730
+ υ2 < 1 above, and is
1731
+ omitted. Next, let µ > υ2
1732
+ υ1 − υ2. Due to γ > υ1/υ2 we see that
1733
+ λ−γυ2f
1734
+
1735
+ u1
1736
+ λγυ2/υ1 , u2
1737
+ λγ
1738
+
1739
+ → f0(u),
1740
+ λ−1D[λx]
1741
+
1742
+ u1
1743
+ λγυ2/υ1
1744
+
1745
+ → x,
1746
+ λ−γD[λγx]
1747
+ �u2
1748
+ λγ
1749
+
1750
+ → 1 − eixu2
1751
+ −iu2
1752
+ point-wise for any ui ̸= 0, i = 1, 2, x > 0. We also have
1753
+
1754
+ R g+(u1, u2)du1 = |u2|1−2H+
1755
+ 2 �
1756
+ R f0(u, 1)du where the
1757
+ last integral converges due to (5.2) and µ > υ2
1758
+ υ1 − υ2. Then, V+ = κ+B1,H+
1759
+ 2 follows as in [24, Thm.3.1]. The
1760
+ asymptotic constant κ+ in both cases of µ is given by
1761
+ κ2
1762
+ +
1763
+ =
1764
+
1765
+
1766
+
1767
+
1768
+
1769
+ ℓ �2
1770
+ j=1
1771
+
1772
+ R |(1 − eiu)/u|2|u|1−2H+
1773
+ j du,
1774
+ µ < υ2
1775
+ υ1 − υ2,
1776
+
1777
+ R
1778
+ ��(1 − eiv)/v
1779
+ ��2|v|1−2H+
1780
+ 2 dv ×
1781
+
1782
+ R g+(u, 1)du,
1783
+ µ > υ2
1784
+ υ1 − υ2.
1785
+ (5.7)
1786
+ Case υ1 > 1. In this case, the results do not depend on µ and completely agree with those in Theorem 1,
1787
+ including the proof. This ends the proof of Theorem 3.
1788
+
1789
+ Next, we formalize the meaning of hyperbolic dependence mentioned in the Introduction. Let
1790
+ f(u) = f0,hyp(u)(1 + o(1)),
1791
+ |u| → 0,
1792
+ where
1793
+ f0,hyp(u) := L(u)
1794
+ 2
1795
+
1796
+ i=1
1797
+ |ui|−υi,
1798
+ u ∈ Π2,
1799
+ (5.8)
1800
+ |υi| < 1, i = 1, 2 and
1801
+ L(u) := ˜L
1802
+
1803
+ u1
1804
+ (|u1||υ1| + |u2||υ2|)1/|υ1| ,
1805
+ u2
1806
+ (|u1||υ1| + |u2||υ2|)1/|υ2|
1807
+
1808
+ (5.9)
1809
+ is a generalized invariant function corresponding to generalized homogeneous function ρ(u) = |u1||υ1| +
1810
+ |u2||υ2|, u = (u1, u2) ∈ R2
1811
+ 0. Let
1812
+ Hi := 1
1813
+ 2(1 + υi),
1814
+ i = 1, 2.
1815
+ (5.10)
1816
+ The class in (5.8) includes (separately) fractionally integrated spectral densities �2
1817
+ i=1 |1 − eiui|−υi which play
1818
+ an important role in spatial statistics [5, 14, 17]. If L(u) is separated from 0 and ∞, the spectral density in
1819
+ (5.8) satisfies (spectrum) LRD, ND or LRND properties depending on the sign of υi, i = 1, 2 except that it
1820
+ explodes/vanishes on the coordinate axis ui = 0 when υi ̸= 0 as well and represents a different class from
1821
+ those discussed in Theorems 1-3 Introduce a Gaussian RF
1822
+ V0,hyp(x)
1823
+ :=
1824
+
1825
+ R2
1826
+ 2
1827
+
1828
+ j=1
1829
+ 1 − eiujxj
1830
+ iuj
1831
+
1832
+ f0,hyp(u)Z(du),
1833
+ x ∈ R2
1834
+ +,
1835
+ (5.11)
1836
+ where Z(du) is the same white noise as in (2.12). In the case when L(u) = ℓ > 0 is a constant function, the
1837
+ RF V0,hyp is a multiple of FBS: V0,hyp = κℓBH1,H2 with Hi, i = 1, 2 given in (5.10) and κ > 0 as in (2.17).
1838
+ 18
1839
+
1840
+ Theorem 4. be a stationary linear RF on Z2 in (1.3) with spectral density f in (5.8)-(5.9), where υi ∈
1841
+ (−1, 1), i = 1, 2 and ˜L(x) is a strictly positive continuous function. Then:
1842
+ • The scaling limits in (1.2) exist for any γ > 0 and satisfy
1843
+ Vγ =
1844
+
1845
+
1846
+
1847
+
1848
+
1849
+ κγBH1,H2,
1850
+ γ ̸= |υ1|
1851
+ |υ2|,
1852
+ V0,hyp,
1853
+ γ = |υ1|
1854
+ |υ2|,
1855
+ (5.12)
1856
+ where Hi, i = 1, 2 as in (5.10) and
1857
+ κ2
1858
+ γ :=
1859
+ 2
1860
+
1861
+ j=1
1862
+
1863
+ R
1864
+ |(1 − eiu)/u|2|u|1−2Hjdu ×
1865
+
1866
+
1867
+
1868
+
1869
+
1870
+ L(1, 0),
1871
+ γ > |υ1|
1872
+ |υ2|,
1873
+ L(0, 1),
1874
+ γ < |υ1|
1875
+ |υ2|.
1876
+ (5.13)
1877
+ • The normalization in (1.2) is given by dλ,γ := λH(γ), H(γ) := H1 + γH2.
1878
+ • The RF X does not exhibit scaling transition.
1879
+ We omit the proof of Theorem 4 since it resembles [24] and the previous proofs. The gaussianity criterion
1880
+ (2.4) holds for sign(υ1) = sign(υ2) as in Theorems 1 and 2; for υ1υ2 < 0 from (5.8) we get |�a(u)| ≤
1881
+ C �2
1882
+ i=1 |ui|−υi/2 and (2.4) follows similarly.
1883
+ The absence of scaling transition at γ =
1884
+ |υ1|
1885
+ |υ2| is clear from
1886
+ (5.12)-(5.13) and the fact that L(1, 0) > 0, L(0, 1) > 0 according to the positivity assumption on L (the last
1887
+ conclusion may fail if L(1, 0) and/or L(0, 1) vanish).
1888
+ We end the paper with two remarks on possible extensions of this work.
1889
+ Remark 3. More general scaling schemes and ‘edge effects’. In a more abstract setting, a RF is often indexed
1890
+ by test functions, through which scaling operations are defined; see [8, 9, 11]. Accordingly, one can study
1891
+ anisotropic scaling limits of integrals
1892
+ Sλ,γ(φ) :=
1893
+
1894
+ R2 φ(λ−Γt)X(⌈t⌉)dt
1895
+ (5.14)
1896
+ involving X in (1.3) extended to R2; ⌈t⌉ := (⌈t1⌉, ⌈t2⌉), t = (t1, t2) ∈ R2, and a re-scaled function φ : R2 → R,
1897
+ λ−Γt := (λ−1t1, λ−γt2), Γ := diag(1, γ), for a given γ > 0 and each φ from a class Φ of (test) functions. The
1898
+ sum in (1.1) corresponds to (5.14) with indicator function φ(t) = I(t ∈]0, x]). For (5.14), the scaling limit
1899
+ d−1
1900
+ λ,γ(Sλ,γ(φ) − ESλ,γ(φ))
1901
+ d
1902
+ −→ Vγ(φ) is a RF indexed by φ ∈ Φ. Isotropic (γ = 1) scaling limits in spirit of
1903
+ (5.14) for different classes of RF on Rd were studied in [9, 3, 15] and other papers. Extending Theorems 1-4
1904
+ to scaling limits of integral functionals in (5.14) for suitable class Φ of test functions which include indicator
1905
+ functions φ(t) = I(t ∈ A) of general bounded sets A ⊂ R2 is an interesting problem. Of particular interest
1906
+ is the case of ND RF X, where ‘edge effects’ can be expected following [16, 28], leading to scaling limits Vγ
1907
+ ‘living’ on the boundary of A or the discontinuity set of φ.
1908
+ Remark 4. Incongruous scaling and dependence axis. For linear LRD RF X in (1.3) [20] define the de-
1909
+ pendence axis of X as the direction on the plane along which a(t) decay at the smallest rate. In spectral
1910
+ 19
1911
+
1912
+ terms, we may define the dependence axis as a direction along which the spectral density f(x) grows at the
1913
+ fastest rate when |x| → 0. Under Assumption (F)LRD with υ1 ̸= υ2 such dependence axis coincides with
1914
+ one of the coordinate axes. The scaling in (1.2) involves rectangles with sides parallel to the coordinate axis.
1915
+ Using the terminology in [20] we may say that the scaling in (1.2) and in Theorem 1 is congruous with the
1916
+ dependence axis of X. [20] showed that incongruous scaling of linear LRD RF in (1.3) may dramatically
1917
+ change the scaling limits in (1.2) and the scaling transition point γ0. We expect that the spectrum approach
1918
+ in our paper may lead to a comprehensive treatment of incongruous scaling limits under various dependence
1919
+ assumptions.
1920
+ Acknowledgements
1921
+ The author thanks Vytaut˙e Pilipauskait˙e for useful comments and Remigijus Lapinskas for help with Figure
1922
+ 1 graphs.
1923
+ References
1924
+ [1] Ayache, A., Leger, S. and Pointer, M. (2002) Drap brownien fractionnaire. Potential Anal. 17, 41–53.
1925
+ [2] Bierm´e, H., Meerschaert, M.M. and Scheffler, H.P. (2007) Operator scaling stable random fields. Stoch. Process.
1926
+ Appl. 117, 312-332.
1927
+ [3] Bierm´e, H., Estrade, A. and Kaj, I. (2010) Self-similar random fields and rescaled random balls models. J. Theoret.
1928
+ Probab. 23, 1110–1141.
1929
+ [4] Bierm´e, H., Durieu, O. and Wang, Y. (2017) Invariance principles for operator-scaling Gaussian random fields. Ann.
1930
+ Appl. Probab. 27, 1190–1234.
1931
+ [5] Boissy, Y., Bhattacharyya, B.B., Li, X. and Richardson, G.D. (2005) Parameter estimates for fractional autoregres-
1932
+ sive spatial processes. Ann. Statist. 33, 2533–2567.
1933
+ [6] Damarackas, J. and Paulauskas, V. (2017) Spectral covariance and limit theorems for random fields with infinite
1934
+ variance. J. Multiv. Anal. 153, 156-175.
1935
+ [7] Damarackas, J. and Paulauskas, V. (2021) On Lamperti type limit theorem and scaling transition for random fields.
1936
+ J. Math. Anal. Appl. 497 (1):124852.
1937
+ [8] Dobrushin, R.L. (1979) Gaussian and their subordinated self-similar random generalized fields. Ann. Probab. 7,
1938
+ 1–28.
1939
+ [9] Dobrushin, R.L. (1980) Automodel generalized random fields and their renormgroup. In: R.L. Dobrushin and Ya.G.
1940
+ Sinai (Eds.), Multicomponent Random Systems, pp. 153–198. Dekker, New York.
1941
+ [10] Fyodorov, Y.V., Khoruzhenko, B.A. and Simm, N.J. (2016) Fractional Brownian motion with Hurst index H = 0
1942
+ and the Gaussian unitary ensemble. Ann. Probab. 44, 2980–3031.
1943
+ [11] Gel’fand, I.M. and Vilenkin, N.Ya. (1964) Generalized Functions - Vol.4: Applications of Harmonic Analysis.
1944
+ Academic Press.
1945
+ 20
1946
+
1947
+ [12] Genton, M.G., Perrin, O. and Taqqu, M.S. (2007) Self-similarity and Lamperti transformation for random fields.
1948
+ Stoch. Models 23, 397–411.
1949
+ [13] Giraitis, L., Koul, H.L. and Surgailis, D. Large Sample Inference for Long Memory Processes. Imperial College
1950
+ Press, London, 2012.
1951
+ [14] Guo, H., Lim, C. and Meerschaert, M. (2009) Local Whittle estimator for anisotropic random fields. J. Multiv.
1952
+ Anal. 100, 993–1028.
1953
+ [15] Kaj, I., Leskel¨a, L., Norros, I. and Schmidt, V. (2007) Scaling limits for random fields with long-range dependence.
1954
+ Ann. Probab. 35, 528–550.
1955
+ [16] Lahiri, S.N. and Robinson, P.M. (2016) Central limit theorems for long range dependent spatial linear processes.
1956
+ Bernoulli 22, 345–375.
1957
+ [17] Leonenko, N.N. and Taufer, E. (2013) Disaggregation of spatial autoregressive processes. Spatial Statistics 3, 1–20.
1958
+ [18] Pilipauskait˙e, V. and Surgailis, D. (2016) Anisotropic scaling of random grain model with application to network
1959
+ traffic. J. Appl. Probab. 53, 857–879.
1960
+ [19] Pilipauskait˙e, V. and Surgailis, D. (2017) Scaling transition for nonlinear random fields with long-range dependence.
1961
+ Stochastic Process. Appl. 127, 2751–2779.
1962
+ [20] Pilipauskait˙e, V. and Surgailis, D. (2021) Scaling limits of linear random fields on Z2 with general dependence
1963
+ axis. In: Vares, M.E., Fernandez, R., Fontes, L.R. and Newman, C.M. (eds.) An Out of Equilibrium 3: Celebrating
1964
+ Vladas Sidoravicius. Progress in Probability, pp. 683–710. Birkh¨auser, Basel.
1965
+ [21] Pilipauskait˙e, V. and Surgailis, D. (2022) Local scaling limits of L´evy driven fractional random fields. Bernoulli
1966
+ 28, 2833-2861.
1967
+ [22] Pipiras, V. and Taqqu, M.S. (2017) Long-Range Dependence and Self-Similarity. Cambridge Univ. Press, Cam-
1968
+ bridge.
1969
+ [23] Puplinskait˙e, D. and Surgailis, D. (2016) Aggregation of autoregressive random fields and anisotropic long-range
1970
+ dependence. Bernoulli 22, 2401–2441.
1971
+ [24] Puplinskait˙e, D. and Surgailis, D. (2015) Scaling transition for long-range dependent Gaussian random fields. Stoch.
1972
+ Process. Appl. 125, 2256–2271.
1973
+ [25] Samorodnitsky, G. (2016) Stochastic Processes and Long Range Dependence. Springer, New York.
1974
+ [26] Stein, E.M and Weiss, G. (1971) Introduction to Fourier Analysis on Euclidean Spaces. Princeton, Princeton Univ.
1975
+ Press.
1976
+ [27] Surgailis, D. (2019) Anisotropic scaling limits of long-range dependent linear random fields on Z3. J. Math. Anal.
1977
+ Appl. 472, 328–351.
1978
+ [28] Surgailis, D. (2020) Scaling transition and edge effects for negatively dependent linear random fields on Z2. Stochas-
1979
+ tic Process. Appl. 130, 7518–7546.
1980
+ 21
1981
+
TdA0T4oBgHgl3EQfEP9z/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
VtFIT4oBgHgl3EQfhCtr/content/tmp_files/2301.11286v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
VtFIT4oBgHgl3EQfhCtr/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
XdFLT4oBgHgl3EQfUC-H/content/tmp_files/2301.12047v1.pdf.txt ADDED
@@ -0,0 +1,1631 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Folded Optimization for End-to-End Model-Based Learning
2
+ James Kotary , My H. Dinh and Ferdinando Fioretto
3
+ Syracuse University
4
+ {jkotary, mydinh, ffiorett}@syr.edu
5
+ Abstract
6
+ The integration of constrained optimization models
7
+ as components in deep networks has led to promis-
8
+ ing advances in both these domains. A primary
9
+ challenge in this setting is backpropagation through
10
+ the optimization mapping, which typically lacks
11
+ a closed form. A common approach is unrolling,
12
+ which relies on automatic differentiation through
13
+ the operations of an iterative solver. While flexi-
14
+ ble and general, unrolling can encounter accuracy
15
+ and efficiency issues in practice. These issues can
16
+ be avoided by differentiating the optimization map-
17
+ ping analytically, but current frameworks impose
18
+ rigid requirements on the optimization problem’s
19
+ form. This paper provides theoretical insights into
20
+ the backpropagation of unrolled optimizers, which
21
+ lead to a system for generating equivalent but effi-
22
+ ciently solvable analytical models. Additionally, it
23
+ proposes a unifying view of unrolling and analyti-
24
+ cal differentiation through constrained optimization
25
+ mappings. Experiments over various structured pre-
26
+ diction and decision-focused learning tasks illustrate
27
+ the potential of the approach both computationally
28
+ and in terms of enhanced expressiveness.
29
+ 1
30
+ Introduction
31
+ The integration of optimization mappings in deep networks
32
+ has shown to be an effective framework for enforcing certain
33
+ types of structures in deep models. Here, an optimization prob-
34
+ lem is viewed as a mapping from some undefined parameters
35
+ to a resulting optimal solution. Outputs of the mapping are
36
+ guaranteed to obey the problem’s constraints, which can be
37
+ either predefined or learned [Kotary et al., 2021].
38
+ These approaches are known to increase accuracy and effi-
39
+ ciency on specialized learning tasks by imparting task-specific
40
+ structural knowledge. For example, they have been used
41
+ to design efficient multi-label classifiers based on sparsity-
42
+ enforcing mechanisms [Martins and Astudillo, 2016], learn-
43
+ ing to rank tasks by exploiting simple optimization models
44
+ [Adams and Zemel, 2011], and even enhance decision-focused
45
+ pipelines by the integration of predictive components with op-
46
+ erational decision models [Wilder et al., 2019a].
47
+ While, in general, constrained optimization mappings can
48
+ be used as components in neural networks in a similar man-
49
+ ner to linear layers or activation functions [Amos and Kolter,
50
+ 2017], networks with optimization layers require end-to-end
51
+ training by stochastic gradient descent, which in turn requires
52
+ differentiation of the optimization mappings for backpropaga-
53
+ tion of gradients.
54
+ This poses unique challenges, partly due to their lack of
55
+ a closed form, and modern approaches typically follow one
56
+ of two strategies. In unrolling, an optimization algorithm is
57
+ executed entirely on the computational graph, and backpropa-
58
+ gated by automatic differentiation from optimal solutions to
59
+ the underlying problem parameters. The approach is adaptable
60
+ to many problem classes, but has been shown to suffer from
61
+ time and space inefficiency, as well as vanishing gradients
62
+ [Monga et al., 2021]. Analytical differentiation is a second
63
+ strategy that circumvents those issues by forming analytical
64
+ models for the derivatives of an optimization mapping and
65
+ solving them exactly. However, current frameworks have rigid
66
+ requirements on the form of the optimization problems, re-
67
+ lying on transformations to canonical convex cone programs
68
+ before applying a standardized procedure for their solution and
69
+ differentiation, based on cone programming [Agrawal et al.,
70
+ 2019a]. This system precludes the use of specialized solvers
71
+ that are best-suited to handle various optimization problems,
72
+ and inherently restricts itself only to convex problems.1
73
+ Contributions.
74
+ To address these limitations, this paper pro-
75
+ poses a novel analysis of unrolled optimization, which results
76
+ in closed-form models for the backpropagation of an unrolled
77
+ optimizer. Theoretically, the result is significant because it
78
+ establishes an equivalence between unrolling and analytical
79
+ differentiation, and allows for convergence of the backward
80
+ pass to be analyzed in unrolling. Practically, it allows for
81
+ the forward and backward passes of unrolled optimization
82
+ to be disentangled and solved separately, using blackbox im-
83
+ plementations of specialized algorithms. More specifically,
84
+ the paper makes the following novel contributions: (1) A
85
+ theoretical analysis of unrolling that leads to an efficiently
86
+ solvable closed-form model, whose solution is equivalent to
87
+ the backward pass of an unrolled optimizer. (2) Building on
88
+ this analysis, it proposes a system for generating analytically
89
+ 1A discussion of related work on differentiable optimization and
90
+ decision-focused learning is provided in Appendix A.
91
+ arXiv:2301.12047v1 [cs.LG] 28 Jan 2023
92
+
93
+ differentiable optimizers from unrolled implementations, ac-
94
+ companied by a Python library called fold-opt to facilitate
95
+ automation. (3) Its effectiveness is evaluated on a diverse set
96
+ of end-to-end optimization and learning tasks, demonstrat-
97
+ ing efficiency and modeling advantages compared to current
98
+ differentiable optimization approaches. Importantly, we re-
99
+ port the first demonstration of decision-focused learning with
100
+ nonconvex decision models.
101
+ 2
102
+ Setting and Goals
103
+ In this paper, the goal is to differentiate mappings that are
104
+ defined as the solution to an optimization problem. Consider
105
+ the parameterized problem (1) which defines an optimization
106
+ mapping from a vector of parameters c ∈ Rp to its associated
107
+ optimal solution x⋆(c) ∈ Rn:
108
+ x⋆(c) = argmin
109
+ x
110
+ f(x, c)
111
+ (1a)
112
+ subject to: g(x, c) ≤ 0,
113
+ (1b)
114
+ h(x, c) = 0,
115
+ (1c)
116
+ in which f is the objective function, and g and h are vector-
117
+ valued functions capturing the inequality and equality con-
118
+ straints of the problem, respectively. It is assumed throughout
119
+ that for any c, the associated optimal solution x⋆(c) can be
120
+ found by conventional solution methods, within some toler-
121
+ ance in solver error. This coincides with the “forward pass” of
122
+ the mapping in a neural network. The primary challenge is
123
+ to compute its backward pass, which amounts to finding the
124
+ Jacobian matrix of partial derivatives ∂x⋆(c)
125
+ ∂c
126
+ .
127
+ The parameters c can be thought of as a prediction from
128
+ previous layers of a neural network, or as learnable parameters
129
+ of the problem (1), analogous to the weights of a linear layer,
130
+ or as some combination of both. In any case, ∂x⋆(c)
131
+ ∂c
132
+ is required
133
+ to backpropagate gradients from a downstream loss function
134
+ to the underlying parameters of the machine learning model.
135
+ Backpropagation.
136
+ Backpropagation is the calculation of
137
+ gradients from a downstream loss function L to the param-
138
+ eters of a learning model. The primary goal of this paper
139
+ is backpropagation through the function x⋆(c), as defined
140
+ in Problem (1). The Jacobian matrix of the vector-valued
141
+ function x⋆(c) : Rp → Rn is a matrix ∂x⋆
142
+ ∂c in Rn×p, whose
143
+ elements at (i, j) are the partial derivatives ∂x⋆
144
+ i (c)
145
+ ∂cj . Backpropa-
146
+ gation through x⋆(c) amounts to computing ∂L
147
+ ∂c given ∂x⋆(c)
148
+ ∂c
149
+ ,
150
+ and can be performed by finding the Jacobian-vector product
151
+ ∂L
152
+ ∂c = ∂L
153
+ ∂x⋆ · ∂x⋆(c)
154
+ ∂c
155
+ .
156
+ (2)
157
+ In deep learning, backpropagation is typically accomplished
158
+ through automatic differentiation (AD), which propagates gra-
159
+ dients from the loss function through the arithmetic operations
160
+ and low-level functions of a deep model by repeatedly apply-
161
+ ing the multivariate chain rule. This requires a record of all
162
+ the operations performed during the forward pass and their
163
+ dependencies, known as the computational graph.
164
+ z
165
+ <latexit sha1_base64="p+i9TFiNyAed8OavWLvtitvNs=">AC2XicfVFLb9QwEPaGVwmvFo5cIlZICKFVnN1st7dKcOCKFJ3WymJKseZ3bq1nch2oIuVAzfECYkT/Bd+CP8G
166
+ ZzcHtq0YyZpP38x8nkdecaZNGP7peTdu3rp9Z+uf+/+g4ePtncez3RZKwpTWvJSHedEA2cSpoYZDseVAiJyDkf5+es2fvQRlGalPDTLCjJBFpLNGSXGUbM0F/Zzc7LdDwfhyoKrAHegjzo7ONnp/U6LktYCpKGcaJ3gsDKZJcowyqHx01pDReg5WUDioCQCdGZX7TbBc8cUwbxU7kTrNh/KywRWi9F7jIFMaf6cqwlr43l4jo6qc18klkmq9qApOv/5zUPTBm0KwkKpoAavnS
167
+ AUMXcCAE9JYpQ4xbn+6mET7QUgsjCpgqKJsGZb26jtRizyz4WA8nIz3wlft0uLJ7rAFOIyjYdPHTbMpkfMa1hqbEmE8xjh2lREexjF2IB6FUTRpJVwXb8BtWsE7N97ChQxpXpU6IWglw0tvP/S2Nynea8Ozi+fN6rYBYN8Ggw+jDq7x92p9CT9Ez9AJhtIv20Vt0gKaIojP0Hf1Ev7zE+J9b6tU71eV/MEbZj34y+hJN91</latexit>
168
+ c
169
+ <latexit sha1_base64="fjM2S7p8D7eJZqsS2+r8ZuJM3A=">AC2XicfVFLj9MwEHbDawmvXThyiaiQEKVnTbdclsJDlwQi7TtrpREK8eds3aTmQ7QGXlwA1xQuIE/4Ufwr/B
170
+ aXOguytGsubTNzOf51FUghuL8Z9ecO36jZu3dm6Hd+7eu/9gd+/hzJS1ZjBlpSj1SUENCK5garkVcFJpoLIQcFycv2rjx9BG16qI7uqIJd0qfiCM2o9NcsK6VhzutvHA7y26DIgHeijzg5P93q/s3nJagnKMkGNSQmubO6otpwJaMKsNlBRdk6XkHqoqASTu3W7TfTUM/NoUWr/lI3W7L8VjkpjVrLwmZLaM3Mx1pJXxgp5FZ3WdjHJHVdVbUGxzf+LWkS2jNqVRHOugVmx8oA
171
+ yzf0IETujmjLrFxeGmYJPrJSqrnLNMyblOShc9m671Qvi9zhwXg4Gb/EL9qlJZP9YQsITuJh0ydNsy1RiBo2GtsSOBkTkvjKmAyThHiQjHAcT1oJ38Vr8JvW8NaP964CTW2pn7uM6qWknxvX+f+lcbVJ894fnFw872UwiwdkNBi9H/UPjrT76DH6Al6hgjaRwfoDTpEU8TQB/Qd/US/gjT4EnwNvm1Sg15X8whtWfDjL2j9314=</latexit>
172
+ x?(c)
173
+ <latexit sha1_base64="QqX8Q9tClpnTwMZmRbe/eLEP8FI=">AC6XicfVHdbtMwFHYzfkb4WTe4yaiQhoIVXHadOVuElxwgxjSuk1KQuU4p53V2IlsB1asPAR3iCskruAdeBDe
174
+ BqfNBd0mjmT583e+c3x+0jJnSv+n46zdePmrdvbd9y79+4/2Onu7p2opIUJrTIC3mWEgU5EzDRTOdwVkogPM3hNF28avynH0EqVohjvSwh4WQu2IxRoi017T6KU24u6g+x0kTuNw9aP5t2e37fX5l3FeAW9FBrR9Pdzu84K2jFQWiaE6Ui7Jc6MURqRnOo3bhSUBK6IHOILBSEg0rMqvzae2qZzJsV0h6hvRX7b4QhXKklT62SE32uLvsa8lpfyq+jo0rPxolhoqw0CLr+f1b
175
+ lni68ZkRexiRQnS8tIFQy24JHz4kVNtBum4s4BMtOCciM7GErI5w4hoTr+qO5DxNjN8fDcajl/6LZmjh+GDQAOyHwaDu4breTJHmFaxzbKbwxHGoY0M8CAMsQXh0A+CcZPCVvEa7KQlvLXtvStBEl3I5yYmcs7JRW3a+38yJtYye9uF48vrvQpOgj4e9ofvh73D43b12+gxeoL2EUYH6BC9QUdogij6jL6jn+iXs3C+OF+db2up02ljHqINc378BXMc5Xs=</latexit>
176
+ L(x?)
177
+ <latexit sha1_base64="VNMivXQp7mx3+oNvqOGvP/v5aNA=">AC63icfVHNbtNAEN6Yv2L+UpC4cLGIkApCkdeJ03CrBAcOIrUtJViE63Xk3TV3bW1u4ZGi5+CG+KExAlegQfh
178
+ bVgnPpC2YqTVfPrmZ2e+yUrOtAnDPx3vytVr129s3fRv3b5z9153+/6hLipFYUILXqjGjgTMLEMPhuFRARMbhKDt92cSPoLSrJAHZlCKshCsjmjxDhq1n1oE0p48KbeSTJhz+oPiTZEPZ1e2E/XFlwEeAW9FBr+7Ptzu8kL2glQBrKidZTHJYmtUQZRjnUflJpKAk9JQuYOiJAJ3a1QJ18MQxeTAvlHvSBCv23wpLhNZLkblMQcyJPh9ryEtjmbiMnlZmPk4tk2VlQNL
179
+ 1/OKB6YIGpGCnCmghi8dIFQxt0JAT4gi1DgpfT+R8IkWQhCZ20RBXk9x6lunZDP3VC2y1Ib90WA8ehE+b0SLx7uDBuAwjgZ1D9f1ZouMV7DusdkijEcYx64ywoM4xg7EwzCKxk0LN8UrcEoreOvWe1eCIqZQz2xC1EKQs9q2/n9pTK7TnHcHx+fPexEcRn087A/fD3t7B+3pt9Aj9BjtIx20R56jfbRBFH0GX1HP9EvT3hfvK/et3Wq12lrHqAN8378Bcqs5fk=</latexit>
180
+ unrolling
181
+ solver
182
+ features
183
+ DNN
184
+ prediction
185
+ unfolding
186
+ fixed-point folding
187
+ all operations on the computation graph
188
+ segments of the computation graph
189
+ replaced with precomputed optimization
190
+ steps and derivatives
191
+ blackbox forward pass and
192
+ backward pass, repeated
193
+ Figure 1: Compared to unrolling, unfolding requires fewer oper-
194
+ ations on the computational graph by replacing inner loops with
195
+ Jacobian-vector products. Fixed-point folding models the unfolding
196
+ analytically, allowing for blackbox optimization implementations.
197
+ 3
198
+ From Unrolling to Unfolding
199
+ By unrolling, which performs each arithmetic operation of an
200
+ optimization algorithm on the computational graph, optimiza-
201
+ tions can be backpropagated without explicitly representing
202
+ their Jacobians. However, these graphs can become very large
203
+ after many iterations. The strategy taken in this paper for back-
204
+ propagation through the function x⋆(c), as in unrolling, is to
205
+ backpropagate through the steps of an optimizer which solves
206
+ Problem (1). However, we begin by proposing a variation
207
+ on unrolling in which the optimizer steps are grouped into
208
+ subroutines that can be differentiated analytically.
209
+ This paper considers iterative optimization algorithms,
210
+ which refine an initial starting point x0 by repeated appli-
211
+ cation of an update routine, which we view as a function.
212
+ For optimization variables x ∈ Rn, the update function is a
213
+ vector-valued function U : Rn → Rn:
214
+ xk+1(c) = U(xk(c), c).
215
+ (U)
216
+ and the iteration (U) converges if xk(c) → x⋆(c) as k → ∞.
217
+ Evaluating the update function U may also require the use
218
+ of optimization algorithms that are difficult to differentiate.
219
+ In such cases, a full unrolling of (U) would involve unrolling
220
+ each inner algorithm used in the evaluation of U. However, if
221
+ the Jacobians of U can be modeled analytically, it is possible to
222
+ avoid unrolling each evaluation of U by modeling its backward
223
+ pass with a Jacobian-vector product instead (see Equation (2)).
224
+ Then, only the outer iterations (U) need to be unrolled.
225
+ This type of partial unrolling, which allows for backpropa-
226
+ gating large segments of computation at a time by leveraging
227
+ analytically differentiated subroutines, is referred to as unfold-
228
+ ing. It is made possible by the fact that U is often easier to
229
+ differentiate than the overall optimization mapping x⋆(c).
230
+ There are two main advantages of unfolding over unrolling:
231
+ (1) Analytical differentiation of U allows for removal of the
232
+ inner loops of unrolling, greatly reducing the total number
233
+ of unrolled operations as depicted in Figure 1, which shows
234
+ forward pass steps in red with their corresponding backward
235
+ passes in blue. More importantly, (2) unfolded optimizers can
236
+ be converted to analytically differentiated ones by the method
237
+ proposed in the next sections. Thus, unfolding will be an in-
238
+ termediate step in a system for converting unrolled optimizers
239
+ to analytically differentiated optimization mappings.
240
+
241
+ C
242
+ <latexit sha1_base64="7P0wNS7sPnP507pIcWGiCTvVgPk=">AC3XicfVFLb9
243
+ QwEPaGVxsebeHIJWKFhBaxdnNdrlVKgcuiCJ124okWjnO7Naq7US2A6ysHLkhTkicyj/h/BvcHZzYNuKkaz59M3M53nkFWfahOGfnfr9p2797a2/fsPHj7a2d17fKLW
244
+ lGY0pKX6iwnGjiTMDXMcDirFBCRczjNLw7b+OknUJqV8tgsK8gEWUg2Z5QYR31MBTHn+dweNrPdfjgIVxZcB7gDfdTZ0Wyv9zstSloLkIZyonWCw8pklijDKIfGT2sNFaE
245
+ XZAGJg5I0JldtdwEzx1TBPNSuSdNsGL/rbBEaL0UuctsW9RXYy15YywXN9FJbeaTzDJZ1QYkXf8/r3lgyqBdS1AwBdTwpQOEKuZGCOg5UYQatzfTyV8pqUQRBY2VA0Cc
246
+ 58a9NV34la5JkNB+PhZPw6fNUuLZ7sD1uAwzgaNn3cNJsSOa9hrbEpEcZjGNXGeFhHGMH4lEYRZNWwnXxBtymFbxz472vQBFTqpc2JWohyJfGdv5/aUyu05x3B8dXz3sdn
247
+ EQDPBqMPoz6B8fd6bfQU/QMvUAY7aMD9BYdoSmiSKIf6BL98mbeV+b932d6vW6midow7yfwG8B+EJ</latexit>
248
+ x?(c)
249
+ <latexit sha1_base64="3ckA8eZPznDkintZMrz5KVF+jE=">AC63icfVHdbt
250
+ MwFHbD3wh/HUjcBOokAZClZ02XbmbxC64QyJbpOaUDnOaWctdiLbgVUmT8Ed4gqJK3gFHoS3wWlzQbeJI1n+/J3vHJ+ftMy5Nhj/6XhXrl67fmPrpn/r9p2797rb9w91U
251
+ SkGE1bkhTpOqYacS5gYbnI4LhVQkeZwlJ6+avxH0FpXsj3ZlCIuhC8jln1Dhq1n0Yp8Ke1R9ibaiKH+80T1Y/m3V7uI9XFlwEpAU91NrBbLvzO84KVgmQhuVU6ynBpUk
252
+ sVYazHGo/rjSUlJ3SBUwdlFSATuyqgTp46pgsmBfKHWmCFftvhKVC6VInVJQc6LP+xryUl8qLqOnlZmPE8tlWRmQbP3/vMoDUwTNkIKMK2AmXzpAmeKuhYCdUEWZcaP0/V
253
+ jCJ1YIQWVmYwVZPSWJb28qnuqFmlicX80GI9e4hfN0KLx7qABEfhoO6Rut5MkeYVrHNspsDRiJDIRYZkEXEgWiIw3DcpHBV7IObtI3r23JShqCvXcxlQtBD2rbXv/T
254
+ 8blWuZut3Byfr0XwWHYJ8P+8N2wt7frn4LPUJP0A4iaBftodfoAE0Q5/Rd/QT/fKE98X76n1bS71OG/MAbZj34y/5meX8</latexit>
255
+ c
256
+ <latexit sha1_base64="hFziEyYNZehSvDuQr+eBwd6Ri8c=">AC2XicfVFLj9
257
+ MwEHbDawmvXThyiaiQEKVnTbdcluJPXBLBLtrpREK8eds3aTmQ7QGXlwA1xQuIE/4Ufwr/BaXOguytGsubTNzOf51FUghuL8Z9ecO36jZu3dm6Hd+7eu/9gd+/hzJS1Z
258
+ jBlpSj1SUENCK5garkVcFJpoLIQcFycv2rjx9BG16q93ZVQS7pUvEFZ9R6apYV0rHmdLePB3ht0WVAOtBHnR2d7vV+Z/OS1RKUZYIakxJc2dxRbTkT0IRZbaCi7JwuIfV
259
+ QUQkmd+t2m+ipZ+bRotT+KRut2X8rHJXGrGThMyW1Z+ZirCWvjBXyKjqt7WKSO6q2oJim/8XtYhsGbUrieZcA7Ni5QFlmvsRInZGNWXWLy4MwWfWCklVXOXaZg3KclD57
260
+ J136leFrnDg/FwMn6JX7RLSyb7wxYQnMTDpk+aZluiEDVsNLYlcDImJPGVMRkmCfEgGeE4nrQSvotD8JvW8MaP97YCTW2pn7uM6qWknxvX+f+lcbVJ894fnFw872UwiwdkN
261
+ Bi9G/UPDrvT76DH6Al6hgjaRwfoNTpCU8TQB/Qd/US/gjT4EnwNvm1Sg15X8whtWfDjL2Qt304=</latexit>
262
+ blackbox
263
+ solver
264
+ c
265
+ <latexit sha1_base64="cKvg1rNKugnWhyp9AJscQnT/g6Y=">ADBHicfVHLjt
266
+ MwFHXDawivDizZWFRICKFit0fu5GYBRvEINGZkZqoctzbjWxE9kOUFnZ8gf8BTvECsEKPoK/welDoqOBK0U+OvfcE/vctMiEsYT8bgRXrl67fmPvZnjr9p2795r7949NX
267
+ moOY5nuT5NmYFMKBhbYTM4LTQwmWZwkp6/qPsn70Abkau3dlAItlCibngzHpq2nzu4pXJRC/SxJH2gNBRNHy2BoPRBvQHVZxKx6tq2mxtRXgrwlsRpm2yqhba1NF0v/E
268
+ 9nuW8lKAsz5gxE0oKmzimreAZVGFcGigYP2cLmHiomASTuNWlKvzYMzM8z7X/lMUr9u8Jx6QxS5l6pWT2zFzs1eSlvVReRk9KOx8mTqitKD4+v/zMsM2x3V6eCY0cJstPW
269
+ BcC/8EzM+YZtz6jMwVvCe51IyNXOxhlk1oUnoLkTc7w7I+KTJSQaDro1oCTqdKsWrapdizQrYe2xa0GiPqWRn+zQbhRD6Ie6XSGtYW/xSH4pDW8s97XYBmNtdPXcz0Q
270
+ rIPlduc/5MJtZb50y98u1X8b3DcadNeu/em1zo43Kx+Dz1Ej9ATRNEAHaCX6AiNEUef0Df0E/0KPgafgy/B17U0aGxmHqCdCn78AWAC7QM=</latexit>
271
+ S
272
+ <latexit sha1_base64="WNBemNjLR75N6QRtAoG/Cbx5kw=">ADCXicfVHdih
273
+ MxGE3Hv7X+bFcvRksgoiUpPT6d2Ce+GNuKLdXZgZSiZNu2GTzJBk3C1hnsA38C28E0EQvNI38G3MtL1wu4sfDmc7+SbL+cUFWfaQPin4924ev2nZ273Xv3Hzc7e09O
274
+ tJlrQidkJKX6qTAmnIm6cQw+lJpSgWBafHxdmrtn/8kSrNSvnBLCuaC7yQbM4INo6a9kKbrYakalHkFg5gNI5H0Us4iGAQjKAD43ESxnGTCWxOCeb2fdNMe32nXJV/FaA
275
+ N6INHU73Ot+zWUlqQaUhHGudIliZ3GJlGOG06Wa1phUmZ3hBUwclFlTndrVZ4z9zMyfl8p90vgr9t8bFgutl6JwynZJvd1ryWt7hbiOTmszT3LZFUbKsn6/Oa+6b0Ww
276
+ v9GVOUGL50ABPF3BN8coVJsYZ3e1mkp6TUgsZzZTdNakKO/aLZ/jInHrb0QRskoaAGC0TBo+qhpLo8oeE3XM7ajihFqoxqiIpQm1kIh8OkHeG2OKDOaUXfuOe9rajCp
277
+ lQvbIbVQuCLxm7O/8mYXMvc6QJH2/FeBUfDAQoH4buwv3+wiX4HPAFPwXOAwAjsg9fgEwAZ/BD/AL/PY+eV+8r963tdTrbO48BpfK+/kXju0g=</latexit>
278
+ PC
279
+ <latexit sha1_base64="iz75+hzfC/1x0fFjeaHiLRjY4fs=">ADCHicfVHdih
280
+ MxGE3Hv7X+dLb4JFEJGSdDrT9m5h98IbsYLdXegMJZOm3bCTzJBkdEvIC/gGvoV34oUIXukj+DZm+gNuWf0g5HC+ky/JOVmZc20Q+t0Ibty8dfvO3t3mvfsPHj5q7R+c6
281
+ KJSlI1pkRfqLCOa5VyseEmZ2elYkRkOTvNLo7q/ul7pjQv5DuzLFkqyELyOafEeGraCm2yGjJRiy1qIOiYdyPXqJOhMKwjzwYDge9OHajqU0yY+c9NWeyuEWyHcCiH
282
+ uoFW1waZG0/3Gt2RW0EowaWhOtJ5gVJrUEmU4zZlrJpVmJaEXZMEmHkoimE7t6mEOPvPMDM4L5Zc0cMX+fcISofVSZF4piDnXu72avLaXievoSWXmg9RyWVaGSbq+f17l0B
283
+ SwdhDOuGLU5EsPCFXcfwHSc6INd7nZjOR7AMthCByZhPFZm6C06bdsTkOB/GwdhehaNAPa4BR1A1dGzt3dUSWV2w9YzepGOM6qS4OowjXkfVQtzuoR/hXHDPvtGKv/fel
284
+ EwRU6gXNiFqIcils5v9fzIu1zK/+8C3qcJ/g5NuB/c6vbe9uHxJvo98AQ8Bc8Bn1wCF6BERgDCj6B7+An+BV8D4HX4Kva2nQ2Jx5DK5U8OMP5n/usA=</latexit>
285
+ x?(c) + ↵rf(x?(c), c)
286
+ <latexit sha1_base64="jbAd3TgtePBJrbvA6QF8U1ipV0g=">ADGXicfVHLbhMxFHWmPEp4N
287
+ C1LNhYRUoAqGieZNOwqkQUbRJFIWykzRHcJxnV9oxsDzSy5kv4A/6BTvEColV+Ro8ySxIG3Ely8fnt9H3HGE218/6rm7dy6fefu7r36/QcPH+019g9OdZorykY05ak6j0Eznkg2Monh7DxTDETM2V
288
+ l8br0n31iSiep/GCWGYsEzGUySygYR0awzAW9rL4GoDqlU+aPEcv8Qh8GwBOJQc8Cz1jbZIa7ApNH02/7K8E1AKtBElZ1M9mvfwmlKc8GkoRy0HhM/M5EFZRLKWVEPc80yoBcwZ2MHJQimI7tqt8DP
289
+ HDPFs1S5Iw1esf9GWBaL0XslALMQl/3leRWXy20ePczAaRTWSWGybp+v9ZzrFJcTlSPE0Uo4YvHQCqEtcCpgtQI0bfL0eSvaZpkKAnNpQsWkxJlHd2nBV91jN48j67X530H/lH5ZDCwZH3RIQP+h0i
290
+ yYpis0UMc/ZOsdmCj/oExK4yA7pBgFxIOj5nc6gTOGqGDI3acXeuvbeZUyBSdULG4KaC7gsbHX/T5bItczdbuHk+npvgtNOm/Tavfe95vGwWv0ueoKeohYi6AgdozfoBI0QRV/RL3SF/nhfvO/eD+/nWu
291
+ rVqpjHaMO838Bom34IQ=</latexit>
292
+ Figure 2: Unfolding Projected Gradient Descent at x⋆ consists of
293
+ alternating gradient step S with projection PC. Each function’s
294
+ forward and backward pass are in blue and red, respectively.
295
+ Definition 1 (Unfolding). An unfolded differentiable optimiza-
296
+ tion of the form (U) is one in which the backpropagation of U
297
+ at each step does not require unrolling an iterative algorithm.
298
+ It is worth noting that when U has a closed-form formula
299
+ and is easy to differentiate, its derivative rule may be natively
300
+ implemented in an automatic differentiation environment (e.g.
301
+ PyTorch). In such cases, there is no distinction between un-
302
+ rolling and unfolding the iteration (U). Therefore, a method for
303
+ converting an unfolding of (U) into analytical differentiation
304
+ (proposed in Section 5) can also be applied to fully unrolled
305
+ optimizers by applying it successively from the innermost to
306
+ the outermost loop of the unrolling. Alternatively, when U
307
+ consists of a nontrivial optimization with known derivatives,
308
+ such as a Quadratic Program, it can be differentiated directly
309
+ to produce an unfolding of (U), as exemplified next.
310
+ Inner Optimizations
311
+ When U is nontrivial to evaluate, it can be viewed as the
312
+ following composition of functions:
313
+ U(xk(c), c) := ¯U (O(S(xk(c), c)), xk(c), c) ,
314
+ (O)
315
+ wherein the primary difficulty in differentiating U is posed
316
+ by the inner optimization sub-routine O : Rn → Rn. Prior
317
+ to O, a setup step S is commonly performed, viewed as a
318
+ differentiable closed-form function, such as a gradient descent
319
+ step. If O can be differentiated analytically, then so can U,
320
+ by applying a chain rule through Equation (O). This can be
321
+ easily automated using automatic differentiation tools like
322
+ PyTorch. This implementation style aligns with the goal of
323
+ the paper, which is to convert unrolled optimizers to ones with
324
+ analytically modeled Jacobians by replacing unrolled loops
325
+ with equivalent closed-form models.
326
+ The next three examples illustrate the unfolding concept,
327
+ highlighting the roles of U, O and S. They will be used to
328
+ construct analytically differentiable optimization mappings
329
+ for a variety of learning tasks in Section 6. The role of these
330
+ components is also depicted in Figure 2 which illustrates one
331
+ iteration of unfolding projected gradient descent.
332
+ Projected gradient descent
333
+ Given a problem
334
+ min
335
+ x∈C f(x)
336
+ (3)
337
+ where f is differentiable and C is a convex set, Projected
338
+ Gradient Descent (PGD) follows the update function
339
+ xk+1 = PC(xk − αk∇f(xk)),
340
+ (4)
341
+ where O = PC is the Euclidean projection onto C, and
342
+ S(x) = x−α∇f(x) is a gradient descent step w.r.t. the objec-
343
+ tive function. The operation ∇f(x) is typically in closed-form
344
+ and easy to differentiate; the projection is the main difficulty
345
+ and depends on the set C. Many simple C have differen-
346
+ tiable closed form projections to facilitate unfolding of (4) (see
347
+ [Beck, 2017]). Further, when C is linear, PC is a quadratic
348
+ programming (QP) problem for which a differentiable solver
349
+ qpth is available from Amos and Kolter [2017].
350
+ Proximal gradient descent
351
+ More generally, to solve prob-
352
+ lems of the form
353
+ min
354
+ x
355
+ f(x) + g(x)
356
+ (5)
357
+ where f is differentiable and g is a closed convex function,
358
+ proximal gradient descent follows the update function
359
+ xk+1 = Proxαkg (xk − αk∇f(xk)) .
360
+ (6)
361
+ Here O is the proximal operator, defined as
362
+ Proxg(x) = argmin
363
+ y
364
+
365
+ g(y) + 1
366
+ 2∥y − x∥2
367
+
368
+ ,
369
+ (7)
370
+ and its difficulty depends on g. Many simple proximal opera-
371
+ tors can be represented in closed form and have simple deriva-
372
+ tives. For example, when g(x) = λ∥x∥1, then Proxg = Tλ(x)
373
+ is the soft thresholding operator, whose closed-form formula
374
+ and derivative are given in Appendix C.
375
+ Sequential quadratic programming
376
+ A broadly applicable
377
+ method for solving continuous optimization problems is Se-
378
+ quential Quadratic Programming (SQP). SQP solves the over-
379
+ all optimization Problem (1) by approximating it at each step
380
+ by a QP problem, whose objective is a second-order approx-
381
+ imation of the problem’s Lagrangian function, subject to a
382
+ linearization of its constraints. SQP is well-suited for com-
383
+ posing unfolded optimizers, as it can solve a broad class of
384
+ convex and nonconvex problems and can readily be unfolded
385
+ by implementing its QP step (shown in Appendix C) with the
386
+ differentiable qpth solver [Amos and Kolter, 2017].
387
+ Alternating
388
+ Direction
389
+ Method
390
+ of
391
+ Multipliers
392
+ The
393
+ ADMM-based QP solver of Boyd et al. [2011] is detailed
394
+ in Appendix C. It can be unfolded easily, as its inner
395
+ optimization step is a simpler equality-constrained quadratic
396
+ program, whose solution as linear system of equations has
397
+ a by AD natively in PyTorch. Once converted to analytical
398
+ differentiation (see Section 5), it can be used in place of qpth
399
+ in the PGD and SQP examples above to compose analytical
400
+ backward passes.
401
+ The examples presented above illustrate a common pattern
402
+ in the construction of general-purpose optimization algorithms
403
+ (U), which is to build them from simpler models that can be
404
+ solved by more basic optimizers, such as O. This paper is
405
+ motivated by a similar principle for constructing differentiable
406
+ optimizers. When the inner optimizations of an update func-
407
+ tion U can be differentiated analytically, the only iteration
408
+ that relies on backpropagation by automatic differentiation
409
+ is the outermost one (U). This conversion from unrolling to
410
+ unfolding facilitates the next step: the conversion from unfold-
411
+ ing to an efficient and fully analytical differentiation. This
412
+ approach, called folded optimization, is the main contribution
413
+ of the paper and will be developed next.
414
+
415
+ asas
416
+ acaPc
417
+ as0
418
+ 10
419
+ 20
420
+ 30
421
+ 40
422
+ 50
423
+ 60
424
+ 70
425
+ Unfolded PGD Iteration
426
+ 0.0
427
+ 0.2
428
+ 0.4
429
+ 0.6
430
+ 0.8
431
+ 1.0
432
+ Relative L1 Error
433
+ Fwd. Pass: x0 =
434
+ Fwd. Pass: x0 = x
435
+ Bwd. Pass: x0 =
436
+ Bwd. Pass: x0 = x
437
+ Figure 3: Forward and backward pass error in unfolding PGD
438
+ 4
439
+ Unfolding at a Fixed Point
440
+ In the previous section, unfolded optimization was introduced
441
+ as a variation of unrolling in which the inner iterative subrou-
442
+ tines are differentiated analytically, while the outer iteration of
443
+ (U) is still “unrolled”. Next, it will be demonstrated that this
444
+ outer unrolling can be replaced with a simple linear system
445
+ of equations, which model the Jacobians of the overall opti-
446
+ mization mapping x⋆(c). Additionally, it will be shown that
447
+ unfolding (U) is computationally equivalent to solving this
448
+ linear system using a well-known iterative method. By model-
449
+ ing the unfolding process in closed form, it will be possible to
450
+ solve the resulting linear model using improved methods.
451
+ Optimization procedures of the form (U) generally require
452
+ a starting point x0, which is often chosen arbitrarily, since
453
+ forward-pass convergence xk → x⋆ is guaranteed regardless
454
+ of starting point. It is natural to then ask how the choice of x0
455
+ affects the convergence of the backward pass.
456
+ Definition 2. Suppose that an unfolded iteration (U) produces
457
+ a convergent sequence of solution iterates limk→∞xk = x⋆
458
+ in its forward pass. Then convergence of the backward pass is
459
+ limk→∞
460
+ ∂xk
461
+ ∂c (c) = ∂x⋆
462
+ ∂c (c).
463
+ (8)
464
+ Effect of the starting point on backpropagation.
465
+ Con-
466
+ sider the optimization mapping (20) which maps feature em-
467
+ beddings to smooth top-k class predictions, and will be used
468
+ to learn multilabel classification later in Section 6. A loss
469
+ function L targets ground-truth top-k indicators, and the result
470
+ of the backward pass is the gradient ∂L
471
+ ∂c . To evaluate back-
472
+ ward pass convergence in unfolding of this optimization, we
473
+ measure the relative L1 errors of the forward and backward
474
+ passes w.r.t. the equivalent result at full convergence. We con-
475
+ sider two starting points: the precomputed optimal solution
476
+ xa
477
+ 0 = x⋆, and a uniform random vector xb
478
+ 0 = η ∼ U(0, 1).
479
+ The former case is illustrated in Figure 2, in which xk remains
480
+ stationary at each step.
481
+ Figure 3 reports the errors of the forward and backward pass
482
+ at each iteration of the unfolded PGD under these two starting
483
+ points. The figure shows that when starting the unfolding
484
+ from the precomputed optimal solution xa
485
+ 0 the forward pass
486
+ error remains within error tolerance to zero. This is because
487
+ x⋆(c) = U(x⋆(c), c) is a fixed point of (U). Interestingly
488
+ though, the backward pass also converges, but at a slightly
489
+ faster rate than when starting from a random vector xb
490
+ 0.
491
+ Next, we show that this phenomenon holds in general: when
492
+ an unfolded optimizer is iterated at a precomputed optimal
493
+ solution, its backward pass converges. The proof elucidates
494
+ a connection between unrolling and analytical differentiation
495
+ of the fixed-point conditions of the iteration (U). First, some
496
+ practical remarks on unfolding at the precomputed optimum.
497
+ Fixed-Point Unfolding: Forward Pass
498
+ Notice that using a precomputed optimum for unfolding is not
499
+ the most efficient method, as it requires solving the optimiza-
500
+ tion problem to completion before unfolding, which requires
501
+ its own additional solver iterations (U). However, since the
502
+ optimal solution x⋆ is a fixed point of the iteration (U), all
503
+ iterations will result in xk = x⋆. Furthermore, since x⋆ is
504
+ already known and U(x⋆) = x⋆, there is no need to evaluate
505
+ the forward pass of U at each unfolded iteration, including any
506
+ associated inner optimization problems.
507
+ This means that the forward optimization step of the map-
508
+ ping c → x⋆(c) can be implemented as a blackbox software,
509
+ using any solver algorithm, without the need for it to be the
510
+ same as the algorithm used for backpropagation. This enables
511
+ the use of highly optimized solvers, such as Gurobi, and is
512
+ a major advantage over other solvers such as cvxpy, which
513
+ requires conversion of any problem to a convex cone program
514
+ before solving it with a specialized operator-splitting solver,
515
+ rendering it inefficient for many optimization problems.
516
+ Fixed-Point Unfolding: Backward Pass
517
+ Notice also that the backward pass of a fixed point unfolding
518
+ of (U) does need to be computed, as seen in Figure 3, to
519
+ produce gradients for backpropagation. However, since the
520
+ xk are fixed at each iteration, so is the associated Jacobian
521
+ operator ∂U(xk)
522
+ ∂xk
523
+ = ∂U(x⋆)
524
+ ∂x⋆ , used to model the backward pass
525
+ of the update function U. Therefore the calculation of this
526
+ Jacobian need be performed only once. What then remains
527
+ is only to iterate this backward pass until convergence to an
528
+ accurate gradient of x⋆(c); we will show that this is equivalent
529
+ to the algorithm (LFPI) of the following Lemma, which we
530
+ call Linear Fixed-Point Iteration.
531
+ Lemma 1 (Quarteroni et al. [2010]). Let B ∈ Rn×n and
532
+ b ∈ Rn. For any z0 ∈ Rn, the iteration
533
+ zk+1 = Bzk + b
534
+ (LFPI)
535
+ converges to the solution z of the linear system z = Bz + b
536
+ whenever B is nonsingular and has spectral radius ρ(B) < 1.
537
+ Furthermore, the asymptotic convergence rate for zk → z is
538
+ − log ρ(B).
539
+ (9)
540
+ LFPI is a foundational iterative linear system solver, and can be
541
+ applied to any linear system Ax=b by rearranging z=Bz+b
542
+ and identifying A=I−B. To proceed, we derive an analytical
543
+ model for the desired gradients ∂x⋆
544
+ ∂c (c). Consider the fixed-
545
+ point conditions of the iteration (U), assuming xk → x⋆:
546
+ x⋆(c) = U(x⋆(c), c)
547
+ (FP)
548
+ Differentiating (FP) with respect to c,
549
+ ∂x⋆
550
+ ∂c (c) = ∂U
551
+ ∂x⋆ (x⋆(c), c)
552
+
553
+ ��
554
+
555
+ Φ
556
+ ·∂x⋆
557
+ ∂c (c) + ∂U
558
+ ∂c (x⋆(c), c)
559
+
560
+ ��
561
+
562
+ Ψ
563
+ , (10)
564
+
565
+ by the chain rule and recognizing the implicit and explicit
566
+ dependence of U on the independent parameters c. Equation
567
+ (10) will be called the differential fixed-point conditions. Rear-
568
+ ranging (10), the desired ∂x⋆
569
+ ∂c (c) can be found in terms of Φ
570
+ and Ψ as defined above, to yield the system (DFP) below.
571
+ The results discussed next operate under the mild assump-
572
+ tions that x⋆:Rn →Rn is differentiable in an open set C, and
573
+ Equation (FP) holds for c ∈ C. Additionally U is assumed
574
+ differentiable on an open set containing the point (x⋆(c), c).
575
+ Lemma 2. When I is the identity operator and Φ nonsingular,
576
+ (I − Φ)∂x⋆
577
+ ∂c = Ψ.
578
+ (DFP)
579
+ The result follows from the Implicit Function theorem
580
+ [Munkres, 2018], and implies that the Jacobian ∂x⋆
581
+ ∂c can be
582
+ found as the solution to a linear system once the prerequisite
583
+ partial derivatives of U are known. Note that any algorithm
584
+ which solves Problem (1) can be used to create fixed-point
585
+ conditions for backpropagation, and does not have to be the
586
+ same as the blackbox algorithm that solves the forward pass
587
+ mapping c → x⋆(c). In this spirit, the technique differs from
588
+ early proposals of [Amos and Kolter, 2017] and [Wilder et
589
+ al., 2019b], which apply implicit differentiation of a quadratic
590
+ program’s KKT conditions of optimality or the fixed-point
591
+ conditions of a K-means clustering algorithm, respectively.
592
+ 5
593
+ Folded Optimization
594
+ We are now ready to discuss the central result of the paper.
595
+ Informally, it states that the backward pass of an iterative
596
+ solver (U), unfolded at a precomputed optimal solution x⋆(c),
597
+ is equivalent to solving the linear equations (DFP) using linear
598
+ fixed-point iteration, as outlined in Lemma 1.
599
+ This has significant implications for unrolling optimization.
600
+ It shows that unrolling is computationally equivalent to solving
601
+ closed-form equations using a specific algorithm and does not
602
+ require automatic differentiation. It also provides new insights
603
+ into the convergence properties of the backward pass, which
604
+ also apply to situations where the starting point, x0, is not
605
+ precomputed. Finally, it highlights that the backpropagation
606
+ of unrolled optimization is generally suboptimal in terms of
607
+ efficiency, since more efficient algorithms can be used to solve
608
+ (DFP) in place of its inherent LFPI implementation.
609
+ The following results hold under the mild assumptions that
610
+ the parameterized optimization mapping x⋆ converges for
611
+ certain parameters c through a sequence of iterates xk(c) →
612
+ x⋆(c) using algorithm (U), and that Φ is nonsingular with a
613
+ spectral radius ρ(Φ) < 1.
614
+ Theorem 1. The backward pass of an unfolding of algorithm
615
+ (U), starting at the point xk = x⋆, is equivalent to linear fixed-
616
+ point iteration on the linear system (DFP), and will converge
617
+ to its unique solution at an asymptotic rate of
618
+ − log ρ(Φ).
619
+ (11)
620
+ Proof. Since U converges given any parameters c ∈ C, Equa-
621
+ tion (FP) holds for any c ∈ C. Together with the assumption
622
+ the U is differentiable on a neighborhood of (x⋆(c), c),
623
+ (I − Φ)∂x⋆
624
+ ∂c = Ψ
625
+ (12)
626
+ holds by Lemma 2. When (U) is unfolded, its backpropagation
627
+ rule can be derived by differentiating its update rule:
628
+
629
+ ∂c [ xk+1(c) ] = ∂
630
+ ∂c [ U(xk(c), c) ]
631
+ (13a)
632
+ ∂xk+1
633
+ ∂c
634
+ (c) = ∂U
635
+ ∂xk
636
+ ∂xk
637
+ ∂c + ∂U
638
+ ∂c ,
639
+ (13b)
640
+ where all terms on the right-hand side are evaluated at c and
641
+ xk(c). Note that in the base case k = 0, since in general x0 is
642
+ arbitrary and does not depend on c, ∂x0
643
+ ∂c = 0 and
644
+ ∂x1
645
+ ∂c (c) = ∂U
646
+ ∂c (x0, c).
647
+ (14)
648
+ This holds also when x0 =x⋆ w.r.t. backpropagation of (U),
649
+ since x⋆ is precomputed outside the computational graph of
650
+ its unfolding. Now since x⋆ is a fixed point of (U),
651
+ xk(c) = x⋆(c)
652
+ ∀k ≥ 0,
653
+ (15)
654
+ which implies
655
+ ∂U
656
+ ∂xk
657
+ (xk(c), c) = ∂U
658
+ ∂x⋆ (x⋆(c), c) = Φ,
659
+ ∀k ≥ 0
660
+ (16a)
661
+ ∂U
662
+ ∂c (xk(c), c) = ∂U
663
+ ∂c (x⋆(c), c) = Ψ,
664
+ ∀k ≥ 0.
665
+ (16b)
666
+ Letting Jk := ∂xk
667
+ ∂c (c), the rule (13b) for unfolding at a fixed
668
+ point x⋆ becomes, along with initial conditions (14),
669
+ J0 = Ψ
670
+ (17a)
671
+ Jk+1 = ΦJk + Ψ.
672
+ (17b)
673
+ The result then hold by direct application of Lemma 1 to (17),
674
+ recognizing zk = Jk , B = Φ and z0 = b = Ψ.
675
+ The following is a direct result from the proof of Theorem 1.
676
+ Corollary 1. Backpropagation of the fixed-point unfolding
677
+ consists of the following rule:
678
+ J0 = Ψ
679
+ (18a)
680
+ Jk+1 = ΦJk + Ψ,
681
+ (18b)
682
+ where Jk := ∂xk
683
+ ∂c (c).
684
+ The proof illustrates that in the LFPI applied through fixed-
685
+ point unfolding, the initial iterate is J0 = Ψ. However, con-
686
+ vergence to the true Jacobian is guaranteed regardless of the
687
+ initial iterate.
688
+ Figure 4 shows a simplified computational graph of unfold-
689
+ ing the iteration (U) at a precomputed fixed point x⋆. Forward
690
+ pass operations are shown in blue arrows, and consist of re-
691
+ peated application of the update function U. Its first input,
692
+ x⋆(c), is produced by the previous call to U while the second
693
+ input c is always at the base of the graph. The corresponding
694
+ backward passes are shown in red, as viewed through the Ja-
695
+ cobians ∂U
696
+ ∂x and ∂U
697
+ ∂c , which equal Φ and Ψ at each iteration
698
+ since xk = x⋆ ∀k. Comparing to Equation (13b), notice that
699
+ ∂U
700
+ ∂c contributes to the chain rule additively while ∂U
701
+ ∂x does so
702
+ multiplicatively. This causes the resulting multivariate chain
703
+ rule to take the linear fixed-point iteration form of Lemma 1.
704
+ Theorem 1 specifically applies to the case where the ini-
705
+ tial iterate is the precomputed optimal solution, x0 = x⋆,
706
+
707
+ Figure 4: Computational graph for unfolding three iterations of (U)
708
+ at a precomputed optimal solution x⋆
709
+ however, it also has implications for the general case where
710
+ x0 is arbitrary. If the forward pass converges, meaning that
711
+ xk → x⋆ as k → ∞, this case becomes identical to the one
712
+ proved in Theorem 1 and a similar asymptotic convergence
713
+ result applies. If xk → x⋆ and Φ is a nonsingular operator
714
+ with ρ(Φ) < 1, the following result holds.
715
+ Corollary 2. When the parametric Problem (1) can be solved
716
+ by an iterative algorithm of the form (U) and the forward pass
717
+ of the unfolded algorithm converges, the backward pass con-
718
+ verges at an asymptotic rate that is bounded by − log ρ(Φ).
719
+ The result above helps explain why the forward and back-
720
+ ward pass in the experiment of Section 4 converge at different
721
+ rates. If the forward pass converges faster, the overall conver-
722
+ gence rate of an unfolding is limited by that of the backward
723
+ pass. Even if the initial point x0 is close to the optimal solution
724
+ x⋆, the backward pass still needs to converge at its own rate.
725
+ Therefore, warm-starting the LFPI applied in the backward
726
+ pass from previous steps can help accelerate training.
727
+ More generally, these results are applicable to backward
728
+ pass convergence of any unrolled optimization by applying
729
+ them at the level of each iterative procedure that is unrolled.
730
+ Fixed-Point Folding
731
+ Section 4 showed that unfolding from a precomputed optimal
732
+ solution yields the same gradients as when unfolding from
733
+ an arbitrary point. However, this method is less efficient as
734
+ it includes unnecessary forward-pass optimization steps at
735
+ the optimum. Section 5 proved that the backward pass of
736
+ this fixed-point unfolding is equivalent to directly solving the
737
+ differential fixed-point conditions (DFP) using LFPI.
738
+ To improve efficiency and building on these findings, we
739
+ propose to replace unfolding at the fixed-point x⋆ with the an-
740
+ alytical solution of (DFP). This technique leads to fixed-point
741
+ folding, a system for converting any unrolled implementation
742
+ of an optimization method (U) into a folded optimizer that
743
+ eliminates unrolling entirely.
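+ To illustrate the resulting structure (a minimal PyTorch sketch of
+ the idea, not the fold-opt implementation; solve and jacobians are
+ assumed user-supplied callables returning x⋆(c) and the pair (Φ, Ψ),
+ and the layer is invoked as FoldedLayer.apply(c, solve, jacobians)):
+ import torch
+
+ class FoldedLayer(torch.autograd.Function):
+     @staticmethod
+     def forward(ctx, c, solve, jacobians):
+         x_star = solve(c)                 # any blackbox forward-pass solver
+         Phi, Psi = jacobians(x_star, c)   # dU/dx and dU/dc at the fixed point
+         ctx.save_for_backward(Phi, Psi)
+         return x_star
+
+     @staticmethod
+     def backward(ctx, grad_out):
+         Phi, Psi = ctx.saved_tensors
+         I = torch.eye(Phi.shape[0], dtype=Phi.dtype, device=Phi.device)
+         # Solve the linear system (DFP) once instead of unrolling
+         w = torch.linalg.solve((I - Phi).T, grad_out)
+         return Psi.T @ w, None, None      # vector-Jacobian product w.r.t. c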
744
+ It is important to note that as per Definition 1, the innermost
745
+ optimization loop of a full unrolling can be considered an
746
+ unfolding and can be differentiated through fixed-point folding.
747
+ Subsequently, the next innermost loop can now be considered
748
+ unfolded and the same process can be applied until all unrolled
749
+ optimization loops are replaced with their analytical models.
750
+ This procedure is illustrated schematically in Figure 1, which
751
+ depicts fixed-point folding, where the gray arrows symbolize a
752
+ blackbox forward pass and the long red arrows illustrate that a
753
+ static backward pass iterates at the fixed point. The procedure
754
+ is also exemplified by f-PGDb (introduced in Section 6), which
755
+ applies successive fixed-point folding through ADMM and
756
+ PGD to compose an analytical backward pass model.
757
+ Stepsize Selection
758
+ Many optimization algorithms rely on
759
+ a parameter such as a stepsize, which may be constant or
760
+ change according to some rule at each iteration. Since folded
761
+ optimization depends on a model of some algorithm at its
762
+ fixed point to compute gradients, it also requires a stepsize
763
+ to be chosen. In general, this stepsize need not be chosen
764
+ so that the forward pass optimization converges; in all the
765
+ example algorithms of this paper, the fixed point remains
766
+ stationary even for large stepsizes. Instead, the stepsize should
767
+ be chosen according to its effect on the spectral radius ρ(Φ)
768
+ (see Theorem 1). For example, in the case of folded PGD,
769
+ Φ = (∂/∂x) PC(x⋆ − α∇f(x⋆)),  (19)
772
+ which depends explicitly on the constant stepsize α. In prac-
773
+ tice, it is observed that larger values of α lead to convergence in fewer
+ LFPI iterations during fixed-point folding. However, when α
+ becomes too large, the resulting gradients explode. A wide
+ range of α values tends to result in backward-pass convergence to
777
+ the same gradients, so that careful stepsize selection is typi-
778
+ cally not required. For the purpose of optimizing efficiency,
779
+ Φ could be analyzed to determine its optimal α, but such an
780
+ analysis is not pursued within the scope of this paper.
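+ As a toy illustration of this dependence (our example, not from the
+ paper): for PGD on an unconstrained quadratic f(x) = ½xᵀAx, the
+ projection is inactive, so Φ = I − αA and ρ(Φ) can be inspected directly:
+ import numpy as np
+
+ A = np.diag([1.0, 10.0])                 # toy Hessian
+ for alpha in [0.05, 0.10, 0.19, 0.21]:
+     Phi = np.eye(2) - alpha * A          # Jacobian of the PGD update
+     rho = max(abs(np.linalg.eigvals(Phi)))
+     print(alpha, rho)                    # backward pass converges iff rho < 1
+ # Larger alpha shrinks rho (faster LFPI) until alpha exceeds 0.2 here,
+ # after which rho > 1 and the backward pass diverges.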
781
+ 6
782
+ Experiments
783
+ This section evaluates folded optimization on four end-to-
784
+ end optimization and learning tasks. It is primarily evaluated
785
+ against cvxpy, which is the preeminent general-purpose dif-
786
+ ferentiable optimization solver. Two crucial limitations of
787
+ cvxpy concern its efficiency and expressiveness. These stem from its
788
+ reliance on transforming general optimization programs to con-
789
+ vex cone programs, before applying a standardized operator-
790
+ splitting cone program solver and differentiation scheme (see
791
+ Appendix A). This precludes the incorporation of problem-
792
+ specific solvers in the forward pass and limits its use to convex
793
+ problems only. One major advantage of fold-opt is the
794
+ modularity of its forward optimization pass, which can apply
795
+ any black-box algorithm to produce x⋆(c). In each experiment
796
+ below, this is used to demonstrate a different advantage.
797
+ A summary of results is provided below for each study, and
798
+ a more complete specification is provided in Appendix D.
799
+ Implementation details.
800
+ All the folded optimizers used in
801
+ this section were produced using the accompanying Python
802
+ library fold-opt, which supplies routines for constructing
803
+ and solving the system (DFP), and integrating the resulting
804
+ Jacobian-vector products into the computational graph of Py-
805
+ Torch for backpropagation. When the linear system (DFP)
806
+ is represented explicitly, it can be solved by a user-input
807
+ blackbox linear solver, as can the forward-pass optimization
808
+ solver, as mentioned in Section 4. Implementation details of
809
+ fold-opt can be found in Appendix B.
810
+ The experiments test four folded optimizers: (1) f-PGDa
811
+ applies to optimization mappings with linear constraints, and is
812
+ based on folding projected gradient descent steps, where each
813
+
877
+ Figure 5: Bilinear decision focus (a), Enhanced Denoising with f-FDPG (b), and Portfolio optimization (c).
878
+ inner projection is a QP solved by the differentiable QP solver
879
+ qpth [Amos and Kolter, 2017]. (2) f-PGDb is a variation
880
+ on the former, in which the inner QP step is differentiated by
881
+ fixed-point folding of the ADMM solver detailed in Appendix
882
+ C. (3) f-SQP applies to optimization with nonlinear constraints
883
+ and uses folded SQP with the inner QP differentiated by qpth.
884
+ (4) f-FDPG comes from fixed-point folding of the Fast Dual
885
+ Proximal Gradient Descent (FDPG) shown in Appendix C.
886
+ The inner Prox is a soft thresholding operator, whose simple
887
+ closed form is differentiated by AD in PyTorch.
888
+ Enabling nonconvex optimization mappings.
889
+ The first ex-
890
+ periment showcases the ability of folded optimization to be
891
+ applied in nonconvex decision-focused settings. In this experi-
892
+ ment, we predict the coefficients of a bilinear program
893
+ x⋆(c, d) = argmax
894
+ 0≤x,y≤1
895
+ cT x + xT Qy + dT y
896
+ s.t.
897
+
898
+ x = p,
899
+
900
+ y = q,
901
+ in which two separable linear programs are confounded by
902
+ a nonconvex quadratic objective term Q. Costs c and d are
903
+ predicted by a 5-layer network, while p and q are constants.
904
+ Such programs have numerous industrial applications such
905
+ as optimal mixing and pooling in gas refining [Audet et al.,
906
+ 2004]. Here we focus on the difficulty posed by the problem’s
907
+ form and propose a task to evaluate f-PGDb in learning with
908
+ nonconvex mappings. Feature and cost data are generated by
909
+ the process described in Appendix D, along with 15 distinct
910
+ Q for a collection of nonconvex problems.
911
+ It is known that PGD converges to local optima in non-
912
+ convex problems [Attouch et al., 2013], and this folded im-
913
+ plementation uses the Gurobi nonconvex QP solver to find
914
+ a global optimum. Since no known general framework can
915
+ accommodate nonconvex optimization mappings in end-to-
916
+ end models, we benchmark against the two-stage approach,
917
+ in which the costs c, and d are targeted to ground-truth costs
918
+ by MSE loss and the optimization problem is solved as a sep-
919
+ arate component from the learning task (see Appendix E for
920
+ additional details). The integrated f-PGDb model minimizes
921
+ solution regret (i.e., suboptimality) directly [Elmachtoub and
922
+ Grigas, 2021]. Notice in Figure 5(a) how f-PGDb achieves
923
+ much lower regret for each of the 15 nonconvex objectives.
924
+ Improving performance with specialized solvers.
925
+ This
926
+ experiment illustrates the efficiency benefit of incorporating
927
+ problem-specific solvers. The optimization models a denoiser
928
+ x⋆(D) = argmin_x  (1/2)∥x − d∥² + λ∥Dx∥₁,
932
+ which seeks to recover the true signal x⋆ from a noisy input
933
+ d and is often best handled by variants of Dual Proximal
934
+ Gradient Descent. Classically, D is a differencing matrix so
935
+ that ∥Dx∥1 represents total variation. Here we initialize D to
936
+ this classic case and learn a better D by targeting a set of true
937
+ signals with MSE loss and adding Gaussian noise to generate
938
+ their corresponding noisy inputs. Figure 5(b) shows test MSE
939
+ throughout training due to f-FDPG for various choices of λ.
940
+ Appendix F shows comparable results from the framework of
941
+ Amos and Kolter [2017], which converts the problem to a QP
942
+ form (see Appendix C) in order to differentiate the mapping
943
+ analytically with qpth. Small differences in these results
944
+ likely stem from solver error tolerance in the two methods.
945
+ However, f-FDPG computes x⋆(D) up to 40 times faster.
946
+ Measuring the accuracy of backpropagation.
947
+ Since gra-
948
+ dient errors accumulate at each training step, we ask how
949
+ precise are the operations performed by fold-opt in the
950
+ backward pass. This experiment compares the backpropaga-
951
+ tion of both f-PGDa and f-SQP with that of cvxpy, by using
952
+ the forward pass of cvxpy in each model as a control factor.
953
+ This experiment, adapted from Berrada et al. [2018], imple-
954
+ ments a smooth top-5 classification model on noisy CIFAR-
955
+ 100. The optimization below maps image feature embeddings
956
+ c from DenseNet 40-40 [Huang et al., 2017], to smoothed top-
957
+ k binary class indicators (see Appendix D for more details):
958
+ x⋆(c) = argmax_{0≤x≤1}  cᵀx + Σᵢ xᵢ log xᵢ   s.t.  1ᵀx = k  (20)
966
+ Appendix F shows that all three models have indistinguish-
967
+ able classification accuracy throughout training, indicating
968
+ the backward pass of both fold-opt models is precise and
969
+ agrees with a known benchmark even after 30 epochs of train-
970
+ ing on 45k samples. On the other hand, the more sensitive test
971
+ set shows marginal accuracy divergence after a few epochs.
972
+ Improving accuracy with specialized solvers.
973
+ Having es-
974
+ tablished the equivalence in performance of the backward pass
975
+ across these models, the final experiment describes a situation
976
+ in which cvxpy makes non-negligible errors in the forward
977
+ pass of a problem with nonlinear constraints:
978
+ x⋆(c) = argmax_{x≥0}  cᵀx   s.t.  xᵀVx ≤ γ,  1ᵀx = 1.  (21)
984
+
985
+ This model describes a risk-constrained portfolio optimiza-
986
+ tion where V is a covariance matrix, and the predicted cost
987
+ coefficients c represent assets prices [Elmachtoub and Gri-
988
+ gas, 2021]. A 5-layer ReLU network is used to predict future
989
+ prices c from exogenous feature data, and trained to minimize
990
+ regret (the difference in profit between optimal portfolios un-
991
+ der predicted and ground-truth prices) by integrating Problem
992
+ (21). The folded f-SQP layer used for this problem employs
993
+ the Gurobi QCQP solver in its forward pass. This again highlights
994
+ the ability of fold-opt to accommodate a highly optimized
995
+ blackbox solver. Figure 5(c) shows test set regret throughout
996
+ training, for three synthetically generated datasets of different
997
+ nonlinearity degrees. Notice the accuracy improvements of
998
+ fold-opt over cvxpy. Such dramatic differences can be
999
+ explained by non-negligible errors made in cvxpy’s forward
1000
+ pass optimization on some problem instances, which occurs
1001
+ regardless of error tolerance settings (please see Appendix
1002
+ D for details). In contrast, Gurobi agrees to machine preci-
1003
+ sion with a custom SQP solver, and solves about 50% faster
1004
+ than cvxpy. This shows the importance of highly accurate
1005
+ optimization solvers for accurate end-to-end training.
1006
+ 7
1007
+ Conclusions
1008
+ This paper introduced folded optimization, a framework for
1009
+ generating analytically differentiable optimization solvers
1010
+ from unrolled implementations. Theoretically, folded opti-
1011
+ mization was justified by a novel analysis of unrolling at a
1012
+ precomputed optimal solution, which showed that its back-
1013
+ ward pass is equivalent to the solution of a solver’s differential
1014
+ fixed-point conditions, specifically by fixed-point iteration on
1015
+ the resulting linear system. This allowed for the convergence
1016
+ analysis of the backward pass of unrolling, and evidence that
1017
+ the backpropagation of unrolling can be improved by using
1018
+ superior linear system solvers. The paper showed that folded
1019
+ optimization offers substantial advantages over existing differ-
1020
+ entiable optimization frameworks, including modularization
1021
+ of the forward and backward passes and the ability to handle
1022
+ nonconvex optimization.
1023
+ Acknowledgments
1024
+ This research is partially supported by NSF grant 2007164 and
1025
+ NSF CAREER Award 2143706. Fioretto is also supported by
1026
+ a Google Research Scholar Award and an Amazon Research
1027
+ Award. Its views and conclusions are those of the authors only.
1028
+ References
1029
+ Ryan Prescott Adams and Richard S Zemel. Ranking via
1030
+ sinkhorn propagation.
1031
+ arXiv preprint arXiv:1106.1925,
1032
+ 2011.
1033
+ Akshay Agrawal, Brandon Amos, Shane Barratt, Stephen
1034
+ Boyd, Steven Diamond, and J Zico Kolter. Differentiable
1035
+ convex optimization layers. Advances in neural information
1036
+ processing systems, 32, 2019.
1037
+ Akshay Agrawal, Shane Barratt, Stephen Boyd, Enzo Bus-
1038
+ seti, and Walaa M Moursi. Differentiating through a cone
1039
+ program. arXiv preprint arXiv:1904.09043, 2019.
1040
+ Brandon Amos and J Zico Kolter. Optnet: Differentiable
1041
+ optimization as a layer in neural networks. In International
1042
+ Conference on Machine Learning, pages 136–145. PMLR,
1043
+ 2017.
1044
+ Brandon Amos, Vladlen Koltun, and J Zico Kolter.
1045
+ The
1046
+ limited multi-label projection layer.
1047
+ arXiv preprint
1048
+ arXiv:1906.08707, 2019.
1049
+ Hedy Attouch, J´erˆome Bolte, and Benar Fux Svaiter. Con-
1050
+ vergence of descent methods for semi-algebraic and tame
1051
+ problems: proximal algorithms, forward–backward split-
1052
+ ting, and regularized gauss–seidel methods. Mathematical
1053
+ Programming, 137(1):91–129, 2013.
1054
+ Charles Audet, Jack Brimberg, Pierre Hansen, S´ebastien Le
1055
+ Digabel, and Nenad Mladenovi´c. Pooling problem: Al-
1056
+ ternate formulations and solution methods. Management
1057
+ science, 50(6):761–776, 2004.
1058
+ Amir Beck. First-order methods in optimization. SIAM, 2017.
1059
+ Leonard Berrada, Andrew Zisserman, and M. Pawan Kumar.
1060
+ Smooth loss functions for deep top-k classification. ArXiv,
1061
+ abs/1802.07595, 2018.
1062
+ Quentin Berthet, Mathieu Blondel, Olivier Teboul, Marco
1063
+ Cuturi, Jean-Philippe Vert, and Francis Bach. Learning
1064
+ with differentiable pertubed optimizers. Advances in neural
1065
+ information processing systems, 33:9508–9519, 2020.
1066
+ Mathieu Blondel, Olivier Teboul, Quentin Berthet, and Josip
1067
+ Djolonga. Fast differentiable sorting and ranking. In Inter-
1068
+ national Conference on Machine Learning, pages 950–959.
1069
+ PMLR, 2020.
1070
+ Stephen Boyd, Neal Parikh, Eric Chu, Borja Peleato, Jonathan
1071
+ Eckstein, et al.
1072
+ Distributed optimization and statistical
1073
+ learning via the alternating direction method of multipliers.
1074
+ Foundations and Trends® in Machine learning, 3(1):1–122,
1075
+ 2011.
1076
+ Enzo Busseti, Walaa M Moursi, and Stephen Boyd. Solution
1077
+ refinement at regular points of conic problems. Computa-
1078
+ tional Optimization and Applications, 74(3):627–643, 2019.
1079
+ Justin Domke. Generic methods for optimization-based model-
1080
+ ing. In Artificial Intelligence and Statistics, pages 318–326.
1081
+ PMLR, 2012.
1082
+ Priya Donti, Brandon Amos, and J Zico Kolter. Task-based
1083
+ end-to-end model learning in stochastic optimization. Ad-
1084
+ vances in neural information processing systems, 30, 2017.
1085
+
1086
+ Adam N Elmachtoub and Paul Grigas. Smart “predict, then
1087
+ optimize”. Management Science, 2021.
1088
+ Aaron Ferber, Bryan Wilder, Bistra Dilkina, and Milind Tambe.
1089
+ Mipaal: Mixed integer program as a layer. In Proceedings of
1090
+ the AAAI Conference on Artificial Intelligence, volume 34,
1091
+ pages 1504–1511, 2020.
1092
+ Stephen Gould, Basura Fernando, Anoop Cherian, Peter An-
1093
+ derson, Rodrigo Santa Cruz, and Edison Guo.
1094
+ On dif-
1095
+ ferentiating parameterized argmin and argmax problems
1096
+ with application to bi-level optimization. arXiv preprint
1097
+ arXiv:1607.05447, 2016.
1098
+ Michael C Grant and Stephen P Boyd. Graph implementations
1099
+ for nonsmooth convex programs. In Recent advances in
1100
+ learning and control, pages 95–110. Springer, 2008.
1101
+ Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kil-
1102
+ ian Q Weinberger. Densely connected convolutional net-
1103
+ works. In Proceedings of the IEEE conference on computer
1104
+ vision and pattern recognition, pages 4700–4708, 2017.
1105
+ Takuya Konishi and Takuro Fukunaga. End-to-end learning for
1106
+ prediction and optimization with gradient boosting. In Joint
1107
+ European Conference on Machine Learning and Knowledge
1108
+ Discovery in Databases, pages 191–207. Springer, 2021.
1109
+ James Kotary, Ferdinando Fioretto, Pascal Van Hentenryck,
1110
+ and Bryan Wilder. End-to-end constrained optimization
1111
+ learning: A survey. In Proceedings of the Thirtieth Interna-
1112
+ tional Joint Conference on Artificial Intelligence, IJCAI-21,
1113
+ pages 4475–4482, 2021.
1114
+ James Kotary, Ferdinando Fioretto, Pascal Van Hentenryck,
1115
+ and Ziwei Zhu. End-to-end learning for fair ranking systems.
1116
+ In Proceedings of the ACM Web Conference 2022, pages
1117
+ 3520–3530, 2022.
1118
+ Jayanta Mandi and Tias Guns. Interior point solving for lp-
1119
+ based prediction+ optimisation. Advances in Neural Infor-
1120
+ mation Processing Systems, 33:7272–7282, 2020.
1121
+ Andre Martins and Ramon Astudillo. From softmax to sparse-
1122
+ max: A sparse model of attention and multi-label classifi-
1123
+ cation. In International conference on machine learning,
1124
+ pages 1614–1623. PMLR, 2016.
1125
+ Vishal Monga, Yuelong Li, and Yonina C Eldar. Algorithm
1126
+ unrolling: Interpretable, efficient deep learning for signal
1127
+ and image processing. IEEE Signal Processing Magazine,
1128
+ 38(2):18–44, 2021.
1129
+ James R Munkres. Analysis on manifolds. CRC Press, 2018.
1130
+ Arkadi Nemirovski. Advances in convex optimization: conic
1131
+ programming. In International Congress of Mathemati-
1132
+ cians, volume 1, pages 413–444, 2007.
1133
+ Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan,
1134
+ Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmai-
1135
+ son, Luca Antiga, and Adam Lerer. Automatic differentia-
1136
+ tion in pytorch. In NIPS-W, 2017.
1137
+ Max Paulus, Dami Choi, Daniel Tarlow, Andreas Krause, and
1138
+ Chris J Maddison.
1139
+ Gradient estimation with stochastic
1140
+ softmax tricks. Advances in Neural Information Processing
1141
+ Systems, 33:5691–5704, 2020.
1142
+ Marin Vlastelica Poganˇci´c, Anselm Paulus, Vit Musil, Georg
1143
+ Martius, and Michal Rolinek.
1144
+ Differentiation of black-
1145
+ box combinatorial solvers. In International Conference on
1146
+ Learning Representations, 2019.
1147
+ Alfio Quarteroni, Riccardo Sacco, and Fausto Saleri. Numeri-
1148
+ cal mathematics, volume 37. Springer Science & Business
1149
+ Media, 2010.
1150
+ Subham Sekhar Sahoo, Marin Vlastelica, Anselm Paulus, V´ıt
1151
+ Musil, Volodymyr Kuleshov, and Georg Martius. Gradient
1152
+ backpropagation through combinatorial algorithms: Identity
1153
+ with projection works. arXiv e-prints, pages arXiv–2205,
1154
+ 2022.
1155
+ Nir Shlezinger, Yonina C Eldar, and Stephen P Boyd. Model-
1156
+ based deep learning: On the intersection of deep learning
1157
+ and optimization. arXiv preprint arXiv:2205.02640, 2022.
1158
+ Bryan Wilder, Bistra Dilkina, and Milind Tambe. Melding
1159
+ the data-decisions pipeline: Decision-focused learning for
1160
+ combinatorial optimization. In AAAI, volume 33, pages
1161
+ 1658–1665, 2019.
1162
+ Bryan Wilder, Eric Ewing, Bistra Dilkina, and Milind Tambe.
1163
+ End to end learning and optimization on graphs. Advances
1164
+ in Neural Information Processing Systems, 32, 2019.
1165
+
1166
+ A
1167
+ Related Work
1168
+ This section categorizes end-to-end optimization and learn-
+ ing approaches into those based on unrolling and those based
+ on analytical differentiation. Since this paper focuses on converting un-
+ rolled implementations into analytical ones, each category is
+ reviewed below.
1173
+ Unrolling optimization algorithms.
1174
+ Automatic Differenti-
1175
+ ation (AD) is the primary method of backpropagating gradients
1176
+ in modern deep models for training with stochastic gradient
1177
+ descent. Modern machine learning frameworks such as Py-
1178
+ Torch have natively implemented differentiation rules for a
1179
+ variety of functions that are commonly used in deep models,
1180
+ as well as interfaces to define custom differentiation rules for
1181
+ new functions [Paszke et al., 2017]. As a mainstay of deep
1182
+ learning, AD is also a natural tool for backpropagating through
1183
+ constrained optimization mappings. Unrolling refers to the
1184
+ execution of an optimization algorithm, entirely on the compu-
1185
+ tational graph, for backpropagation by AD from the resulting
1186
+ optimal solution to its input parameters. Such approaches are
1187
+ general and apply to a broad range of optimization models.
1188
+ They can be performed simply by implementing a solution
1189
+ algorithm within an AD framework, without the need for an-
1190
+ alytical modeling of an optimization mapping’s derivatives
1191
+ [Domke, 2012]. However, unrolling over many iterations has
1192
+ been shown to encounter issues of time and memory ineffi-
1193
+ ciency due to the size of its computational graph [Amos and
1194
+ Kolter, 2017]. Further issues encountered in unrolling, such
1195
+ as vanishing and exploding gradients, are reminiscent of recur-
1196
+ rent neural networks [Monga et al., 2021]. On the other hand,
1197
+ unrolling may offer some unique practical advantages, like the
1198
+ ability to learn optimization parameters such as stepsizes to
1199
+ accelerate the solution of each optimization during training
1200
+ [Shlezinger et al., 2022].
1201
+ Analytical differentiation of optimization models.
1202
+ Differ-
1203
+ entiation through constrained argmin problems in the con-
1204
+ text of machine learning was discussed as early as Gould et
1205
+ al. [2016], who proposed first to implicitly differentiate the
1206
+ argmin of a smooth, unconstrained convex function by its first-
1207
+ order optimality conditions, defined when the gradient of the
1208
+ objective function equals zero. This technique is then extended
1209
+ to find approximate derivatives for constrained problems, by
1210
+ applying it to their unconstrained log-barrier approximations.
1211
+ Subsequent approaches applied implicit differentiation to the
1212
+ KKT optimality conditions of constrained problems directly
1213
+ [Amos and Kolter, 2017; Amos et al., 2019], but only on spe-
1214
+ cial problem classes such as Quadratic Programs. Konishi
1215
+ and Fukunaga [2021] extend the method of Amos and Kolter
1216
+ [2017], by modeling second-order derivatives of the optimiza-
1217
+ tion for training with gradient boosting methods. Donti et al.
1218
+ [2017] uses the differentiable quadratic programming solver
1219
+ of [Amos and Kolter, 2017] to approximately differentiate gen-
1220
+ eral convex programs through quadratic surrogate problems.
1221
+ Other problem-specific approaches to analytical differentia-
1222
+ tion models include ones for sorting and ranking [Blondel et
1223
+ al., 2020], linear programming [Mandi and Guns, 2020], and
1224
+ convex cone programming [Agrawal et al., 2019b].
1225
+ The first general-purpose differentiable optimization solver
1226
+ was proposed in Agrawal et al. [2019a], which leverages
1227
+ the fact that any convex program can be converted to a con-
1228
+ vex cone program [Nemirovski, 2007]. The equivalent cone
1229
+ program is subsequently solved and differentiated follow-
1230
+ ing Agrawal et al. [2019b], which implicitly differentiates
1231
+ a zero-residual condition representing optimality [Busseti et
1232
+ al., 2019]. A differentiable solver library cvxpy is based on
1233
+ this approach, which converts convex programs to convex cone
1234
+ programs by way of their graph implementations as described
1235
+ in Grant and Boyd [2008]. The main advantage of the sys-
1236
+ tem is that it applies to any convex program and has a simple
1237
+ symbolic interface. A major disadvantage is its restriction to
1238
+ solving problems only in a standard convex cone form with
1239
+ an ADMM-based conic programming solver, which performs
1240
+ poorly on some problem classes, as seen in Section 6.
1241
+ A related line of work concerns end-to-end learning with dis-
1242
+ crete optimization problems, which includes linear programs,
1243
+ mixed-integer programs and constraint programs. These prob-
1244
+ lem classes often define discontinuous mappings with respect
1245
+ to their input parameters, making their true gradients unhelpful
1246
+ as descent directions in optimization. Accurate end-to-end
1247
+ training can be achieved by smoothing the optimization map-
1248
+ pings, to produce approximations which yield more useful
1249
+ gradients. A common approach is to augment the objective
1250
+ function with smooth regularizing terms such as euclidean
1251
+ norm or entropy functions [Wilder et al., 2019a; Ferber et al.,
1252
+ 2020; Mandi and Guns, 2020]. Others show that similar effects
1253
+ can be produced by applying random noise to the objective
1254
+ [Berthet et al., 2020; Paulus et al., 2020], or through finite dif-
1255
+ ference approximations [Poganˇci´c et al., 2019; Sekhar Sahoo
1256
+ et al., 2022]. This enables end-to-end learning with discrete
1257
+ structures such as constrained ranking policies [Kotary et al.,
1258
+ 2022], shortest paths in graphs [Elmachtoub and Grigas, 2021],
1259
+ and various decision models [Wilder et al., 2019a].
1260
+ B
1261
+ Implementation Details
1262
+ The purpose of the fold-opt library is to facilitate the con-
1263
+ version of unrolled optimization code into analytically mod-
1264
+ eled differentiable optimization. The following steps can be
1265
+ used to replace any loop of a functioning unrolled implementa-
1266
+ tion. To convert a full unrolling with one or more nested loops,
1267
+ the process is applied from the innermost to the outermost
1268
+ loop. (1) After executing a blackbox optimization, initialize
1269
+ the precomputed optimal solution x⋆ onto the computational
1270
+ graph of PyTorch. (2) Execute a single step of the unrolled
1271
+ loop’s update function to get x⋆⋆(c) = U(x⋆, c) and save its
1272
+ computational graph; in principle, the forward execution can
1273
+ be overridden with its known result x⋆. (3) Backpropagate
1274
+ an identity matrix from x⋆⋆(c) to x⋆ and from x⋆⋆(c) to c to
1275
+ get Φ and Ψ, respectively (see Section 5). (4) Solve equation
1276
+ (DFP) and apply the Jacobian-vector product to backpropa-
1277
+ gate incoming gradients. The library contains functions to
1278
+ automate each of the above steps.
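+ A condensed PyTorch sketch of steps (2) and (3) (our function names;
+ U denotes the unrolled loop's update function, written in PyTorch):
+ import torch
+ from torch.autograd.functional import jacobian
+
+ def extract_phi_psi(U, x_star, c):
+     # Backpropagating an identity matrix from U(x*, c) to each input is
+     # equivalent to forming the full Jacobians below.
+     Phi = jacobian(lambda x: U(x, c), x_star)     # Phi = dU/dx at (x*, c)
+     Psi = jacobian(lambda cc: U(x_star, cc), c)   # Psi = dU/dc at (x*, c)
+     return Phi, Psi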
1279
+ C
1280
+ Optimization Models
1281
+ Soft Thresholding Operator
1282
+ The soft thresholding opera-
1283
+ tor defined below arises in the solution of denoising problems by
+ proximal gradient descent variants, as the proximal operator of
+ the ∥ · ∥1 norm:
+ Tλ(x) = [ |x| − λe ]₊ · sgn(x)
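+ Here e is the all-ones vector, so the operator acts elementwise; a
+ one-function PyTorch sketch:
+ import torch
+
+ def soft_threshold(x, lam):
+     # T_lambda(x) = max(|x| - lam, 0) * sign(x), elementwise
+     return torch.clamp(x.abs() - lam, min=0.0) * torch.sign(x)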
1288
+ Fast Dual Proximal Gradient Descent
1289
+ The following is an
1290
+ FDPG implementation from Beck [2017], specialized to solve
1291
+ the denoising problem
1292
+ x⋆(D) = argmin_x  (1/2)∥x − d∥² + λ∥Dx∥₁,
+ of Section 6. Letting u_k be the primal solution iterates, with
+ t_0 = 1 and arbitrary w_0 = y_0:
+ u_k = Dᵀw_k + d  (22a)
+ y_{k+1} = w_k − (1/4) D u_k + (1/4) T_{4λ}(D u_k − 4 w_k)  (22b)
+ t_{k+1} = (1 + √(1 + 4 t_k²)) / 2  (22c)
+ w_{k+1} = y_{k+1} + ((t_k − 1)/t_{k+1}) (y_{k+1} − y_k)  (22d)
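+ A direct NumPy transcription of iterates (22) (a sketch; the fixed
+ iteration count is an arbitrary illustrative choice, and soft_threshold
+ is the operator defined above):
+ import numpy as np
+
+ def soft_threshold(x, lam):
+     return np.maximum(np.abs(x) - lam, 0.0) * np.sign(x)
+
+ def fdpg_denoise(D, d, lam, num_iters=200):
+     w = y = np.zeros(D.shape[0])
+     t = 1.0
+     for _ in range(num_iters):
+         u = D.T @ w + d                                                 # (22a)
+         y_new = w - (D @ u) / 4 + soft_threshold(D @ u - 4 * w, 4 * lam) / 4  # (22b)
+         t_new = (1 + np.sqrt(1 + 4 * t ** 2)) / 2                       # (22c)
+         w = y_new + ((t - 1) / t_new) * (y_new - y)                     # (22d)
+         y, t = y_new, t_new
+     return D.T @ w + d   # final primal iterate u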
1316
+ Quadratic Programming by ADMM
1317
+ A Quadratic Pro-
1318
+ gram is an optimization problem with convex quadratic objec-
1319
+ tive and linear constraints. The following ADMM scheme of
1320
+ Boyd et al. [2011] solves any quadratic programming problem
1321
+ of the standard form:
1322
+ argmin_x  (1/2) xᵀQx + pᵀx  (23a)
+ s.t.  Ax = b  (23b)
+       x ≥ 0  (23c)
+ by declaring the operator splitting
+ argmin_{x,z}  f(x) + g(z)  (24a)
+ s.t.  x = z  (24b)
+ with f(x) = (1/2) xᵀQx + pᵀx, dom(f) = {x : Ax = b},
+ g(x) = δ(x ≥ 0), where δ is the indicator function.
+ This results in the following ADMM iterates:
+ 1. Solve
+    [ Q + ρI  Aᵀ ] [ x_{k+1} ]   [ −p + ρ(z_k − u_k) ]
+    [ A       0  ] [ ν       ] = [ b                 ]
+ 2. z_{k+1} = (x_{k+1} + u_k)₊
+ 3. u_{k+1} = u_k + x_{k+1} − z_{k+1}
1359
+ Here, (1) represents the KKT conditions for equality-
1360
+ constrained minimization of f, (2) is projection onto the posi-
1361
+ tive orthant, and (3) is the dual variable update.
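+ A NumPy sketch of these iterates with a fixed penalty ρ (our
+ illustration):
+ import numpy as np
+
+ def admm_qp(Q, p, A, b, rho=1.0, num_iters=100):
+     n, m = Q.shape[0], A.shape[0]
+     # KKT matrix for the equality-constrained x-update (iterate 1)
+     KKT = np.block([[Q + rho * np.eye(n), A.T],
+                     [A, np.zeros((m, m))]])
+     z = u = np.zeros(n)
+     for _ in range(num_iters):
+         rhs = np.concatenate([-p + rho * (z - u), b])
+         x = np.linalg.solve(KKT, rhs)[:n]   # drop the multiplier block
+         z = np.maximum(x + u, 0.0)          # iterate 2: project onto x >= 0
+         u = u + x - z                       # iterate 3: dual update
+     return z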
1362
+ Sequential Quadratic Programming
1363
+ For an optimization
1364
+ mapping defined by Problem (1) where f, g and h are contin-
1365
+ uously differentiable, define the operator T as:
1366
+ T(x, λ) = argmin_d  ∇f(x)ᵀd + dᵀ∇²L(x, λ) d  (25a)
+ s.t.  h(x) + ∇h(x)ᵀd = 0  (25b)
+       g(x) + ∇g(x)ᵀd ≤ 0  (25c)
1374
+ where dependence of each function on parameters c is hidden.
1375
+ The function L is a Lagrangian function of Problem (1). Then
1376
+ given initial estimates of the primal and dual solution (x0, λ0),
1377
+ sequential quadratic programming is defined by
1378
+ (d, µ) = T(x_k, λ_k)  (26a)
+ x_{k+1} = x_k + α_k d  (26b)
+ λ_{k+1} = λ_k + α_k (µ − λ_k)  (26c)
1384
+ Here, the inner optimization O = T as in Section 3.
1385
+ Denoising Problem - Quadratic Programming form
1386
+ The
1387
+ following quadratic program is equivalent to the unconstrained
1388
+ denoising problem of Section 6:
1389
+ x⋆(D) = argmin_{x,t}  (1/2)∥x − d∥² + λ 1ᵀt  (27a)
+ s.t.  Dx ≤ t  (27b)
+       −t ≤ Dx  (27c)
1399
+ D
1400
+ Experimental Details
1401
+ Additional details for each experiment of Section 6 are de-
1402
+ scribed in their respective subsections below. Note that in all
1403
+ cases, the machine learning models compared in Section 6 use
1404
+ identical settings within each study, with the exception of the
1405
+ optimization components being compared.
1406
+ D.1
1407
+ Nonconvex Bilinear Programming
1408
+ Data generation
1409
+ Data is generated as follows for the non-
1410
+ convex bilinear programming experiments. Input data con-
1411
+ sists of 1000 points ∈ R10 sampled uniformly in the interval
1412
+ [−2, 2]. To produce targets, inputs are fed into a randomly ini-
1413
+ tialized 2-layer neural network with tanh activation, then passed
+ through the nonlinear function x cos(2x) + (5/2) log(x/(x+2)) + x² sin(4x)
1418
+ to increase the nonlinearity of the mapping between inputs
1419
+ and targets. Train and test sets are split 90/10.
1420
+ Settings
1421
+ A 5-layer NN with ReLU activation is trained to pre-
+ dict the costs c and d. We train the model with the Adam optimizer at a
+ learning rate of 10−2 and batch size 32 for 5 epochs.
1424
+ Nonconvex objective coefficients Q are pre-generated ran-
1425
+ domly with 15 different seeds. Constraint parameters are
1426
+ chosen arbitrarily as p = 1 and q = 2. The average solving
1427
+ time in Gurobi is 0.8333s, and depends per instance on the
1428
+ predicted parameters c and d. However the average time tends
1429
+ to be dominated by a minority of samples which take up to
1430
+ ∼ 3 min. This issue is mitigated by imposing a time limit
1431
+ in solving each instance. While the correct gradient is not
1432
+ guaranteed under early stopping, the overwhelming majority
1433
+ of samples are fully optimized under the time limit, mitigating
1434
+ any adverse effect on training. Differences in training curves
1435
+ under 10s and 120s timeouts are negligible due to this effect;
1436
+ the results reported use the 120s timeout.
1437
+ D.2
1438
+ Enhanced Denoising
1439
+ Data generation
1440
+ The data generation follows Amos and
1441
+ Kolter [2017], in which 10000 random 1D signals of length
1442
+ 100 are generated and treated as targets. Noisy input data is
1443
+ generated by adding random perturbations to each element of
1444
+ each signal, drawn from independent standard-normal distri-
1445
+ butions. A 90/10 train/test split is applied to the data.
1446
+
1447
+ Settings
1448
+ A learning rate of 10−3 and batch size 32 are used
1449
+ in each training run. Each denoising model is initialized to the
1450
+ classical total variation denoiser by setting the learned matrix
1451
+ of parameters D ∈ R99×100 to the differencing operator, for
1452
+ which Di,i = 1 and Di,i+1 = −1 ∀i with all other values 0.
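+ For concreteness, this initialization can be written as:
+ import numpy as np
+
+ # Total-variation differencing operator D (99 x 100): (Dx)_i = x_i - x_{i+1}
+ D = np.zeros((99, 100))
+ i = np.arange(99)
+ D[i, i], D[i, i + 1] = 1.0, -1.0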
1453
+ D.3
1454
+ Multilabel Classification
1455
+ Dataset
1456
+ We follow the experimental settings and implemen-
1457
+ tation provided by Berrada et al. [2018]. Each model is eval-
1458
+ uated on the noisy top-5 CIFAR100 task. CIFAR-100 labels
1459
+ are organized into 20 “coarse” classes, each consisting of 5
1460
+ “fine” labels. With some probability, random noise is added
1461
+ to each label by resampling from the set of “fine” labels. The
1462
+ 50k data samples are given a 90/10 training/testing split.
1463
+ Settings
1464
+ The DenseNet 40-40 architecture is trained by SGD
1465
+ optimizer with learning rate 10−1 and batch size 64 for 30
1466
+ epochs to minimize a cross-entropy loss function.
1467
+ D.4
1468
+ Portfolio Optimization
1469
+ Data Generation
1470
+ The data generation follows exactly the
1471
+ prescription of Appendix D in Elmachtoub and Grigas [2021].
1472
+ Uniform random feature data are mapped through a random
1473
+ nonlinear function to create synthetic price data for training
1474
+ and evaluation. A random matrix is used as a linear mapping,
1475
+ to which nonlinearity is introduced by exponentiation of its
1476
+ elements to a chosen degree. The studies in Section 6 use
1477
+ degrees 1, 2 and 3.
1478
+ Settings
1479
+ A five-layer ReLU network is trained to predict
1480
+ asset prices c ∈ R20 using Adam optimizer with learning rate
1481
+ 10−2 and batch size 32.
1482
+ E
1483
+ Decision-Focused Learning
1484
+ For unfamiliar readers, this section provides background on the
1485
+ decision-focused learning setting, also known as predict-and-
1486
+ optimize, which characterizes the first and last experiments
1487
+ of Section 6 on bilinear programming and portfolio optimiza-
1488
+ tion. In this paper, those terms refer to settings in which an
1489
+ optimization mapping
1490
+ x⋆(c) = argmin_x  f(x, c)  (28a)
+ subject to:  g(x) ≤ 0,  (28b)
+              h(x) = 0,  (28c)
1498
+ represents a decision model and is parameterized by the vector
1499
+ c, but only in its objective function. The goal of the supervised
1500
+ learning task is to predict ˆc from feature data such that the
1501
+ resulting x⋆(ˆc) optimizes the objective under ground-truth
1502
+ parameters ¯c, which is f(x⋆(ˆc), ¯c). This is equivalent to
1503
+ minimizing the regret loss function:
1504
+ regret(ˆc, ¯c) = f(x⋆(ˆc), ¯c) − f(x⋆(¯c), ¯c),  (29)
1506
+ which measures the suboptimality, under ground-truth objec-
1507
+ tive data, of decisions x⋆(ˆc) resulting from prediction ˆc.
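+ As a function (a sketch of ours, where f is the objective and x_star
+ the optimization mapping):
+ def regret(f, x_star, c_hat, c_bar):
+     # Eq. (29): objective gap between decisions under predicted costs
+     # c_hat and decisions under ground-truth costs c_bar
+     return f(x_star(c_hat), c_bar) - f(x_star(c_bar), c_bar)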
1508
+ When x⋆ and f are differentiable, the prediction model for
1509
+ ˆc can be trained to minimize regret directly in an integrated
1510
+ predict-and-optimize model. Since the task amounts to pre-
1511
+ dicting ˆc under ground-truth ¯c, a two-stage approach is also
1512
+ available which does not require backpropagation through x⋆.
+
+ Figure 6: Enhanced Denoiser Test Loss; (a) f-FDPG, (b) qpth.
+
1562
+ In the two-stage approach, the loss function MSE(ˆc, ¯c) is used
1563
+ to directly target the ground-truth parameters, but final test
+ performance is still measured by regret. Since the integrated ap-
1565
+ proach minimizes regret directly, it generally outperforms the
1566
+ two-stage in this setting.
1567
+
+ Figure 7: Multilabel Classification Accuracy (Top-1 and Top-k, on train and test sets, for models cvxpy, PGD, and SQP).
+
1612
+ F
1613
+ Additional Figures
1614
+ Enhanced denoising experiment.
1615
+ Figure 6 shows test loss
1616
+ curves, for a variety of λ, in learning enhanced denoisers
1617
+ with the chosen baseline method qpth. As per the original
1618
+ experiment of Amos and Kolter [2017], the implementation is
1619
+ facilitated by conversion to the quadratic programming form
1620
+ of model (27). The results from f-FDPG are again shown
1621
+ alongside for comparison. Small differences between the
1622
+ results stem from the slightly different solutions found by
1623
+ their respective solvers at each training iteration, due to their
1624
+ differently-defined error tolerance thresholds.
1625
+ Multilabel classification experiment.
1626
+ Figure 7 shows Top-
1627
+ 1 and Top-k accuracy on both train and test sets where k = 5.
1628
+ Accuracy curves are indistinguishable on the training set even
1629
+ after 30 epochs. On the test set, generalization error manifests
1630
+ slightly differently for each model in the first few epochs.
1631
+
XdFLT4oBgHgl3EQfUC-H/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
_NAzT4oBgHgl3EQfFvpH/content/tmp_files/2301.01015v1.pdf.txt ADDED
@@ -0,0 +1,1328 @@
1
+ {Semi}-Structured Object Sequence Encoders
2
+ Rudra Murthy V1 , Riyaz Bhat1 , Chulaka Gunasekara1 , Hui Wan1 , Tejas Indulal
3
+ Dhamecha2∗ , Danish Contractor1 , Marina Danilevsky1
4
+ 1IBM Research, India
5
+ 2Microsoft, Bangalore
6
+ rmurthyv@in.ibm.com, {Riyaz.Bhat,chulaka.gunasekara,Danish.Contractor}@ibm.com, {hwan,
7
+ mdanile}@us.ibm.com, tdhamecha@microsoft.com
8
+ Abstract
9
+ In this paper we explore the task of modeling (semi)
10
+ structured object sequences; in particular we fo-
11
+ cus our attention on the problem of developing a
12
+ structure-aware input representation for such se-
13
+ quences. In such sequences, we assume that each
14
+ structured object is represented by a set of key-
15
+ value pairs which encode the attributes of the struc-
16
+ tured object. Given a universe of keys, a sequence
17
+ of structured objects can then be viewed as an
18
+ evolution of the values for each key, over time.
19
+ We encode and construct a sequential representa-
20
+ tion using the values for a particular key (Tempo-
21
+ ral Value Modeling - TVM) and then self-attend
22
+ over the set of key-conditioned value sequences to
+ create a representation of the structured object se-
+ quence (Key Aggregation - KA). We pre-train and
+ fine-tune the two components independently and
+ present an innovative training schedule that inter-
+ leaves the training of both modules with shared at-
+ tention heads. We find that this iterative two-part
+ training results in better performance than a unified
+ network with hierarchical encoding, as well as over
+ other methods that use a record-view representation
32
+ of the sequence [de Souza Pereira Moreira et al.,
33
+ 2021] or a simple flattened representation of the se-
34
+ quence. We conduct experiments using real-world
35
+ data to demonstrate the advantage of interleaving
36
+ TVM-KA on multiple tasks and detailed ablation
37
+ studies motivating our modeling choices. We find
38
+ that our approach performs better than flattening se-
39
+ quence objects and also allows us to operate on sig-
40
+ nificantly larger sequences than existing methods.
41
+ 1
42
+ Introduction
43
+ Recent work on employing self-supervision during pre-
44
+ training has led to the development of a number of
45
+ transformer-based large language models (LLM). In partic-
46
+ ular, such LLMs are pre-trained with self-supervised objec-
47
+ tives based on different forms of input masking [Devlin et al.,
48
+ ∗Work done while employed at IBM Research, India
49
+ 2019; Yang et al., 2019], auto-regressive pre-training [Rad-
50
+ ford et al., 2019; Brown et al., 2020; Workshop et al., 2022],
51
+ or both [Beltagy et al., 2020a; Lewis et al., 2020], and then
52
+ applied to downstream tasks after fine-tuning [Devlin et al.,
53
+ 2019; Yang et al., 2019; Lewis et al., 2020], zero-shot and in-
54
+ context learning with prompting [Brown et al., 2020]. These
55
+ methods have also been applied jointly to images and text
56
+ [Ramesh et al., 2021; Rombach et al., 2022]. One of the
57
+ key elements in such models is the choice of representation
58
+ of the input tokens to the transformer; in the case of natu-
59
+ ral language, byte-pair encodings are often used to represent
+ sub-words, and sequences of text are then modeled using these
61
+ encodings.
62
+ In this paper, we explore the task of modeling (semi) struc-
63
+ tured object sequences; in particular we focus our attention
64
+ on the problem of developing a structure-aware input repre-
65
+ sentation for such sequences. In such sequences, we assume
66
+ that each structured object is represented by a set of key-value
67
+ pairs which encode the attributes of the structured object (Fig-
68
+ ure 1(a)). Given a universe of keys, a sequence of structured
69
+ objects can then be viewed as a column-evolution of the val-
70
+ ues for each key, over time. A trivial method of representing
71
+ such sequences would be to flatten each structured object and
72
+ view them as individual words for tokenization in natural lan-
73
+ guage.1 However, this causes the sequence length to become
74
+ extremely large (thousands of tokens). For instance, in our
75
+ study of structured objects from user-interaction sessions on
76
+ software from a large cloud-based service provider, we found
77
+ these objects could contain, on average 25 fields with values
78
+ that include timestamps, identifiers, log messages, etc. (aver-
79
+ age 7 words). A session length of 15 minutes results in 105
80
+ such session objects, amounting to nearly 18000 words which
81
+ would further increase the sequence length after sub-words
82
+ are created.
83
+ Contributions: To overcome these challenges, we use a
84
+ modular, two-part hierarchical encoding strategy. First, we
85
+ encode and construct a sequential representation using the
86
+ values for a particular key (Temporal Value Modeling). This
87
+ may be achieved using any encoder.2 We then self-attend over
88
+ the set of key-conditioned value sequences to create
+ a representation of the structured object sequence (Key Aggregation).
90
+ 1with markers to indicate boundaries for each structured object
91
+ 2We use BERT in our experiments
92
+ arXiv:2301.01015v1 [cs.CV] 3 Jan 2023
93
+
94
+ Figure 1: {Semi}-Structured Object Sequences: (a) Generic representation consisting of multiple key-value pairs at time steps t0 . . . tn. (b)
95
+ Sequences of events triggered by the use of a graphical user interface. (c) Viewing a text paragraph as a sequence of sentences. (d) Viewing
96
+ a conversation as a sequence of role annotated turns.
97
+ We develop a novel sequential transfer learning
98
+ training paradigm to facilitate information sharing between
99
+ two independently trained sub-networks. In contrast to tradi-
100
+ tional approaches based on parameter sharing [Baxter, 1997;
101
+ Duong et al., 2015; Zhang et al., 2022], fine tuning of pre-
102
+ trained models [Devlin et al., 2019], and the use of adapter
103
+ layers [Pfeiffer et al., 2020], we share complete attention
104
+ heads between two networks. First we pre-train the TVM
105
+ network with shared attention heads in place and then use the
106
+ frozen representations from this network to initialize the KA
107
+ network which has its own untrained attention heads as well
108
+ the shared attention heads from the TVM network as part of
109
+ its parameters. We utilize a training schedule that interleaves
110
+ the training of both modules to iteratively train the TVM and
111
+ KA modules. We find that this iterative two-part training re-
+ sults in better performance than a unified network with hier-
+ archical encoding (with no attention-head sharing), as well as
+ over other methods that use a record-view representation of
115
+ the sequence [de Souza Pereira Moreira et al., 2021].
116
+ We present experiments using real-world data to demon-
117
+ strate the advantage of TVM-KA on multiple tasks and de-
118
+ tailed ablation studies motivating our modeling choices. We
119
+ find that our approach significantly outperforms baseline
120
+ methods for encoding semi-structured object sequences.
121
+ In summary, our paper makes the following contributions:
122
+ (i) We present a two-part encoder, TVM-KA, that mod-
+ els structured object sequences; (ii) it easily accommodates
+ the modeling of unstructured natural language text as well
+ as of semi-structured natural language data; (iii) we
126
+ present detailed experiments to demonstrate the strengths of
127
+ our work and include ablation studies motivating our model-
128
+ ing choices.
129
+ 2
130
+ Modeling
131
+ Let Ji = {ki,1 : vi,1, ki,2 : vi,2, . . . , ki,n : vi,n} de-
132
+ note a structured object Ji, containing n key-value pairs
133
+ ⟨kj, vj⟩. A sequence of structured objects is denoted
134
+ as J = [J1, J2, J3, . . . , JN] that is a (temporal) sequence of
135
+ structured objects corresponding to the N time steps. The
136
+ goal of our modeling is to learn a representation of a se-
137
+ quence of structured objects J ; and subsequently, learn f :
138
+ Embd(J ) → {1, 2, . . . , C} for an end-task, such as a C-way
139
+ classification task.
140
+ We develop a modular two-part modeling strategy to rep-
141
+ resent a sequence of structured objects.
142
+ 1. Our first module called the Temporal Value Modeler
143
+ (TVM), is used to learn a combined representation (re-
144
+ ferred to as the key-representations) for the different val-
145
+ ues that each key takes in the sequence.
146
+ 2. The second module, called the Key-Aggregator (KA)
147
+ uses the key-representations corresponding to each key,
148
+ to create an overall representation for J .
149
+ Temporal Value Modeling: Let k be a key from the univer-
+ sal set of all the keys K in the sequence. The sequence of the
+ values corresponding to the key is represented as V(k), and a
152
+
153
+ Figure 2: (left) Proposed two-part training in contrast to end-to-end training and (right) Attention Head Sharing.
+
272
+ value sequence S(k) is constructed from it as follows:
273
+ V(k) = [ v_{i,j} | k_{i,j} = k, ∀i ∈ {1, 2, . . . , N} ]  (1)
+ S(k) = [CLS] k [VAL_SEP] V(k)_1 [VAL_SEP] V(k)_2 [VAL_SEP] . . . V(k)_{≤N}  (2)
282
+ where [VAL_SEP] and [CLS] are special tokens.
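+ A small Python sketch of Eqs. 1-2, building S(k) from a list of
+ structured objects (treating a missing key as the string "null" is our
+ assumption, following Figure 1):
+ def build_value_sequence(objects, key):
+     # Eq. 1: the values that `key` takes across the object sequence
+     values = [obj.get(key, "null") for obj in objects]
+     # Eq. 2: [CLS] key [VAL_SEP] v1 [VAL_SEP] v2 ...
+     return "[CLS] " + key + " [VAL_SEP] " + " [VAL_SEP] ".join(str(v) for v in values)
+
+ objects = [{"event": "add node", "color": "blue"}, {"event": "delete node"}]
+ print(build_value_sequence(objects, "color"))   # [CLS] color [VAL_SEP] blue [VAL_SEP] null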
283
+ Note that this formulation allows us to accommodate the
284
+ modeling of natural language text as in Figure 1 (c). Specifi-
285
+ cally, in Eq. 2, if N = 1 such that V(k) = [v_{1,i}], encoding V(k) reduces
286
+ to modeling the tokens using the TextEncoder. For illustra-
287
+ tion, if the TextEncoder is based on BERT [Devlin et al.,
288
+ 2019], Eq. 3 reduces to the encoding scheme typically em-
289
+ ployed in BERT where the [VAL_SEP] corresponds to an
290
+ end of sentence marker.
291
+ With any choice of a transformer-based [Vaswani et al.,
292
+ 2017] language encoder, an embedding for S(k), termed as
293
+ the key-representation (KR), can be obtained:
294
+ KR(k) = TextEncoder(S(k))[0]  (3)
296
+ TextEncoder gives us a dim × L dimensional tensor, where
297
+ dim is the output embedding dimension size and L corre-
298
+ sponds to the tokenized sequence length for S(k). We use
299
+ the output embedding at the first position as the
300
+ key-representation.
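+ With a BERT-style encoder from the HuggingFace transformers library,
+ Eq. 3 amounts to taking the position-0 output; registering [VAL_SEP]
+ as an additional special token is our simplifying assumption:
+ from transformers import AutoModel, AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ model = AutoModel.from_pretrained("bert-base-uncased")
+ tokenizer.add_special_tokens({"additional_special_tokens": ["[VAL_SEP]"]})
+ model.resize_token_embeddings(len(tokenizer))
+
+ s_k = "color [VAL_SEP] white [VAL_SEP] blue [VAL_SEP] null"
+ inputs = tokenizer(s_k, return_tensors="pt")    # tokenizer prepends [CLS]
+ kr = model(**inputs).last_hidden_state[:, 0]    # Eq. 3: KR(k)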
301
+ Key-Aggregation: Once we create key-representations we
302
+ need to utilize them for an end-task. We encode the key-
303
+ representations KR(k), k ∈ K using another encoder. The
304
+ choice of encoder used can be task specific as we make no
305
+ assumptions about the encoder.
306
+ Embd(J) = KeyAggregator({KR(k) | k ∈ K})  (4)
308
+ Column-evolution vs. Record-view Representation: In-
309
+ stead of the column-evolution representation used by the
310
+ TVM, one could construct a record-level view to model the
311
+ sequence [de Souza Pereira Moreira et al., 2021]. Specifi-
312
+ cally, instead of modeling the evolution of columns in a semi-
313
+ structured object sequence using V (k) and S(k) for each key,
314
+ k such models view the sequence as a series of Ji. However,
315
+ the record-view representation forces the network to com-
316
+ press information present in multiple keys and values of a
317
+ record (Ji) which can create an information bottleneck for
318
+ the downstream task. We compare and contrast the benefit
319
+ of these alternative views in the experiments (Section 3) and
320
+ discussion section (Section 5) of this paper.
321
+ Challenges of Scalable Training: The training of the hierar-
322
+ chical two-part network,
+ first obtaining the key-representations (Eq. 3) and then
+ the structured object sequence representation (Eq. 4),
325
+ could be done end-to-end where the network parameters are
326
+ directly trained for the downstream task as illustrated in Fig-
327
+ ure 2. However, due to challenges of scale, it is infeasible
328
+ to train the network end-to-end. Specifically, the sequence
329
+ length (N) can be very large3, and the size of the universe of
330
+ the keys present in the structured object sequence can also be
331
+ very large.4
332
+ Therefore, the end-to-end model architecture operating
333
+ over a batch of J sequences would exceed the memory of
334
+ most commodity GPUs.
335
+ By a conservative estimate, for
336
+ n = 11 and N = 512, a typical 120M-parameter model
+ would exceed a 40GB GPU memory limit with a batch size of 2.
338
+ Interleaved Task Training: To address this challenge, we
339
+ use a sequential task training paradigm where we interleave
340
+ the training of the TVM and KA components.
341
+ Note that
342
+ 3We experiment with object sequences with 3000 time steps.
343
+ 4Real-world data can have hundreds of keys in each structured
344
+ object.
345
+
346
+ our training paradigm is different from traditional training
347
+ schedules for sequential task training where one network is
348
+ fully trained before the next module or from fine-tuning ap-
349
+ proaches where a part of the network may be initialized with
350
+ a pre-trained model and additional layers of the network ini-
351
+ tialized randomly and then updated for an end-task.
352
+ The interleaved training procedure is described in Algo-
353
+ rithm 1. In our work we use the Masked language modeling
354
+ (MLM) objective [Devlin et al., 2019] to train the TVM com-
355
+ ponent and an end-task specific objective for training the KA.
356
+ Algorithm 1 Interleaved training
+ 1: Initialize Temporal Value Modeler Mv and Key Aggregator Mk parameters randomly.
+ 2: Prepare the dataset Dv consisting of value sequences S(k) as per Eq. 2.
+ 3: for i = 1, 2, . . . , p do
+ 4:     ▷ TVM training
+ 5:     Update TVM Mv model parameters with the MLM objective on Dv.
+ 6:     Prepare the dataset Dk consisting of key-representations KR(k) as per Eq. 3.
+ 7:     ▷ KA training
+ 8:     Update Key Aggregator Mk model parameters with cross-entropy loss for the downstream task.
+ 9: end for
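+ A minimal sketch of the interleaving schedule in Algorithm 1, with
+ placeholder training functions; the step counts mirror the Cloud Service Logs
+ settings reported in Section 3.3 and are illustrative.
+ def interleaved_training(tvm, ka, value_seqs, labels, rounds=3,
+                          tvm_steps=50_000, ka_steps=100_000):
+     for _ in range(rounds):
+         train_tvm_mlm(tvm, value_seqs, steps=tvm_steps)   # TVM: MLM objective
+         key_reprs = build_key_reprs(tvm, value_seqs)      # dataset D_k via Eq. 3
+         train_ka(ka, key_reprs, labels, steps=ka_steps)   # KA: cross-entropy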
376
+ Although the interleaved training allows the modules to be
377
+ trained independently, it makes the overall training harder as
378
+ the two networks are disjoint. To overcome this limitation,
379
+ we propose a novel model parameter sharing strategy as de-
380
+ scribed below.
381
+ Sharing Attention Heads: The TVM network creates a rep-
382
+ resentation for each key by attending on the values that occur
383
+ in the sequence for each of them. The KA network then uses
384
+ these representations to learn the end-task. However, if the
385
+ KA network could influence how these representations are
386
+ created for each key, it could perhaps help improve the per-
387
+ formance of the KA on the end-task. We therefore introduce
388
+ hard-parameter sharing between the TVM and KA compo-
389
+ nents. Instead of sharing specific layers, we share some of the
390
+ attention heads between the two networks. We hypothesize
391
+ that by sharing a few attention heads between the two net-
392
+ works, the KA will be able to increasingly rely on the shared
393
+ attention heads because as training progresses and updates the
394
+ parameters used in these heads, it will have an effect of ad-
395
+ justing the key-representations from the TVM in a way that
396
+ could help improve overall end-task performance.
397
+ Formally, let the TVM and KA networks use h = p + q
398
+ heads in multi-head attention of the transformer-based net-
399
+ work. Then at any given layer in the network, their multihead
400
+ attention layers are defined respectively as
401
+ TE_MultiHead(Q, K, V) = Concat(head_{s_0}, . . . , head_{s_p}, head_{t_1}, . . . , head_{t_q}) W_t^O
+ KA_MultiHead(Q, K, V) = Concat(head_{s_0}, . . . , head_{s_p}, head_{k_1}, . . . , head_{k_q}) W_k^O
+ where head_i = softmax(Q W_i^Q (K W_i^K)^T / √d_k) V W_i^V,
+ s_0, . . . , s_p denotes the p shared attention heads, t_1, . . . , t_q denotes the
+ TextEncoder-specific attention heads in the TVM, and k_1, . . . , k_q denotes the
+ KA-specific attention heads. W_i^Q, W_i^K, and W_i^V are the projection matrices
+ for query, key, and value for the i-th attention head; W_t^O and W_k^O are the
+ output projection matrices for a layer of the TextEncoder and the KA,
+ respectively. We summarize our parameter sharing approach in Fig. 2.
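+ One way to realize the head sharing above, sketched under our own design
+ assumptions: the Q/K/V projections of the p shared heads live in a module
+ that both networks hold a reference to, while each network keeps its q
+ private heads and its own output projection W^O.
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ 
+ class PartiallySharedAttention(nn.Module):
+     def __init__(self, dim, p, q, shared_qkv):
+         super().__init__()
+         self.p, self.q, self.head_dim = p, q, dim // (p + q)
+         self.shared_qkv = shared_qkv   # nn.Linear(dim, 3 * p * head_dim), shared
+         self.private_qkv = nn.Linear(dim, 3 * q * self.head_dim)
+         self.out_proj = nn.Linear(dim, dim)   # W^O, not shared
+ 
+     def forward(self, x):                     # x: batch x L x dim
+         b, L, _ = x.shape
+         hd = self.head_dim
+         qs, ks, vs = self.shared_qkv(x).chunk(3, dim=-1)    # shared heads
+         qp, kp, vp = self.private_qkv(x).chunk(3, dim=-1)   # private heads
+         split = lambda t, n: t.view(b, L, n, hd).transpose(1, 2)
+         q = torch.cat([split(qs, self.p), split(qp, self.q)], dim=1)
+         k = torch.cat([split(ks, self.p), split(kp, self.q)], dim=1)
+         v = torch.cat([split(vs, self.p), split(vp, self.q)], dim=1)
+         attn = F.softmax(q @ k.transpose(-2, -1) / hd ** 0.5, dim=-1)
+         out = (attn @ v).transpose(1, 2).reshape(b, L, -1)
+         return self.out_proj(out)
+ 
+ # Both layers reference the same shared module, so gradients from either task
+ # update the shared heads:
+ shared = nn.Linear(768, 3 * 4 * 64)
+ tvm_attn = PartiallySharedAttention(768, p=4, q=8, shared_qkv=shared)
+ ka_attn = PartiallySharedAttention(768, p=4, q=8, shared_qkv=shared)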
438
+ The use of interleaved training as outlined in Algorithm
439
+ 1 prevents the problem of catastrophic forgetting [French,
440
+ 1999; McCloskey and Cohen, 1989; McClelland et al., 1995;
441
+ Kumaran et al., 2016; Ratcliff, 1990] when the KA is trained.
442
+ Further, it is possible that when the TVM is trained for the
443
+ first time it may rely heavily on the heads that are shared.
444
+ Thus, any change to the representation from these heads
445
+ could lead to poorer key-representations and attention shar-
446
+ ing in that case would be counter-productive. To overcome
447
+ this problem, we apply DropHead [Zhou et al., 2020] on the
448
+ shared attention heads in TVM and pre-train the model before
449
+ beginning the interleaving schedule.
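+ A minimal sketch of head-level dropout in the spirit of DropHead [Zhou et
+ al., 2020]: whole attention heads are zeroed at random during training, with
+ inverted-dropout rescaling. This is our simplified rendering, not the
+ reference implementation.
+ import torch
+ 
+ def drop_head(head_outputs, p_drop=0.2, training=True):
+     # head_outputs: batch x heads x L x head_dim; drop entire heads.
+     if not training or p_drop == 0.0:
+         return head_outputs
+     b, h = head_outputs.shape[:2]
+     keep = (torch.rand(b, h, 1, 1, device=head_outputs.device) > p_drop).float()
+     return head_outputs * keep / (1.0 - p_drop)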
450
+ Choice of Encoders: An advantage of the modular two-part
451
+ encoder is that we can easily plug in an existing text encoder for
452
+ TVM. We use BERT [Devlin et al., 2019] in our experimen-
453
+ tation.
454
+ For Key Aggregation we use the same model as the TVM
455
+ TextEncoder but we do not use positional embeddings since
456
+ we encode a set (and not a sequence) of keys.
457
+ 3 Experiments
459
+ Our experiments are designed to answer the following ques-
460
+ tions: (i) How helpful is the TVM-KA architecture over
461
+ baseline strategies that involve flattening semi-structured se-
462
+ quence objects?
463
+ (ii) How important is the use of shared-
464
+ attention heads for fine-tuning of the Key Aggregator? (iii)
465
+ How does the model compare to record-view representations
+ [de Souza Pereira Moreira et al., 2021]?
467
+ 3.1 Data
469
+ We choose two application/cloud logs datasets and one e-
470
+ commerce purchase history dataset. The first application logs
471
+ dataset, referred to as the ‘Cloud Service Logs’, is an in-
472
+ ternal dataset consisting of interaction traces typically used
473
+ for product usage analysis. We also use the publicly available
474
+ LogHub [He et al., 2020] dataset that consists of system log
475
+ messages from Hadoop distributed file system and a publicly
476
+ available e-commerce dataset consisting of product purchase
477
+ information [stanley et al., 2017].
478
+ Cloud Service Logs Data: Application event traces from
479
+ a large cloud provider
480
+ In the Cloud Service Logs (CSL) dataset, application event
481
+ traces are logged on the cloud provider's website. Each user
482
+ is assigned a unique identifier. Event types range from lo-
483
+ gin, browsing, account creation/maintenance/update, UI nav-
484
+ igation, search, service creation/deletion, app interactions,
485
+
486
+ Dataset             Train     Dev      Test      # classes   Task                      Metric
+ Cloud Service Logs  12,833    1,605    1,604     3           Milestone Prediction      Micro-F1 & Macro-F1
+ LogHub              402,542   57,506   115,012   2           Anomaly Detection         F1
+ Instacart           780,003   97,501   97,500    3,212       Next Product Prediction   Recall@k
+ Table 1: Dataset Statistics
515
+ among several others. We have about ?? unique event types.
516
+ Each event has an associated payload which provides context
517
+ around the event. For example, if a user performed a search,
518
+ the payload captures the search query and the page where the
519
+ search was performed. If a user interacted with a service, the
520
+ payload captures the service ID and action performed on the
521
+ service, among other information.
522
+ Our raw data is a snapshot of application event traces span-
523
+ ning 3 months, comprising about 450M events. Using
525
+ these, we build our user sessions. A user session is essen-
526
+ tially a temporal sequence of event traces for that user.
527
+ We constructed user sessions for 100k users. The applica-
528
+ tion events corresponding to 1) plan upgrade, and 2) opening
529
+ chat bot (to seek help) are considered as milestone events.
530
+ These milestone events are chosen as they represent revenue
531
+ generation and user experience, respectively. The case of no
532
+ milestone event occurring is treated as the third class.
533
+ From the traces, the temporal sequences of 300 events are
534
+ extracted to predict if a milestone (or no-milestone) event will
535
+ occur in the next 50 time steps.
536
+ Instacart eCommerce Data
537
+ The publicly available Instacart dataset5 contains 3 million
538
+ grocery purchase orders of nearly 200,000 users of the appli-
539
+ cation. Each order consists of multiple products and each
540
+ structured object associated with a product contains meta-
541
+ data such as the day of the week, the product category, de-
542
+ partment, aisle, etc. We reprocess this dataset to create se-
543
+ quences of product purchases and evaluate models on task
544
+ of next product prediction. We create variable length train-
545
+ ing instances from each user’s order history by sampling be-
546
+ tween 50 to 200 previous product purchases for a certain tar-
547
+ get product. Additionally, we only sample a training instance
548
+ if the target product has been ordered at least 50 times across
549
+ users.
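+ To illustrate the instance-construction procedure, a small sketch with
+ assumed field names (each user's history as a chronological list of product
+ records); the 50-200 sampling window and the 50-order frequency threshold
+ follow the text, everything else is illustrative.
+ import random
+ 
+ def make_instances(order_history, product_counts, min_count=50):
+     instances = []
+     for t in range(51, len(order_history)):
+         target = order_history[t]["product_name"]     # assumed field name
+         if product_counts.get(target, 0) < min_count:
+             continue                                  # keep frequent targets only
+         window = random.randint(50, min(200, t))      # 50-200 previous purchases
+         instances.append((order_history[t - window:t], target))
+     return instances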
550
+ As per our task formulation, we predict the product name
551
+ given the sequence of product orders6, which is effectively
552
+ a classification task over a universe of 3212 products. Ex-
553
+ isting work on this dataset has focused on a simpler binary
554
+ prediction task where models are asked to predict whether a
555
+ particular item is likely to be purchased again.7
556
+ LogHub Data
557
+ HDFS-1 from LogHub [He et al., 2020] is utilized for log
558
+ anomaly detection task. Originally, the dataset consists of log
559
+ 5https://tech.instacart.com/3-million-instacart-orders-open-
560
+ sourced-d40d29ead6f2
561
+ 6We use the complete structured object.
562
+ 7https://www.kaggle.com/competitions/instacart-market-basket-
563
+ analysis/leaderboard
564
+ lines. We utilize the Drain [He et al., 2017] log parser, which iden-
565
+ tifies 48 log templates. In a semi-manual fashion, we assign
566
+ key names to the value slots of the templates. Thus, each log
567
+ line is converted to a structured object. Each structured ob-
568
+ ject contains about 10 key-value pairs. A block consists of
569
+ a sequence of such structured objects. The binary task is to
570
+ predict if a block is anomalous or not.
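+ As an illustration of this conversion, a hypothetical example: once a
+ Drain-identified template's value slots are given key names, each matching
+ log line becomes a key-value object (the template, key names, and log line
+ below are invented for illustration).
+ import re
+ 
+ pattern = re.compile(r"Received block (\S+) of size (\d+) from (\S+)")
+ keys = ["block_id", "size", "src_ip"]                 # assigned key names
+ 
+ line = "Received block blk_-123 of size 67108864 from 10.0.0.5"
+ match = pattern.match(line)
+ structured = dict(zip(keys, match.groups())) if match else {}
+ # {'block_id': 'blk_-123', 'size': '67108864', 'src_ip': '10.0.0.5'}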
571
+ 3.2 Encoders
573
+ Baselines: We experiment with BERT [Devlin et al., 2019]
574
+ encoders as baselines.
575
+ We flatten each key-value pair in a
576
+ structured object and encode them with special markers in-
577
+ dicating boundaries for objects and timesteps. We fine-tune
578
+ the pre-trained encoders for each evaluation task and report
579
+ their performance. In addition, instead of using a pre-trained
580
+ encoder for BERT, we also use the equivalent untrained model
581
+ (with the same number of parameters) which is directly opti-
582
+ mized for the evaluation tasks.
583
+ Encoders for TVM-KA: One of the advantages of the TVM-
584
+ KA architecture is that it is agnostic to the choice of encoder. We use
585
+ the same encoder architectures used in our baselines to fa-
586
+ cilitate a direct comparison in performance. Recall that the
587
+ TVM module and KA module share attention heads to facili-
588
+ tate sharing of information between them.
589
+ To pre-train the TVM, we mask 15% of the tokens.
590
+ 3.3 Hyperparameters and Training schedule
592
+ Due to compute limitations, we estimate hyper-
+ parameters on the Cloud Service Logs dataset and use them
+ for the other two datasets.
596
+ We tune for the learning rates
597
+ in {1e−4, 3e−4, 5e−4, 1e−5, 3e−5, 5e−5, 1e−6, 3e−6, 5e−6},
598
+ and number of shared heads p in {2, 4, 6, 8}. The drophead
599
+ mechanism is only activated during TVM training, with drop-
600
+ head probability set to 0.2.
601
+ For the TVM training in the first iteration, we vary learning
602
+ rates and observe model convergence (in terms of train and
603
+ dev loss) after a fixed 100K steps. The best learning rate for
604
+ TVM is identified from this exercise. A similar approach is
605
+ used for identifying the best learning rate for the KA training
606
+ stage too, albeit for a smaller number of update steps. In
607
+ the first iteration, TVM training is done for 1 epoch. Then
608
+ we proceed with the interleaving step. We alternate between
609
+ TVM training and KA training with their number of training
610
+ steps in 2:1 proportion; specifically, for cloud service logs
611
+ dataset, the number of TVM training steps is 50K and the
612
+ number of KA training steps is around 100K.
613
+ 3.4 Results
615
+ In this section, we report the results from our experiments.
616
+
617
+                     Comments          Cloud Service Logs      Instacart    Loghub
+                                       Macro F1   Micro F1     Recall@10    F1
+ Flattened Encoding  Pre-Trained       74.77      80.31        16.4         61.63
+                     Random            50.22      72.38        9.6          53.62
+ TVM-KA              No Interleaving   73.22      84.14        17.5         98.64
+                     Interleaving      79.60      86.49        22.5         99.32
+ Table 2: Comparison of the TVM-KA model with the baseline approaches on various datasets. All our transformer encoders have the same
+ architecture and number of parameters as bert-base-uncased; in our proposed approach, both the TVM and KA have the same architecture
+ and number of parameters as bert-base-uncased. The results from our approach are statistically significant compared to the baseline approaches.
650
+ Model               Configuration                  # Parameters   Cloud Service Logs   Loghub
+ Joint Modeling      No TVM Pre-Training            209.42M        69.61                53.62
+                     TVM Pre-Training               209.42M        77.68                70.72
+ Record View                                        104.41M        77.33                99.51
+ Flattened Encoding  bert-base-uncased              104.41M        74.77                61.63
+                     bert-large-uncased             319.62M        76.59                75.86
+ Flattened Encoding  allenai/longformer-base-4096   141.77M        73.71                98.54
+                     google/bigbird-roberta-base    122.13M        70.69                99.31
+ TVM-KA              With Interleaved Training      199.28M        79.60                99.32
+ Table 3: Ablation Experiments
692
+ Comparison with Flattened encoding: Table 2 compares
693
+ the performance of our approach with the baseline models.
694
+ Our approach obtains statistically significant results com-
695
+ pared to the baseline approaches on all three datasets. Loghub
696
+ data suffers the most from information compression due to
697
+ the truncation of sequences to 512 tokens. For Cloud Service
698
+ Logs we report both Macro and Micro F1-Score due to class
699
+ imbalance.
700
+ For the baseline approach, we could either train an encoder
701
+ with randomly initialized weights or an existing pre-trained
702
+ model. We observe that training (fine-tuning) a pre-trained
703
+ encoder results in the best performance compared to training
704
+ an encoder with randomly initialized weights for the down-
705
+ stream task.
706
+ Importance of interleaved training: As seen from Table 2,
707
+ our interleaving approach outperforms the model trained with
708
+ no interleaving, emphasizing that interleaving is a crucial step
709
+ in our approach.
710
+ 3.5 Ablation Study for Modeling choices
712
+ Joint Modeling vs Interleaved: We observe that joint mod-
713
+ eling performs poorly compared to our interleaving approach.
714
+ In the joint modeling approach, the weights of the Temporal
715
+ Value Modeler (TVM) are randomly initialized. We could
716
+ train only the weights of the TVM using Masked Language
717
+ Modeling (MLM) objective [Devlin et al., 2019] followed by
718
+ joint training.
719
+ We observe that joint modeling largely benefits from ini-
720
+ tializing the TVM weights. When we train (pre-train) TVM
721
+ using MLM objective, we are inducing a prior on the TVM
722
+ which is not the case when we jointly train TVM-KA with
723
+ randomly initialized weights.
724
+ However, the performance of the joint model is poor com-
725
+ pared to the interleaved training. By interleaving and sharing
726
+ attention heads between TVM and KA, we are inducing task
727
+ bias which benefits the model for the end task.
728
+ Comparison with record-view We additionally compare our
729
+ approach with an alternate view of the data, namely, the
730
+ record view. Given a temporal sequence of JSON objects, we
731
+ obtain a representation for the entire JSON. The sequence of
732
+ the JSON representation is now passed through a transformer
733
+ encoder followed by a sequence classifier module.
734
+ We observe that our model obtains comparable perfor-
735
+ mance when compared with the record view model.
736
+ Effect of parameter size/model capacity: To rule out the
+ possibility of model capacity being the bottleneck for the
+ baseline approach, we fine-tune the bert-large-uncased model,
+ which has 3x the parameters of bert-base-uncased. The
740
+ results are presented in Table 3.
741
+ We observe significant improvements on both Cloud Ser-
742
+ vice Logs and Loghub datasets when we use bert-large-
743
+ uncased over bert-base-uncased model with the flattened en-
744
+ coding. However, the results are still poor compared to our TVM-
+ KA model, thereby demonstrating that the sequence-length
+ limit largely impacts model performance with flattened
+ encoding, a limitation absent in our TVM-KA approach.
748
+
749
+ Figure 3: Performance of Flattened Encoding models and our TVM-KA on Cloud Service Logs. We divide the test set into four bins based on the
+ sequence length.
751
+ Figure 4: Effect of sharing different attention heads between TVM and KA
752
+ [Plots for Figures 3 and 4 omitted: only axis ticks and legend labels (bert-random, bert-pretrained,
+ bert-large-pretrained, longformer, TVM-KA; Macro F1-Score vs. sequence-length bins; Macro F1 and
+ Recall@10 vs. number of shared attention heads) survive extraction.]
+ Comparison with Long Sequence Models: In the previous
+ ablation, we observed that the sequence length limit is re-
+ sponsible for existing pre-trained models performing poorly
+ with the flattened encoding. We now use pre-trained models,
+ namely Longformer [Beltagy et al., 2020b], BigBird [Zaheer
800
+ Number of Shared Attention Headsnamely Longformer [Beltagy et al., 2020b], BigBird [Zaheer
801
+ et al., 2020] which can handle longer sequences of up to 4096
802
+ tokens (as opposed to 512 tokens of BERT models) with the
803
+ flattened encoding. The results are presented in Table 3.
804
+ We observe that the ability to handle longer sequence lengths
805
+ benefits the Loghub dataset. The performance of these mod-
806
+ els is comparable with our TVM-KA approach. On the con-
807
+ trary, we do not observe any improvements over the bert-
808
+ base-uncased models for Cloud Service Logs.
809
+ Additionally, we report the performance of the above mod-
810
+ els on sequences from each test set having varying sequence
811
+ lengths. Figure 3 demonstrates the performance vs sequence
812
+ length comparison. We observe that our TVM-KA approach
813
+ consistently outperforms flattened encoding baselines on all
814
+ sequence length bins. Surprisingly, pre-trained longformer
815
+ gives comparable performance with bert-base-uncased and
816
+ bert-large-uncased models.
817
+ Number of Shared Attention Heads: We use the BERT encoder
818
+ [Devlin et al., 2019] to model both TVM and KA compo-
819
+ nents in our model. This allows us to share attention heads
820
+ between the TVM and KA components. bert-base-uncased
821
+ has 12 attention heads at each encoder layer. We experiment
822
+ with sharing 0, 2, 4, 6, 8 attention heads between TVM and
823
+ KA components.
824
+ Figure 4 presents the performance of our TVM-KA ap-
825
+ proach with different numbers of shared attention heads be-
826
+ tween TVM and KA components. In general, we observe that
827
+ sharing 4 or 6 attention heads helps the most, while not
+ sharing any attention heads, or sharing more, results in poorer
+ performance.
830
+ 4 Related Work
832
+ Our work relates to multiple areas in existing literature. First,
833
+ modeling multiple sets of key-value entries has similarities to
834
+ modeling the rows and columns in a table. Second, the pre-
835
+ dictive tasks we apply our models to are related to multi-variate
836
+ regression and spatio-temporal modeling tasks. Finally, our
837
+ two stage TVM-KA architecture is a form of hierarchical en-
838
+ coding, forms of which have been used for multiple tasks in
839
+ the past – from tabular data modeling to encoding text.
840
+ Modeling Tabular and Timeseries Data: In our work we
841
+ explicitly model the temporal sequences for values for each
842
+ key that is encountered in the sequences. In this sense the na-
843
+ ture of the data we are modeling is related to modeling tabular
844
+ data where each row is a time-step and the columns indicate
845
+ different values for a particular field. However, even though
846
+ at the surface the modeling of data appears related, the actual
847
+ tasks and models developed for tasks on tabular data cannot
848
+ be applied to semi-structured sequence objects. This is be-
849
+ cause the work on modeling textual tabular data often focuses
850
+ on retrieving information from cells
851
+ [Zayats et al., 2021;
852
+ Wang et al., 2021; Iida et al., 2021], multi-hop reasoning
853
+ across information in different cells across parts of the table
854
+ [Chen et al., 2021; Chen et al., 2020; Zhao et al., 2022], com-
855
+ bining information present in tables and unstructured text for
856
+ information seeking tasks [Li et al., 2021; Zhu et al., 2021;
857
+ Zayats et al., 2021; Chen et al., 2020], etc. In addition, work
858
+ on modeling time-series data present in the form of tables has
859
+ been focused on numerical data [Zhou et al., 2021; Zerveas
860
+ et al., 2021] with purpose-built task specific architectures that
861
+ cannot be easily adapted for other tasks [Wu et al., 2021;
862
+ Padhi et al., 2021].
863
+ Modeling Temporal Graph Sequences and Recommender
864
+ Systems: Finally, our work is also related to a rich body
865
+ of work related to modeling temporal graphs and recom-
866
+ mender systems [Xu et al., 2021b; Grigsby et al., 2021;
867
+ de Souza Pereira Moreira et al., 2021].
868
+ Temporal Graph
869
+ evolution problems involve constructing representations to
870
+ enable tasks such as link prediction [Sankar et al., 2020;
871
+ Xu et al., 2021a], item recommendation in user sessions
872
+ [Hsu and te Li, 2021], answering queries on graphs and se-
873
+ quences [Saxena et al., 2022], classifying graph instances in
874
+ a sequence,[Xu et al., 2021a; Xu et al., 2021b] etc. How-
875
+ ever, we find that such approaches either do not scale for long
876
+ sequences for the tasks we experiment with or are likely to
877
+ suffer from an information bottleneck resulting in lower per-
878
+ formance as compared to TVM-KA.
879
+ Fundamentally, we find that such approaches also limit our
880
+ ability to model text sequences as in Figure 1(c), because such
881
+ a model would create a sequence representation over graph-
882
+ computed sentence representations (each sentence would be
883
+ a graph and the tokens within that sentence its nodes). As a result,
884
+ sentence tokens would not directly interact across time-step
885
+ boundaries. Similarly, in the case of inputs with features such
886
+ as in Figure 1(d) it would limit the ability to model such ut-
887
+ terances across time-step boundaries since a representation
888
+ would first be created using fields and meta-data within each
889
+ turn (time-step) and then aggregated as a sequence. We dis-
890
+ cuss more about how temporal graph sequences and our ap-
891
+ proach to model structured object sequences differ in Section
892
+ 5.
893
+ 5 Discussion
895
+ The choice of a column-evolution representation enables us
896
+ to encode larger objects as well as long sequences. Our ex-
897
+ periments demonstrate that by using the two-part TVM-KA
898
+ architecture we are able to incorporate information about the
899
+ downstream task as a form of inductive bias for the value
900
+ modeler network that provides key-representations. We hy-
901
+ pothesize it is this task-specific bias that helps our model per-
902
+ form better than joint-modeling (when computationally fea-
903
+ sible).
904
+ However,
905
+ the column-evolution representation of se-
906
+ quences does not support tasks such as sequence tagging
+ of the structured objects. In addition, it also does not allow
+ us to model graph sequences effectively, as it does not use a
+ global view of a structured object. Thus, it may not be able to
+ learn patterns across fields at different time steps. For such
+ tasks, a record-view representation is more helpful. Both
+ representations have their strengths and weaknesses, and the
+ choice of using a column-view representation vs. a record-
+ view representation should be made keeping the downstream
+ task in mind. To the best of our knowledge, we are the first
916
+ to demonstrate the use of a column-view representation for
917
+ structured object sequence encoding at scale.
918
+
919
+ 6 Conclusion
921
+ In this paper, we have presented a two-part encoder to model
922
+ structured sequence objects. We additionally present a novel
923
+ interleaving scheme to train our two-part encoder. We also
924
+ induce task bias into the model by sharing attention heads
925
+ between Temporal Value Modeler and Key Aggregator com-
926
+ ponents. Our proposed approach outperforms baseline ap-
927
+ proaches which flatten structured sequence objects and gives
928
+ comparable performance to record-view approaches.
929
+ References
930
+ [Baxter, 1997] Jonathan Baxter. A bayesian/information the-
931
+ oretic model of learning to learn via multiple task sam-
932
+ pling. Machine learning, 28(1):7–39, 1997.
933
+ [Beltagy et al., 2020a] Iz Beltagy, Matthew E. Peters, and
934
+ Arman Cohan.
935
+ Longformer: The long-document trans-
936
+ former. arXiv:2004.05150, 2020.
937
+ [Beltagy et al., 2020b] Iz Beltagy, Matthew E. Peters, and
938
+ Arman Cohan.
939
+ Longformer: The long-document trans-
940
+ former. ArXiv, abs/2004.05150, 2020.
941
+ [Brown et al., 2020] Tom Brown, Benjamin Mann, Nick Ry-
942
+ der, Melanie Subbiah, Jared D Kaplan, Prafulla Dhari-
943
+ wal, Arvind Neelakantan, Pranav Shyam, Girish Sastry,
944
+ Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss,
945
+ Gretchen Krueger, Tom Henighan, Rewon Child, Aditya
946
+ Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter,
947
+ Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin,
948
+ Scott Gray, Benjamin Chess, Jack Clark, Christopher
949
+ Berner, Sam McCandlish, Alec Radford, Ilya Sutskever,
950
+ and Dario Amodei. Language models are few-shot learn-
951
+ ers. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Bal-
952
+ can, and H. Lin, editors, Advances in Neural Information
953
+ Processing Systems, volume 33, pages 1877–1901. Curran
954
+ Associates, Inc., 2020.
955
+ [Chen et al., 2020] Wenhu Chen, Hanwen Zha, Zhiyu Chen,
956
+ Wenhan Xiong, Hong Wang, and William Yang Wang.
957
+ HybridQA: A dataset of multi-hop question answering
958
+ over tabular and textual data.
959
+ In Findings of the As-
960
+ sociation for Computational Linguistics: EMNLP 2020,
961
+ pages 1026–1036, Online, November 2020. Association
962
+ for Computational Linguistics.
963
+ [Chen et al., 2021] Zhiyu Chen, Wenhu Chen, Charese Smi-
964
+ ley, Sameena Shah, Iana Borova, Dylan Langdon, Reema
965
+ Moussa, Matt Beane, Ting-Hao Huang, Bryan Routledge,
966
+ and William Yang Wang. FinQA: A dataset of numeri-
967
+ cal reasoning over financial data. In Proceedings of the
968
+ 2021 Conference on Empirical Methods in Natural Lan-
969
+ guage Processing, pages 3697–3711, Online and Punta
970
+ Cana, Dominican Republic, November 2021. Association
971
+ for Computational Linguistics.
972
+ [de Souza Pereira Moreira et al., 2021] Gabriel de Souza
+ Pereira Moreira, Sara Rabhi, Jeong Min Lee, Ronay Ak,
976
+ and Even Oldridge. Transformers4rec: Bridging the gap
977
+ between nlp and sequential/session-based recommenda-
978
+ tion.
979
+ In Fifteenth ACM Conference on Recommender
980
+ Systems, pages 143–153, 2021.
981
+ [Devlin et al., 2019] Jacob Devlin, Ming-Wei Chang, Ken-
982
+ ton Lee, and Kristina Toutanova. BERT: pre-training of
983
+ deep bidirectional transformers for language understand-
984
+ ing. In Jill Burstein, Christy Doran, and Thamar Solorio,
985
+ editors, Proceedings of the 2019 Conference of the North
986
+ American Chapter of the Association for Computational
987
+ Linguistics: Human Language Technologies, NAACL-HLT
988
+ 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1
989
+ (Long and Short Papers), pages 4171–4186. Association
990
+ for Computational Linguistics, 2019.
991
+ [Duong et al., 2015] Long Duong, Trevor Cohn, Steven
992
+ Bird, and Paul Cook.
993
+ Low resource dependency pars-
994
+ ing: Cross-lingual parameter sharing in a neural network
995
+ parser. In Proceedings of the 53rd Annual Meeting of the
996
+ Association for Computational Linguistics and the 7th In-
997
+ ternational Joint Conference on Natural Language Pro-
998
+ cessing (Volume 2: Short Papers), pages 845–850, Beijing,
999
+ China, July 2015. Association for Computational Linguis-
1000
+ tics.
1001
+ [French, 1999] Robert M. French.
1002
+ Catastrophic forgetting
1003
+ in connectionist networks. Trends in Cognitive Sciences,
1004
+ 3(4):128–135, 1999.
1005
+ [Grigsby et al., 2021] Jake Grigsby, Zhe Wang, and Yanjun
1006
+ Qi. Long-range transformers for dynamic spatiotemporal
1007
+ forecasting. arXiv preprint arXiv:2109.12218, 2021.
1008
+ [He et al., 2017] Pinjia He, Jieming Zhu, Zibin Zheng, and
1009
+ Michael R Lyu. Drain: An online log parsing approach
1010
+ with fixed depth tree. In 2017 IEEE international confer-
1011
+ ence on web services (ICWS), pages 33–40. IEEE, 2017.
1012
+ [He et al., 2020] Shilin He, Jieming Zhu, Pinjia He, and
1013
+ Michael R Lyu. Loghub: a large collection of system log
1014
+ datasets towards automated log analytics. arXiv preprint
1015
+ arXiv:2008.06448, 2020.
1016
+ [Hsu and te Li, 2021] Cheng-Mao Hsu and Cheng te Li. Re-
1017
+ tagnn:
1018
+ Relational temporal attentive graph neural net-
1019
+ works for holistic sequential recommendation. Proceed-
1020
+ ings of the Web Conference 2021, 2021.
1021
+ [Iida et al., 2021] Hiroshi Iida, Dung Thai, Varun Manju-
1022
+ natha, and Mohit Iyyer.
1023
+ Tabbie: Pretrained representa-
1024
+ tions of tabular data. In Proceedings of the 2021 Confer-
1025
+ ence of the North American Chapter of the Association for
1026
+ Computational Linguistics: Human Language Technolo-
1027
+ gies, pages 3446–3456, 2021.
1028
+ [Kumaran et al., 2016] Dharshan Kumaran, Demis Hass-
1029
+ abis, and James L McClelland. What learning systems do
1030
+ intelligent agents need? complementary learning systems
1031
+ theory updated. Trends in cognitive sciences, 20(7):512–
1032
+ 534, 2016.
1033
+ [Lewis et al., 2020] Mike Lewis, Yinhan Liu, Naman Goyal,
1034
+ Marjan Ghazvininejad, Abdelrahman Mohamed, Omer
1035
+ Levy, Veselin Stoyanov, and Luke Zettlemoyer. BART:
1036
+ Denoising sequence-to-sequence pre-training for natural
1037
+ language generation, translation, and comprehension. In
1038
+ Proceedings of the 58th Annual Meeting of the Association
1039
+ for Computational Linguistics, pages 7871–7880, Online,
1040
+ July 2020. Association for Computational Linguistics.
1041
+
1042
+ [Li et al., 2021] Alexander Hanbo Li, Patrick Ng, Peng Xu,
1043
+ Henghui Zhu, Zhiguo Wang, and Bing Xiang. Dual reader-
1044
+ parser on hybrid textual and tabular evidence for open do-
1045
+ main question answering. In ACL-IJCNLP 2021, 2021.
1046
+ [McClelland et al., 1995] James L McClelland, Bruce L Mc-
1047
+ Naughton, and Randall C O’Reilly. Why there are comple-
1048
+ mentary learning systems in the hippocampus and neocor-
1049
+ tex: insights from the successes and failures of connection-
1050
+ ist models of learning and memory. Psychological review,
1051
+ 102(3):419, 1995.
1052
+ [McCloskey and Cohen, 1989] Michael McCloskey and
1055
+ Neal J Cohen. Catastrophic interference in connectionist
1056
+ networks: The sequential learning problem. In Psychology
1057
+ of learning and motivation, volume 24, pages 109–165.
1058
+ Elsevier, 1989.
1059
+ [Padhi et al., 2021] Inkit Padhi, Yair Schiff, Igor Melnyk,
1060
+ Mattia Rigotti, Youssef Mroueh, Pierre Dognin, Jerret
1061
+ Ross, Ravi Nair, and Erik Altman. Tabular transformers
1062
+ for modeling multivariate time series. In ICASSP 2021-
1063
+ 2021 IEEE International Conference on Acoustics, Speech
1064
+ and Signal Processing (ICASSP), pages 3565–3569. IEEE,
1065
+ 2021.
1066
+ [Pfeiffer et al., 2020] Jonas Pfeiffer, Andreas Rücklé,
+ Clifton Poth, Aishwarya Kamath, Ivan Vulić, Sebastian
1071
+ Ruder, Kyunghyun Cho, and Iryna Gurevych. Adapterhub:
1072
+ A framework for adapting transformers. In Proceedings
1073
+ of the 2020 Conference on Empirical Methods in Natural
1074
+ Language Processing (EMNLP 2020): Systems Demon-
1075
+ strations, pages 46–54, Online, 2020. Association for
1076
+ Computational Linguistics.
1077
+ [Radford et al., 2019] Alec Radford, Jeff Wu, Rewon Child,
1078
+ David Luan, Dario Amodei, and Ilya Sutskever. Language
1079
+ models are unsupervised multitask learners. 2019.
1080
+ [Ramesh et al., 2021] Aditya Ramesh, Mikhail Pavlov,
1084
+ Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford,
1085
+ Mark Chen, and Ilya Sutskever. Zero-shot text-to-image
1086
+ generation. CoRR, abs/2102.12092, 2021.
1087
+ [Ratcliff, 1990] Roger Ratcliff.
1088
+ Connectionist models of
1089
+ recognition memory: constraints imposed by learning and
1090
+ forgetting functions.
1091
+ Psychological review, 97(2):285,
1092
+ 1990.
1093
+ [Rombach et al., 2022] Robin Rombach, Andreas
+ Blattmann, Dominik Lorenz, Patrick Esser, and Björn
1097
+ Ommer.
1098
+ High-resolution image synthesis with latent
1099
+ diffusion models.
1100
+ In Proceedings of the IEEE/CVF
1101
+ Conference on Computer Vision and Pattern Recognition
1102
+ (CVPR), pages 10684–10695, June 2022.
1103
+ [Sankar et al., 2020] Aravind Sankar, Yanhong Wu, Liang
1104
+ Gou, Wei Zhang, and Hao Yang. Dysat: Deep neural rep-
1105
+ resentation learning on dynamic graphs via self-attention
1106
+ networks. In Proceedings of the 13th International Con-
1107
+ ference on Web Search and Data Mining, WSDM ’20,
1108
+ page 519–527, New York, NY, USA, 2020. Association
1109
+ for Computing Machinery.
1110
+ [Saxena et al., 2022] Apoorv Saxena, Adrian Kochsiek, and
1111
+ Rainer Gemulla. Sequence-to-sequence knowledge graph
1112
+ completion and question answering. In Proceedings of the
1113
+ 60th Annual Meeting of the Association for Computational
1114
+ Linguistics (Volume 1: Long Papers), pages 2814–2828,
1115
+ 2022.
1116
+ [stanley et al., 2017] jeremy stanley, Meg Risdal, sharathrao,
1117
+ and Will Cukierski. Instacart market basket analysis, 2017.
1118
+ [Vaswani et al., 2017] Ashish Vaswani, Noam Shazeer, Niki
1119
+ Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez,
1120
+ Łukasz Kaiser, and Illia Polosukhin. Attention is all you
1121
+ need.
1122
+ In Proceedings of the 31st International Confer-
1123
+ ence on Neural Information Processing Systems, NIPS’17,
1124
+ page 6000–6010, Red Hook, NY, USA, 2017. Curran As-
1125
+ sociates Inc.
1126
+ [Wang et al., 2021] Fei Wang, Kexuan Sun, Muhao Chen,
1127
+ Jay Pujara, and Pedro A Szekely. Retrieving complex ta-
1128
+ bles with multi-granular graph representation learning. In
1129
+ SIGIR, 2021.
1130
+ [Workshop et al., 2022] BigScience Workshop, :, Teven Le
1131
+ Scao, Angela Fan, Christopher Akiki, Ellie Pavlick,
1132
+ Suzana Ili´c, Daniel Hesslow, Roman Castagn´e, Alexan-
1133
+ dra Sasha Luccioni, Franc¸ois Yvon, Matthias Gall´e,
1134
+ Jonathan Tow, Alexander M. Rush, Stella Biderman, Al-
1135
+ bert Webson, Pawan Sasanka Ammanamanchi, Thomas
1136
+ Wang, Benoˆıt Sagot, Niklas Muennighoff, Albert Vil-
1137
+ lanova del Moral, Olatunji Ruwase, Rachel Bawden,
1138
+ Stas Bekman, Angelina McMillan-Major, Iz Beltagy,
1139
+ Huu Nguyen, Lucile Saulnier, Samson Tan, Pedro Or-
1140
+ tiz Suarez, Victor Sanh, Hugo Laurenc¸on, Yacine Jer-
1141
+ nite, Julien Launay, Margaret Mitchell, Colin Raffel,
1142
+ Aaron Gokaslan, Adi Simhi, Aitor Soroa, Alham Fikri
1143
+ Aji, Amit Alfassy, Anna Rogers, Ariel Kreisberg Nitzav,
1144
+ Canwen Xu, Chenghao Mou, Chris Emezue, Christopher
1145
+ Klamm, Colin Leong, Daniel van Strien, David Ifeoluwa
1146
+ Adelani, Dragomir Radev, Eduardo Gonz´alez Ponferrada,
1147
+ Efrat Levkovizh, Ethan Kim, Eyal Bar Natan, Francesco
1148
+ De Toni, G´erard Dupont, Germ´an Kruszewski, Giada Pis-
1149
+ tilli, Hady Elsahar, Hamza Benyamina, Hieu Tran, Ian Yu,
1150
+ Idris Abdulmumin, Isaac Johnson, Itziar Gonzalez-Dios,
1151
+ Javier de la Rosa, Jenny Chim, Jesse Dodge, Jian Zhu,
1152
+ Jonathan Chang, J¨org Frohberg, Joseph Tobing, Joydeep
1153
+ Bhattacharjee, Khalid Almubarak, Kimbo Chen, Kyle Lo,
1154
+ Leandro Von Werra, Leon Weber, Long Phan, Loubna Ben
1155
+ allal, Ludovic Tanguy, Manan Dey, Manuel Romero
1156
+ Mu˜noz, Maraim Masoud, Mar´ıa Grandury, Mario ˇSaˇsko,
1157
+ Max Huang, Maximin Coavoux, Mayank Singh, Mike
1158
+ Tian-Jian Jiang, Minh Chien Vu, Mohammad A. Jauhar,
1159
+ Mustafa Ghaleb, Nishant Subramani, Nora Kassner, Nu-
1160
+ rulaqilla Khamis, Olivier Nguyen, Omar Espejel, Ona
1161
+ de Gibert,
1162
+ Paulo Villegas,
1163
+ Peter Henderson,
1164
+ Pierre
1165
+ Colombo, Priscilla Amuok, Quentin Lhoest, Rheza Harli-
1166
+ man, Rishi Bommasani, Roberto Luis L´opez, Rui Ribeiro,
1167
+ Salomey Osei, Sampo Pyysalo, Sebastian Nagel, Shamik
1168
+ Bose, Shamsuddeen Hassan Muhammad, Shanya Sharma,
1169
+ Shayne Longpre, Somaieh Nikpoor, Stanislav Silber-
1170
+ berg, Suhas Pai, Sydney Zink, Tiago Timponi Torrent,
1171
+ Timo Schick, Tristan Thrush, Valentin Danchev, Vassilina
1172
+ Nikoulina, Veronika Laippala, Violette Lepercq, Vrinda
1173
+
1174
+ Prabhu, Zaid Alyafeai, Zeerak Talat, Arun Raja, Ben-
1175
+ jamin Heinzerling, Chenglei Si, Davut Emre Tas¸ar, Eliz-
1176
+ abeth Salesky, Sabrina J. Mielke, Wilson Y. Lee, Ab-
1177
+ heesht Sharma, Andrea Santilli, Antoine Chaffin, Arnaud
1178
+ Stiegler, Debajyoti Datta, Eliza Szczechla, Gunjan Chh-
1179
+ ablani, Han Wang, Harshit Pandey, Hendrik Strobelt, Ja-
1180
+ son Alan Fries, Jos Rozen, Leo Gao, Lintang Sutawika,
1181
+ M Saiful Bari, Maged S. Al-shaibani, Matteo Manica, Ni-
1182
+ hal Nayak, Ryan Teehan, Samuel Albanie, Sheng Shen,
1183
+ Srulik Ben-David, Stephen H. Bach, Taewoon Kim, Tali
1184
+ Bers, Thibault Fevry, Trishala Neeraj, Urmish Thakker,
1185
+ Vikas Raunak, Xiangru Tang, Zheng-Xin Yong, Zhiqing
1186
+ Sun, Shaked Brody, Yallow Uri, Hadar Tojarieh, Adam
1187
+ Roberts, Hyung Won Chung, Jaesung Tae, Jason Phang,
1188
+ Ofir Press, Conglong Li, Deepak Narayanan, Hatim Bour-
1189
+ foune, Jared Casper, Jeff Rasley, Max Ryabinin, Mayank
1190
+ Mishra, Minjia Zhang, Mohammad Shoeybi, Myriam Pey-
1191
+ rounette, Nicolas Patry, Nouamane Tazi, Omar Sanse-
1192
+ viero, Patrick von Platen, Pierre Cornette, Pierre Franc¸ois
1193
+ Lavall´ee, R´emi Lacroix, Samyam Rajbhandari, Sanchit
1194
+ Gandhi, Shaden Smith, St´ephane Requena, Suraj Patil,
1195
+ Tim Dettmers, Ahmed Baruwa, Amanpreet Singh, Anasta-
1196
+ sia Cheveleva, Anne-Laure Ligozat, Arjun Subramonian,
1197
+ Aur´elie N´ev´eol, Charles Lovering, Dan Garrette, Deepak
1198
+ Tunuguntla, Ehud Reiter, Ekaterina Taktasheva, Ekate-
1199
+ rina Voloshina, Eli Bogdanov, Genta Indra Winata, Hai-
1200
+ ley Schoelkopf, Jan-Christoph Kalo, Jekaterina Novikova,
1201
+ Jessica Zosa Forde, Jordan Clive, Jungo Kasai, Ken Kawa-
1202
+ mura, Liam Hazan, Marine Carpuat, Miruna Clinciu, Na-
1203
+ joung Kim, Newton Cheng, Oleg Serikov, Omer Antverg,
1204
+ Oskar van der Wal, Rui Zhang, Ruochen Zhang, Sebas-
1205
+ tian Gehrmann, Shachar Mirkin, Shani Pais, Tatiana Shav-
1206
+ rina, Thomas Scialom, Tian Yun, Tomasz Limisiewicz,
1207
+ Verena Rieser, Vitaly Protasov, Vladislav Mikhailov, Yada
1208
+ Pruksachatkun, Yonatan Belinkov, Zachary Bamberger,
1209
+ Zdenˇek Kasner, Alice Rueda, Amanda Pestana, Amir
1210
+ Feizpour, Ammar Khan, Amy Faranak, Ana Santos, An-
1211
+ thony Hevia, Antigona Unldreaj, Arash Aghagol, Are-
1212
+ zoo Abdollahi, Aycha Tammour, Azadeh HajiHosseini,
1213
+ Bahareh Behroozi, Benjamin Ajibade, Bharat Saxena,
1214
+ Carlos Mu˜noz Ferrandis, Danish Contractor, David Lan-
1215
+ sky, Davis David, Douwe Kiela, Duong A. Nguyen, Ed-
1216
+ ward Tan, Emi Baylor, Ezinwanne Ozoani, Fatima Mirza,
1217
+ Frankline Ononiwu, Habib Rezanejad, Hessie Jones, In-
1218
+ drani Bhattacharya, Irene Solaiman, Irina Sedenko, Isar
1219
+ Nejadgholi, Jesse Passmore, Josh Seltzer, Julio Bo-
1220
+ nis Sanz, Livia Dutra, Mairon Samagaio, Maraim El-
1221
+ badri, Margot Mieskes, Marissa Gerchick, Martha Akin-
1222
+ lolu, Michael McKenna, Mike Qiu, Muhammed Ghauri,
1223
+ Mykola Burynok, Nafis Abrar, Nazneen Rajani, Nour
1224
+ Elkott, Nour Fahmy, Olanrewaju Samuel, Ran An, Ras-
1225
+ mus Kromann, Ryan Hao, Samira Alizadeh, Sarmad Shub-
1226
+ ber, Silas Wang, Sourav Roy, Sylvain Viguier, Thanh Le,
1227
+ Tobi Oyebade, Trieu Le, Yoyo Yang, Zach Nguyen, Abhi-
1228
+ nav Ramesh Kashyap, Alfredo Palasciano, Alison Calla-
1229
+ han, Anima Shukla, Antonio Miranda-Escalada, Ayush
1230
+ Singh, Benjamin Beilharz, Bo Wang, Caio Brito, Chenxi
1231
+ Zhou, Chirag Jain, Chuxin Xu, Cl´ementine Fourrier,
1232
+ Daniel Le´on Peri˜n´an, Daniel Molano, Dian Yu, Enrique
1233
+ Manjavacas, Fabio Barth, Florian Fuhrimann, Gabriel Al-
1234
+ tay, Giyaseddin Bayrak, Gully Burns, Helena U. Vrabec,
1235
+ Imane Bello, Ishani Dash, Jihyun Kang, John Giorgi,
1236
+ Jonas Golde, Jose David Posada, Karthik Rangasai Sivara-
1237
+ man, Lokesh Bulchandani, Lu Liu, Luisa Shinzato,
1238
+ Madeleine Hahn de Bykhovetz, Maiko Takeuchi, Marc
1239
+ P`amies, Maria A Castillo, Marianna Nezhurina, Mario
1240
+ S¨anger, Matthias Samwald, Michael Cullan, Michael
1241
+ Weinberg, Michiel De Wolf, Mina Mihaljcic, Minna
1242
+ Liu, Moritz Freidank, Myungsun Kang, Natasha See-
1243
+ lam, Nathan Dahlberg, Nicholas Michio Broad, Niko-
1244
+ laus Muellner, Pascale Fung, Patrick Haller, Ramya Chan-
1245
+ drasekhar, Renata Eisenberg, Robert Martin, Rodrigo
1246
+ Canalli, Rosaline Su, Ruisi Su, Samuel Cahyawijaya,
1247
+ Samuele Garda, Shlok S Deshmukh, Shubhanshu Mishra,
1248
+ Sid Kiblawi, Simon Ott, Sinee Sang-aroonsiri, Srishti
1249
+ Kumar, Stefan Schweter, Sushil Bharati, Tanmay Laud,
1250
+ Th´eo Gigant, Tomoya Kainuma, Wojciech Kusa, Yanis
1251
+ Labrak, Yash Shailesh Bajaj, Yash Venkatraman, Yifan
1252
+ Xu, Yingxin Xu, Yu Xu, Zhe Tan, Zhongli Xie, Zifan
1253
+ Ye, Mathilde Bras, Younes Belkada, and Thomas Wolf.
1254
+ Bloom: A 176b-parameter open-access multilingual lan-
1255
+ guage model, 2022.
1256
+ [Wu et al., 2021] Haixu Wu, Jiehui Xu, Jianmin Wang, and
1257
+ Mingsheng Long. Autoformer: Decomposition transform-
1258
+ ers with Auto-Correlation for long-term series forecasting.
1259
+ In Advances in Neural Information Processing Systems,
1260
+ 2021.
1261
+ [Xu et al., 2021a] Dongkuan Xu, Junjie Liang, Wei Cheng,
1262
+ Hua Wei, Haifeng Chen, and Xiang Zhang. Transformer-
1263
+ style relational reasoning with dynamic memory updat-
1264
+ ing for temporal network modeling. Proceedings of the
1265
+ AAAI Conference on Artificial Intelligence, 35(5):4546–
1266
+ 4554, May 2021.
1267
+ [Xu et al., 2021b] Jiehui Xu, Haixu Wu, Jianmin Wang, and
1268
+ Mingsheng Long.
1269
+ Anomaly transformer: Time series
1270
+ anomaly detection with association discrepancy. In Inter-
1271
+ national Conference on Learning Representations, 2021.
1272
+ [Yang et al., 2019] Zhilin Yang, Zihang Dai, Yiming Yang,
1273
+ Jaime Carbonell, Ruslan Salakhutdinov, and Quoc V. Le.
1274
+ XLNet: Generalized Autoregressive Pretraining for Lan-
1275
+ guage Understanding. Curran Associates Inc., Red Hook,
1276
+ NY, USA, 2019.
1277
+ [Zaheer et al., 2020] Manzil Zaheer, Guru Guruganesh, Ku-
1278
+ mar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santi-
1279
+ ago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang,
1280
+ Li Yang, et al.
1281
+ Big bird: Transformers for longer se-
1282
+ quences. Advances in Neural Information Processing Sys-
1283
+ tems, 33, 2020.
1284
+ [Zayats et al., 2021] Victoria Zayats, Kristina Toutanova,
1285
+ and Mari Ostendorf. Representations for question answer-
1286
+ ing from documents with tables and text. In EACL, 2021.
1287
+ [Zerveas et al., 2021] George Zerveas, Srideepika Jayara-
1288
+ man, Dhaval Patel, Anuradha Bhamidipaty, and Carsten
1289
+ Eickhoff. A transformer-based framework for multivariate
1290
+ time series representation learning. In Proceedings of the
1291
+
1292
+ 27th ACM SIGKDD Conference on Knowledge Discovery
1293
+ & Data Mining, pages 2114–2124, 2021.
1294
+ [Zhang et al., 2022] Zizhao Zhang, Han Zhang, Long Zhao,
1295
+ Ting Chen, Sercan ¨O. Arik, and Tomas Pfister.
1296
+ Nested
1297
+ hierarchical transformer: Towards accurate, data-efficient
1298
+ and interpretable visual understanding. In AAAI, 2022.
1299
+ [Zhao et al., 2022] Yilun Zhao, Yunxiang Li, Chenying Li,
1300
+ and Rui Zhang.
1301
+ Multihiertt: Numerical reasoning over
1302
+ multi hierarchical tabular and textual data. In Proceedings
1303
+ of the 60th Annual Meeting of the Association for Compu-
1304
+ tational Linguistics (Volume 1: Long Papers), pages 6588–
1305
+ 6600, 2022.
1306
+ [Zhou et al., 2020] Wangchunshu Zhou, Tao Ge, Furu Wei,
1307
+ Ming Zhou, and Ke Xu. Scheduled drophead: A regular-
1308
+ ization method for transformer models. In Findings of the
1309
+ Association for Computational Linguistics: EMNLP 2020,
1310
+ pages 1971–1980, 2020.
1311
+ [Zhou et al., 2021] Haoyi Zhou, Shanghang Zhang, Jieqi
1312
+ Peng, Shuai Zhang, Jianxin Li, Hui Xiong, and Wancai
1313
+ Zhang. Informer: Beyond efficient transformer for long
1314
+ sequence time-series forecasting. In Proceedings of the
1315
+ AAAI Conference on Artificial Intelligence, volume 35,
1316
+ pages 11106–11115, 2021.
1317
+ [Zhu et al., 2021] Fengbin Zhu, Wenqiang Lei, Youcheng
1318
+ Huang, Chao Wang, Shuo Zhang, Jiancheng Lv, Fuli
1319
+ Feng, and Tat-Seng Chua. TAT-QA: A question answer-
1320
+ ing benchmark on a hybrid of tabular and textual content
1321
+ in finance.
1322
+ In Proceedings of the 59th Annual Meeting
1323
+ of the Association for Computational Linguistics and the
1324
+ 11th International Joint Conference on Natural Language
1325
+ Processing (Volume 1: Long Papers), pages 3277–3287,
1326
+ Online, August 2021. Association for Computational Lin-
1327
+ guistics.
1328
+
_NAzT4oBgHgl3EQfFvpH/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
_NAzT4oBgHgl3EQfvf2q/content/tmp_files/2301.01708v1.pdf.txt ADDED
@@ -0,0 +1,2069 @@
1
+ Extremal problems for the eccentricity matrices of complements of
2
+ trees
3
+ Iswar Mahato ∗
4
+ M. Rajesh Kannan†
5
+ January 5, 2023
6
+ Abstract
7
+ The eccentricity matrix of a connected graph G, denoted by E(G), is obtained from the
8
+ distance matrix of G by keeping the largest nonzero entries in each row and each column, and
9
+ leaving zeros in the remaining ones. The E-eigenvalues of G are the eigenvalues of E(G), in which
10
+ the largest one is the E-spectral radius of G. The E-energy of G is the sum of the absolute values
11
+ of all E-eigenvalues of G. In this article, we study some of the extremal problems for eccentricity
12
+ matrices of complements of trees and characterize the extremal graphs. First, we determine the
13
+ unique tree whose complement has minimum (respectively, maximum) E-spectral radius among
14
+ the complements of trees. Then, we prove that the E-eigenvalues of the complement of a tree are
15
+ symmetric about the origin. As a consequence of these results, we characterize the trees whose
16
+ complement has minimum (respectively, maximum) least E-eigenvalue among the complements
17
+ of trees. Finally, we discuss the extremal problems for the second largest E-eigenvalue and the
18
+ E-energy of complements of trees and characterize the extremal graphs. As an application, we
19
+ obtain Nordhaus-Gaddum type lower bounds for the second largest E-eigenvalue and E-energy
20
+ of a tree and its complement.
21
+ AMS Subject Classification (2010): 05C50, 05C35.
22
+ Keywords. Complements of trees, Eccentricity matrix, E-spectral radius, Second largest E-
23
+ eigenvalue, Least E-eigenvalue, E-energy.
24
+ 1 Introduction
26
+ Throughout the paper, we consider finite, simple, and connected graphs. Let G = (V (G), E(G))
27
+ be a graph with the vertex set V (G) = {v1, v2, . . . , vn} and the edge set E(G) = {e1, e2, . . . , em}.
28
+ The number of vertices in G is the order of G. If two vertices u and v in G are adjacent, then we
29
+ write u ∼ v, otherwise u ≁ v. The adjacency matrix A(G) of G is the n × n matrix with its rows
30
+ and columns indexed by the vertices of G, and the entries are defined as
31
+ A(G)uv = 1 if u ∼ v, and A(G)uv = 0 otherwise.
37
+ ∗Department of Mathematics, Indian Institute of Technology Kharagpur, Kharagpur 721302, India.
38
+ Email:
39
+ iswarmahato02@gmail.com, iswarmahato02@iitkgp.ac.in
40
+ †Department of Mathematics, Indian Institute of Technology Hyderabad, Hyderabad 502285, India. Email: ra-
41
+ jeshkannan1.m@gmail.com, rajeshkannan@math.iith.ac.in
42
43
+ arXiv:2301.01708v1 [math.CO] 4 Jan 2023
44
+
45
+ Let λ1(G) ≥ λ2(G) ≥ . . . ≥ λn(G) be the eigenvalues of the adjacency matrix A(G) of G. The
46
+ largest eigenvalue of A(G) is the spectral radius of G. The energy (or the A-energy) of G is defined
47
+ as EA(G) = Σ_{i=1}^{n} |λi(G)|. The distance between the vertices u and v in G, denoted by dG(u, v), is
51
+ the length of a shortest path between them in G, and define dG(u, u) = 0 for all u ∈ V (G). The
52
+ distance matrix D(G) of G is the n×n matrix whose rows and columns are indexed by the vertices
53
+ of G and the (u, v)-th entry is equal to dG(u, v). Let NG(v) denote the collection of vertices that
54
+ are adjacent to the vertex v in G, and NG(v) is called the neighborhood of v in G. The eccentricity
55
+ eG(v) of a vertex v ∈ V (G) is the maximum distance from v to all other vertices of G.
56
+ The
57
+ maximum eccentricity of all vertices of G is the diameter of G, which is denoted by diam(G). The
58
+ diametrical path is a path whose length is equal to the diameter of G.
59
+ The eccentricity matrix of a graph G on n vertices, denoted by E(G), is the n × n matrix whose
60
+ rows and columns are indexed by the vertices of G, and the entries are defined as
61
+ E(G)uv = dG(u, v) if dG(u, v) = min{eG(u), eG(v)}, and E(G)uv = 0 otherwise.
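+ For concreteness, a small computational sketch (our own, using networkx and
+ numpy; not part of the paper) that builds E(G) directly from this definition
+ and reads off the E-eigenvalues:
+ import networkx as nx
+ import numpy as np
+ 
+ def eccentricity_matrix(G):
+     nodes = list(G.nodes)
+     d = dict(nx.all_pairs_shortest_path_length(G))   # d_G(u, v)
+     e = nx.eccentricity(G)                           # e_G(v)
+     E = np.zeros((len(nodes), len(nodes)))
+     for i, u in enumerate(nodes):
+         for j, v in enumerate(nodes):
+             if u != v and d[u][v] == min(e[u], e[v]):
+                 E[i, j] = d[u][v]
+     return E
+ 
+ E = eccentricity_matrix(nx.path_graph(4))            # the path P4
+ eigs = np.sort(np.linalg.eigvalsh(E))                # E-eigenvalues
+ spectral_radius = eigs[-1]                           # E-spectral radius
+ energy = np.abs(eigs).sum()                          # E-energy (defined below)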
67
+ The eccentricity matrix E(G) of a graph G is a real symmetric matrix, and hence all of its eigenvalues
68
+ are real. The eigenvalues of E(G) are the E-eigenvalues of G, in which the largest one is the E-
69
+ spectral radius of G. The set of all E-eigenvalues of G is the E-spectrum of G. If ξ1 > ξ2 > . . . > ξk
70
+ are the distinct E-eigenvalues of G, then we write the E-spectrum of G as
71
+ Specε(G) = ( ξ1  ξ2  . . .  ξk ; m1  m2  . . .  mk ),
83
+ where mi is the multiplicity of ξi for i = 1, 2, . . . , k.
84
+ Spectral extremal problems are one of the interesting problems in spectral graph theory. Re-
85
+ cently, the extremal problems for eccentricity matrices of graphs have gained significant importance
86
+ and attracted the attention of researchers. In [23], Wei et al. considered the extremal problem for
87
+ E-spectral radius of trees and determined the trees with minimum E-spectral radius among all trees
88
+ on n vertices. Also, they characterized the trees with minimum E-spectral radius among the trees
89
+ with a given diameter. In [14], the authors studied the minimal problem for E-spectral radius of
90
+ graphs with a given diameter and characterized the extreme graphs. Moreover, they identified the
91
+ unique bipartite graph with minimum E-spectral radius. Wang et al. [19] characterized the graphs
92
+ with minimum and second minimum E-spectral radius as well as the graphs with maximum least
93
+ and second least E-eigenvalues. Recently, He and Lu [4] considered the maximal problem for the
94
+ E-spectral radius of trees with the fixed odd diameter and determined the extremal trees. Wei et
95
+ al. [25] characterized the trees with second minimum E-spectral radius and identified the trees with
96
+ the small matching number having the minimum E-spectral radius. Very Recently, Wei and Li [24]
97
+ studied the relationship between the majorization and E-spectral radius of complete multipartite
98
+ graphs and determined the extremal complete multipartite graphs with minimum and maximum
99
+ E-spectral radius. Mahato and Kannan [16] considered the extremal problem for the second largest
100
+ E-eigenvalue of trees and determined the unique tree with minimum second largest E-eigenvalue
101
+ among all trees on n vertices other than the star. For more advances on the eccentricity matrices
102
+ of graphs, we refer to [7, 8, 13, 15, 17, 18, 21, 22].
103
+ The eccentricity energy (or the E-energy) of a graph G is defined [20] as
104
+ EE(G) =
105
+ n
106
+
107
+ i=1
108
+ |ξi| ,
109
+ 2
110
+
111
+ where ξ1, ξ2, . . . , ξn are the E-eigenvalues of G. Recently, many researchers focused on the eccen-
112
+ tricity energy of graphs. In [20], Wang et al. studied the E-energy of graphs and obtained some
113
+ bounds for the E-energy of graphs and determined the corresponding extremal graphs. Lei et al.
114
+ [8] obtained an upper bound for the E-energy of graphs and characterized the extremal graphs.
115
+ Very recently, Mahato and Kannan [16] studied the minimization problem for the E-energy of trees
116
+ and characterized the trees with minimum E-energy among all trees on n vertices. For more details
117
+ about the E-energy of graphs, we refer to [14, 15, 18].
118
+ Motivated by the above-mentioned works, in this article, we study the extremal problems for
119
+ eccentricity matrices of complements of trees and characterize the extremal graphs for the E-spectral
120
+ radius, second largest E-eigenvalue, least E-eigenvalue and E-energy among the complements of
121
+ trees. The extremal problems for the complements of trees with respect to the other graph matrices
122
+ have been studied in [2, 9, 10, 11]. For a tree T, let T c be the complement of T. Throughout the
123
+ paper, we always assume that T c is connected; hence, T is not isomorphic to the star graph. Let
124
+ T c
125
+ n denote the complements of all trees on n vertices. Note that if T is a tree with diam(T) > 3,
126
+ then E(T c) = 2A(T). If diam(T) = 3, then E(T c) ≥ 2A(T) entrywise. Since A(T) is irreducible,
127
+ therefore E(T c) is also irreducible.
128
+ The article is organized as follows: In section 2, we collect needed notations and some prelim-
129
+ inary results. In section 3, we characterize the extremal graphs with the minimum and maximum
130
+ E-spectral radius among the complements of trees. As a consequence, we determine the unique
131
+ graphs with the minimum and maximum least E-eigenvalues among the complements of trees. We
132
+ discuss the extremal problems for the second largest E-eigenvalue and the E-energy of complements
133
+ of trees in section 4 and section 5, respectively.
134
+ 2
135
+ Preliminaries
136
+ In this section, we introduce some notations and collect some preliminary results, which will be
137
+ used in the subsequent sections. First, we define the quotient matrix and equitable partition.
138
+ Definition 2.1 (Equitable partition [1]). Let A be a real symmetric matrix whose rows and columns
139
+ are indexed by X = {1, 2, . . . , n}. Let Π = {X1, X2, . . . , Xm} be a partition of X. The characteristic
140
+ matrix C is the n×m matrix whose j-th column is the characteristic vector of Xj (j = 1, 2, . . . , m).
141
+ Let A be partitioned according to Π as
142
+ A =
143
+
144
+ �����
145
+ A11
146
+ A12
147
+ . . .
148
+ A1m
149
+ A21
150
+ A22
151
+ . . .
152
+ A2m
153
+ ...
154
+ . . .
155
+ ...
156
+ ...
157
+ Am1
158
+ Am2
159
+ . . .
160
+ Amm
161
+
162
+ �����
163
+ ,
164
+ where Aij denotes the submatrix (block) of A formed by rows in Xi and the columns in Xj. If qij
165
+ denotes the average row sum of Aij, then the matrix Q = (qij) is called the quotient matrix of A.
166
+ If the row sum of each block Aij is a constant, then the partition Π is called equitable partition.
167
+ In the following theorem, we state a well-known result about the spectrum of a quotient matrix
168
+ corresponding to an equitable partition.
169
+ Theorem 2.1 ([1]). Let Q be a quotient matrix of any square matrix A corresponding to an equitable
170
+ partition. Then the spectrum of A contains the spectrum of Q.
171
+ 3
172
+
173
+ Figure 1: The tree T a,b
174
+ n,3, where a + b = n − 4.
175
+ Figure 2: The tree Da,b
176
+ n,d, where a + b = n − d − 1 and d ≥ 4.
177
+ Let Kn, Pn, and K1,n−1 denote the complete graph, the path, and the star on n vertices,
178
+ respectively. For d = 3, let T a,b
179
+ n,d be the tree obtained from P4 = v0v1v2v3 by attaching a pendant
180
+ vertices to v1 and b pendent vertices to v2, where a + b = n − 4 and b ≥ a ≥ 0. For d ≥ 4, let Da,b
181
+ n,d
182
+ be the tree obtained from Pd+1 = v0v1v2 . . . vd by attaching a pendant vertices to v1 and b pendent
183
+ vertices to vd−1, where a + b = n − d − 1 and b ≥ a ≥ 0. The trees T a,b
184
+ n,3 and Da,b
185
+ n,d are depicted in
186
+ Figure 1 and Figure 2, respectively. For a real number x, let ⌊x⌋ denote the greatest integer less
187
+ than or equal to x, and ⌈x⌉ denote the least integer greater than or equal to x. In the following
188
+ lemma, we compute the E-spectrum of the complement of the tree T a,b
189
+ n,3, where a + b = n − 4 and
190
+ b ≥ a ≥ 0.
191
+ Lemma 2.1. For b ≥ a ≥ 0 with a + b = n − 4, the E-spectrum of (T a,b
192
+ n,3)c is given by
193
+ SpecE
194
+
195
+ (T a,b
196
+ n,3)c�
197
+ =
198
+
199
+
200
+
201
+
202
+
203
+ 4n+1±√
204
+ (4n+1)2−64(a+1)(b+1)
205
+ 2
206
+ 0
207
+
208
+ 4n+1±√
209
+ (4n+1)2−64(a+1)(b+1)
210
+ 2
211
+ 1
212
+ n − 4
213
+ 1
214
+
215
+
216
+ � .
217
+ Proof. Let T a,b
218
+ n,3 be the tree obtained from P4 = v0v1v2v3 by attaching a pendant vertices u1, . . . , ua
219
+ to v1 and b pendent vertices w1, . . . , wb to v2, where a + b = n − 4 and b ≥ a ≥ 0. Then the
220
+ 4
221
+
222
+ Vo
223
+ V1
224
+ V2
225
+ V3
226
+ bVo
227
+ V1
228
+ V2
229
+ Vd-2
230
+ Vd-1
231
+ Vd
232
+ a
233
+ beccentricity matrix of (T a,b
234
+ n,3)c is given by
235
+ E
236
+
237
+ (T a,b
238
+ n,3)c�
239
+ =
240
+ u1
241
+ . . .
242
+ ua
243
+ v0
244
+ v1
245
+ v2
246
+ v3
247
+ w1
248
+ . . .
249
+ wb
250
+
251
+
252
+
253
+
254
+
255
+
256
+
257
+
258
+
259
+
260
+
261
+
262
+
263
+
264
+
265
+
266
+
267
+
268
+
269
+
270
+
271
+
272
+
273
+
274
+
275
+
276
+
277
+
278
+
279
+
280
+
281
+
282
+
283
+
284
+ u1
285
+ 0
286
+ . . .
287
+ 0
288
+ 0
289
+ 2
290
+ 0
291
+ 0
292
+ 0
293
+ . . .
294
+ 0
295
+ ...
296
+ ...
297
+ ...
298
+ ...
299
+ ...
300
+ ...
301
+ ...
302
+ ...
303
+ ...
304
+ ...
305
+ ...
306
+ ua
307
+ 0
308
+ . . .
309
+ 0
310
+ 0
311
+ 2
312
+ 0
313
+ 0
314
+ 0
315
+ . . .
316
+ 0
317
+ v0
318
+ 0
319
+ . . .
320
+ 0
321
+ 0
322
+ 2
323
+ 0
324
+ 0
325
+ 0
326
+ . . .
327
+ 0
328
+ v1
329
+ 2
330
+ . . .
331
+ 2
332
+ 2
333
+ 0
334
+ 3
335
+ 0
336
+ 0
337
+ . . .
338
+ 0
339
+ v2
340
+ 0
341
+ . . .
342
+ 0
343
+ 0
344
+ 3
345
+ 0
346
+ 2
347
+ 2
348
+ . . .
349
+ 2
350
+ v3
351
+ 0
352
+ . . .
353
+ 0
354
+ 0
355
+ 0
356
+ 2
357
+ 0
358
+ 0
359
+ . . .
360
+ 0
361
+ w1
362
+ 0
363
+ . . .
364
+ 0
365
+ 0
366
+ 0
367
+ 2
368
+ 0
369
+ 0
370
+ . . .
371
+ 0
372
+ ...
373
+ ...
374
+ ...
375
+ ...
376
+ ...
377
+ ...
378
+ ...
379
+ ...
380
+ ...
381
+ ...
382
+ ...
383
+ wb
384
+ 0
385
+ . . .
386
+ 0
387
+ 0
388
+ 0
389
+ 2
390
+ 0
391
+ 0
392
+ . . .
393
+ 0
394
+ .
395
+ It is easy to see that the rank of E
396
+
397
+ (T a,b
398
+ n,3)c�
399
+ is 4. Therefore, 0 is an eigenvalue of E
400
+
401
+ (T a,b
402
+ n,3)c�
403
+ with
404
+ multiplicity n − 4.
405
+ If U = {u1, . . . , ua, v0} and W = {v3, w1, . . . , wb}, then Π1 = U ∪{v1}∪{v2}∪W is an equitable
406
+ partition of E
407
+
408
+ (T a,b
409
+ n,3)c�
410
+ with the quotient matrix
411
+ Q1 =
412
+
413
+
414
+
415
+
416
+
417
+ 0
418
+ 2
419
+ 0
420
+ 0
421
+ 2(a + 1)
422
+ 0
423
+ 3
424
+ 0
425
+ 0
426
+ 3
427
+ 0
428
+ 2(b + 1)
429
+ 0
430
+ 0
431
+ 2
432
+ 0
433
+
434
+
435
+
436
+
437
+ � .
438
+ By a direct calculation, the eigenvalues of Q1 are
439
+ ±
440
+
441
+ 4a + 4b + 17 +
442
+
443
+ (4a + 4b + 17)2 − 64(a + 1)(b + 1)
444
+ 2
445
+ and
446
+ ±
447
+
448
+ 4a + 4b + 17 −
449
+
450
+ (4a + 4b + 17)2 − 64(a + 1)(b + 1)
451
+ 2
452
+ .
453
+ Now, the proof follows from Theorem 2.1 by substituting a + b = n − 4.
454
+ In the following theorem, we find the adjacency energy of the path graph on n vertices. We
455
+ include proof of this result for the sake of completeness.
456
+ Theorem 2.2. The energy of a path Pn on n vertices is given by
457
+ EA(Pn) =
458
+
459
+
460
+
461
+
462
+
463
+ 2
464
+
465
+ cot
466
+
467
+ π
468
+ 2(n+1)
469
+
470
+ − 1
471
+
472
+ if n is odd,
473
+ 2
474
+
475
+ csc
476
+
477
+ π
478
+ 2(n+1)
479
+
480
+ − 1
481
+
482
+ if n is even.
483
+ Proof. We know that the eigenvalues of Pn are 2 cos πk
484
+ n+1, k = 1, 2, . . . , n. By using the formula
485
+ cos x + cos 2x + . . . + cos nx = sin
486
+ � nx
487
+ 2
488
+
489
+ csc
490
+ � x
491
+ 2
492
+
493
+ cos
494
+ � (n+1)x
495
+ 2
496
+
497
+ (for a proof of this identity, we refer to
498
+ 5
499
+
500
+ [6]), we have
501
+ EA(Pn) =
502
+
503
+
504
+
505
+
506
+
507
+ 4
508
+
509
+ cos
510
+
511
+ π
512
+ n+1
513
+
514
+ + cos
515
+ � 2π
516
+ n+1
517
+
518
+ + . . . + cos
519
+ � (n−1)π
520
+ 2(n+1)
521
+ ��
522
+ if n is odd,
523
+ 4
524
+
525
+ cos
526
+
527
+ π
528
+ n+1
529
+
530
+ + cos
531
+ � 2π
532
+ n+1
533
+
534
+ + . . . + cos
535
+
536
+
537
+ 2(n+1)
538
+ ��
539
+ if n is even;
540
+ =
541
+
542
+
543
+
544
+ 4 sin
545
+ � (n−1)π
546
+ 4(n+1)
547
+
548
+ csc
549
+
550
+ π
551
+ 2(n+1)
552
+
553
+ cos
554
+ � π
555
+ 4
556
+
557
+ if n is odd,
558
+ 4 sin
559
+
560
+
561
+ 4(n+1)
562
+
563
+ csc
564
+ � (n+2)π
565
+ 4(n+1)
566
+
567
+ cos
568
+ � π
569
+ 4
570
+
571
+ if n is even;
572
+ =
573
+
574
+
575
+
576
+
577
+
578
+ 2
579
+
580
+ cos
581
+
582
+ π
583
+ 2(n+1)
584
+
585
+ − sin
586
+
587
+ π
588
+ 2(n+1)
589
+ ��
590
+ csc
591
+
592
+ π
593
+ 2(n+1)
594
+
595
+ if n is odd,
596
+ 2
597
+
598
+ sin
599
+ � π
600
+ 2
601
+
602
+ − sin
603
+
604
+ π
605
+ 2(n+1)
606
+ ��
607
+ csc
608
+
609
+ π
610
+ 2(n+1)
611
+
612
+ if n is even;
613
+ =
614
+
615
+
616
+
617
+
618
+
619
+ 2
620
+
621
+ cot
622
+
623
+ π
624
+ 2(n+1)
625
+
626
+ − 1
627
+
628
+ if n is odd,
629
+ 2
630
+
631
+ csc
632
+
633
+ π
634
+ 2(n+1)
635
+
636
+ − 1
637
+
638
+ if n is even.
639
+ Now, we collect some results related to the spectral radius, second-largest eigenvalue, and energy
640
+ for the adjacency matrices of trees.
641
+ Theorem 2.3 ([12]). Let T be a tree on n vertices. Then λ1(T) ≥ 2 cos
642
+ π
643
+ n+1 with equality if and
644
+ only if T ∼= Pn.
645
+ Theorem 2.4 ([5, Theorem 2]). Let T be a tree on n ≥ 4 vertices and T ≇ K1,n−1.
646
+ Then
647
+ λ1(T) ≤
648
+
649
+ n−1+
650
+
651
+ n2−6n+13
652
+ 2
653
+ with equality if and only if T ∼= T 0,n−4
654
+ n,3
655
+ .
656
+ Theorem 2.5 ([26, Theorem 2]). Let T be a tree of order n ≥ 4 and T ≇ K1,n−1, T 0,n−4
657
+ n,3
658
+ . Then
659
+ λ2(T) ≥ 1.
660
+ Theorem 2.6 ([5, Theorem 10]). Let T be a tree with n ≥ 4 vertices.
661
+ 1. If T ≇ T s−1,s−1
662
+ n,5
663
+ , then λ2(T) ≤
664
+
665
+ n−3
666
+ 2 . The equality holds if and only if n = 2s + 3 and
667
+ T ∼= T s−1,s−1
668
+ n,4
669
+ , T s−2,s−1
670
+ n,5
671
+ , T s−2,s−2
672
+ n,6
673
+ .
674
+ 2. If T ∼= T s−1,s−1
675
+ n,5
676
+ , then λ2(T) = x >
677
+
678
+ n−3
679
+ 2 , where x is the positive root of the equation
680
+ x3 + x2 − (s + 1) − s = 0.
681
+ Lemma 2.2 ([3, Proposition 4]). Let T be a tree on n ≥ 2 vertices such that T is not isomorphic
682
+ to K1,n−1, T 0,n−4
683
+ n,3
684
+ , T 1,n−3
685
+ n,3
686
+ and T 0,n−5
687
+ n,4
688
+ . Then
689
+ EA(T) > EA(T 0,n−5
690
+ n,4
691
+ ) > EA(T 1,n−3
692
+ n,3
693
+ ) > EA(T 0,n−4
694
+ n,3
695
+ ) > EA(K1,n−1).
696
+ Lemma 2.3 ([3, Proposition 3]). Let T be a tree on n ≥ 2 vertices such that T ≇ K1,n−1, Pn. Then
697
+ EA(K1,n−1) < EA(T) < EA(Pn).
698
+ 6
699
+
700
+ Next, we state a result about the minimum second largest E-eigenvalue of trees.
701
+ Theorem 2.7 ([16]). Let T be a tree on n ≥ 4 vertices other than the star. Then
702
+ ξ2(T) ≥
703
+
704
+ 13n − 35 −
705
+
706
+ 169n2 − 974n + 1417
707
+ 2
708
+ with equality if and only if T ∼= T 0,n−4
709
+ n,3
710
+ .
711
+ The following theorem is about the characterization of trees with minimum E-energy.
712
+ Theorem 2.8 ([16]). Let T be a tree on n ≥ 5 vertices. Then
713
+ EE(T) ≥ 2
714
+
715
+ 13n − 35 + 8
716
+
717
+ n − 3
718
+ with equality if and only if T ∼= T 0,n−4
719
+ n,3
720
+ .
721
+ 3
722
+ Extremal problems for E-spectral radius and least E-eigenvalue
723
+ In this section, we characterize the graphs with the minimum and maximum E-spectral radius
724
+ among the complements of all trees on n vertices. As a consequence, we determine the graphs for
725
+ which the least E-eigenvalues attain the minimum and maximum value among T c
726
+ n , where T c
727
+ n denote
728
+ the complements of all trees on n vertices. First, we give an ordering of the complements of trees
729
+ with diameter 3 according to their E-spectral radius.
730
+ Theorem 3.1. Let T be a tree with diameter 3, that is, T ∼= T a,b
731
+ n,3 with a + b = n − 4, b ≥ a ≥ 0.
732
+ Then the complements of T a,b
733
+ n,3 can be ordered with respect to their E-spectral radius as follows:
734
+ ξ1
735
+
736
+ (T 0,n−4
737
+ n,3
738
+ )c�
739
+ > ξ1
740
+
741
+ (T 1,n−3
742
+ n,3
743
+ )c�
744
+ > . . . > ξ1
745
+ ��
746
+ T
747
+ ⌊ n−4
748
+ 2 ⌋,⌈ n−4
749
+ 2 ⌉
750
+ n,3
751
+ �c�
752
+ .
753
+ Proof. By Lemma 2.1 , we have
754
+ ξ1
755
+
756
+ (T a,b
757
+ n,3)c�
758
+ =
759
+
760
+ 4n + 1 +
761
+
762
+ (4n + 1)2 − 64(a + 1)(b + 1)
763
+ 2
764
+ =
765
+
766
+ 4n + 1 +
767
+
768
+ (4n + 1)2 − 64(n − 3 + a(n − 4 − a))
769
+ 2
770
+ (since, a + b = n − 4).
771
+ Now, consider the function f(x) = 4n + 1 +
772
+
773
+ (4n + 1)2 − 64(n − 3 + x(n − 4 − x)), where
774
+ 0 ≤ x ≤ ⌊n−4
775
+ 2 ⌋. Therefore,
776
+ f′(x) =
777
+ −32(n − 4 − 2x)
778
+
779
+ (4n + 1)2 − 64(n − 3 + x(n − 4 − x))
780
+ ≤ 0.
781
+ Hence, f(x) is an decreasing function of x, where 0 ≤ x ≤ ⌊ n−4
782
+ 2 ⌋. Therefore, ξ1
783
+
784
+ (T a,b
785
+ n,3)c�
786
+ =
787
+
788
+ f(a)
789
+ 2
790
+ is an decreasing function for 0 ≤ a ≤ ⌊ n−4
791
+ 2 ⌋. This completes the proof.
792
+ 7
793
+
794
+ As a consequence of the above result, we get the following corollaries.
795
+ Corollary 3.1. Let T be a tree on n ≥ 4 vertices with diam(T) = 3. Then
796
+ ξ1(T c) ≤
797
+
798
+ 4n + 1 +
799
+
800
+ (4n + 1)2 − 64(n − 3)
801
+ 2
802
+ with equality if and only if T ∼= T 0,n−4
803
+ n,3
804
+ .
805
+ Proof. The proof follows from Lemma 2.1 and Theorem 3.1.
806
+ Corollary 3.2. If T is a tree with diameter 3, then
807
+ ξ1(T c) ≥
808
+
809
+
810
+
811
+
812
+
813
+
814
+ 4n+1+√72n−63
815
+ 2
816
+ if n is even,
817
+
818
+ 4n+1+√72n−47
819
+ 2
820
+ if n is odd.
821
+ The equality holds if and only if T ∼= T
822
+ ⌊ n−4
823
+ 2 ⌋,⌈ n−4
824
+ 2 ⌉
825
+ n,3
826
+ .
827
+ Proof. If T ∼= T
828
+ ⌊ n−4
829
+ 2 ⌋,⌈ n−4
830
+ 2 ⌉
831
+ n,3
832
+ , by Lemma 2.1 it follows that
833
+ ξ1(T c) =
834
+
835
+
836
+
837
+ �4n + 1 +
838
+
839
+ (4n + 1)2 − 64
840
+
841
+ ⌈ n−2
842
+ 2 ⌉⌊ n−2
843
+ 2 ⌋
844
+
845
+ 2
846
+ =
847
+
848
+
849
+
850
+
851
+
852
+
853
+ 4n+1+√72n−63
854
+ 2
855
+ if n is even,
856
+
857
+ 4n+1+√72n−47
858
+ 2
859
+ if n is odd.
860
+ If T ≇ T
861
+ ⌊ n−4
862
+ 2 ⌋,⌈ n−4
863
+ 2 ⌉
864
+ n,3
865
+ , then, by Theorem 3.1, it follows that ξ1(T c) > ξ1
866
+ ��
867
+ T
868
+ ⌊ n−4
869
+ 2 ⌋,⌈ n−4
870
+ 2 ⌉
871
+ n,3
872
+ �c�
873
+ . This
874
+ completes the proof.
875
+ In the following theorem, we characterize the trees whose complement has the minimum E-
876
+ spectral radius among the complements of all trees on n vertices. Note that if T is a tree with
877
+ diam(T) > 3, then E(T c) = 2A(T). If diam(T) = 3, then E(T c) ≥ 2A(T) entrywise.
878
+ Theorem 3.2. Let T be a tree of order n ≥ 4. Then
879
+ ξ1(T c) ≥ ξ1(P c
880
+ n)
881
+ with equality if and only if T ∼= Pn.
882
+ Proof. Since P4 is the only tree on 4 vertices with connected complement, we assume that n ≥ 5.
883
+ For n ≥ 5, we have ξ1(P c
884
+ n) = 2λ1(Pn) = 4 cos
885
+ π
886
+ n+1 < 4.
887
+ If T is a tree with diameter 3, by Corollary 3.2 it follows that
888
+ ξ1(T c) ≥
889
+
890
+
891
+
892
+
893
+
894
+
895
+ 4n+1+√72n−63
896
+ 2
897
+ if n is even,
898
+
899
+ 4n+1+√72n−47
900
+ 2
901
+ if n is odd.
902
+ It is easy to check that
903
+
904
+ 4n+1+√72n−47
905
+ 2
906
+ >
907
+
908
+ 4n+1+√72n−63
909
+ 2
910
+ > 4. Therefore, ξ1(T c) ≥ ξ1(P c
911
+ n).
912
+ If T is a tree with diam(T) ≥ 4, then the proof follows from Theorem 3.2.
913
+ 8
914
+
915
+ Now, we determine the unique tree whose complement has maximum E-spectral radius in T c
916
+ n .
917
+ Theorem 3.3. Let T be a tree of order n ≥ 4. Then
918
+ ξ1(T c) ≤ ξ1
919
+
920
+ (T 0,n−4
921
+ n,3
922
+ )c�
923
+ with equality if and only if T ∼= T 0,n−4
924
+ n,3
925
+ .
926
+ Proof. If T is a tree with diameter 3, then the proof follows from Corollary 3.1. If T is a tree with
927
+ diam(T) ≥ 4, then ξ1(T c) = 2λ1(T) and the proof follows from Theorem 2.4.
928
+ Now, we consider the extremal problem for the least E-eigenvalue of complements of trees and
929
+ characterize the extremal graphs. First, we show that the E-eigenvalues of the complements of trees
930
+ are symmetric about the origin.
931
+ Theorem 3.4. Let T be a tree of order n ≥ 4. Then the eigenvalues of E(T c) are symmetric about
932
+ the origin, that is, if λ is an eigenvalue of E(T c) with multiplicity k, then −λ is also an eigenvalue
933
+ of E(T c) with multiplicity k.
934
+ Proof. If T is a tree with diameter 3, then the proof follows from Lemma 2.1. Let T be a tree with
935
+ diam(T) ≥ 4. Then E(T c) = 2A(T). Since every tree is a bipartite graph, the eigenvalues of A(T)
936
+ are symmetric about the origin, and hence the E-eigenvalues of T c are also symmetric about the
937
+ origin.
938
+ Let ξn(T c) denote the least E-eigenvalue of E(T c). In the following theorems, we characterize
939
+ the trees whose complements have minimum and maximum least E-eigenvalue in T c
940
+ n , respectively.
941
+ Theorem 3.5. Let T be a tree of order n ≥ 4. Then
942
+ ξn(T c) ≥ ξn
943
+
944
+ (T 0,n−4
945
+ n,3
946
+ )c�
947
+ with equality if and only if T ∼= T 0,n−4
948
+ n,3
949
+ .
950
+ Proof. The proof follows from Theorem 3.3 and Theorem 3.4.
951
+ Theorem 3.6. Let T be a tree of order n ≥ 4. Then
952
+ ξn(T c) ≤ ξn(P c
953
+ n)
954
+ with equality if and only if T ∼= Pn.
955
+ Proof. The proof follows from Theorem 3.2 and Theorem 3.4.
956
+ 4
957
+ Extremal problems for the second largest E-eigenvalue
958
+ In this section, we study the extremal problems for the second largest E-eigenvalue of complements
959
+ of trees and characterize the extremal graphs. First, we give an ordering of the complements of
960
+ trees with diameter 3 according to their second-largest E-eigenvalues.
961
+ 9
962
+
963
+ Theorem 4.1. Let T be a tree with diameter 3, that is, T ∼= T a,b
964
+ n,3 with a+b = n−4, b ≥ a ≥ 0. Then
965
+ the complements of the trees T a,b
966
+ n,3 can be ordered with respect to their second largest E-eigenvalues
967
+ as follows:
968
+ ξ2
969
+
970
+ (T 0,n−4
971
+ n,3
972
+ )c�
973
+ < ξ2
974
+
975
+ (T 1,n−3
976
+ n,3
977
+ )c�
978
+ < . . . < ξ2
979
+ ��
980
+ T
981
+ ⌊ n−2
982
+ 2 ⌋,⌈ n−2
983
+ 2 ⌉
984
+ n,3
985
+ �c�
986
+ .
987
+ Proof. By Lemma 2.1, it follows that
988
+ ξ2
989
+
990
+ (T a,b
991
+ n,3)c�
992
+ =
993
+
994
+ 4n + 1 −
995
+
996
+ (4n + 1)2 − 64(a + 1)(b + 1)
997
+ 2
998
+ =
999
+
1000
+ 4n + 1 −
1001
+
1002
+ (4n + 1)2 − 64(n − 3 + a(n − 4 − a))
1003
+ 2
1004
+ (since, a + b = n − 4).
1005
+ Now, consider the function f(x) = 4n + 1 −
1006
+
1007
+ (4n + 1)2 − 64(n − 3 + x(n − 4 − x)), where
1008
+ 0 ≤ x ≤ ⌊n−4
1009
+ 2 ⌋. Therefore,
1010
+ f′(x) =
1011
+ 32(n − 4 − 2x)
1012
+
1013
+ (4n + 1)2 − 64(n − 3 + x(n − 4 − x))
1014
+ ≥ 0.
1015
+ Hence, f(x) is an increasing function of x for 0 ≤ x ≤ ⌊n−4
1016
+ 2 ⌋. Therefore, ξ2
1017
+
1018
+ (T a,b
1019
+ n,3)c�
1020
+ =
1021
+
1022
+ f(a)
1023
+ 2
1024
+ is an
1025
+ increasing function for 0 ≤ a ≤ ⌊n−4
1026
+ 2 ⌋. This completes the proof.
1027
+ As a consequence of the above result, we get the following corollaries.
1028
+ Corollary 4.1. If T is a tree with diameter 3, then
1029
+ ξ2(T c) ≥ 2
1030
+
1031
+ 4n + 1 −
1032
+
1033
+ (4n + 1)2 − 64(n − 3)
1034
+ with equality if and only if T ∼= T 0,n−4
1035
+ n,3
1036
+ .
1037
+ Proof. The proof follows from Lemma 2.1 and Theorem 4.1.
1038
+ Corollary 4.2. If T is a tree with diameter 3, then
1039
+ ξ2(T c) ≤
1040
+
1041
+
1042
+
1043
+
1044
+
1045
+
1046
+ 4n+1−√72n−63
1047
+ 2
1048
+ if n is even,
1049
+
1050
+ 4n+1−√72n−47
1051
+ 2
1052
+ if n is odd.
1053
+ The equality holds if and only if T ∼= T
1054
+ ⌊ n−4
1055
+ 2 ⌋,⌈ n−4
1056
+ 2 ⌉
1057
+ n,3
1058
+ .
1059
+ Proof. If T ∼= T
1060
+ ⌊ n−4
1061
+ 2 ⌋,⌈ n−4
1062
+ 2 ⌉
1063
+ n,3
1064
+ , by Lemma 2.1 it follows that
1065
+ ξ2(T c) =
1066
+
1067
+
1068
+
1069
+ �4n + 1 −
1070
+
1071
+ (4n + 1)2 − 64
1072
+
1073
+ ⌈ n−2
1074
+ 2 ⌉⌊ n−2
1075
+ 2 ⌋
1076
+
1077
+ 2
1078
+ =
1079
+
1080
+
1081
+
1082
+
1083
+
1084
+
1085
+ 4n+1−√72n−63
1086
+ 2
1087
+ if n is even,
1088
+
1089
+ 4n+1−√72n−47
1090
+ 2
1091
+ if n is odd.
1092
+ 10
1093
+
1094
+ If T ≇ T
1095
+ ⌊ n−4
1096
+ 2 ⌋,⌈ n−4
1097
+ 2 ⌉
1098
+ n,3
1099
+ , by Theorem 4.1 it follows that ξ2(T c) < ξ2
1100
+ ��
1101
+ T
1102
+ ⌊ n−2
1103
+ 2 ⌋,⌈ n−2
1104
+ 2 ⌉
1105
+ n,3
1106
+ �c�
1107
+ . This completes
1108
+ the proof.
1109
+ In the following theorem, we determine the unique tree whose complement has minimum second
1110
+ largest E-eigenvalue in T c
1111
+ n .
1112
+ Theorem 4.2. Let T be a tree of order n ≥ 4. Then
1113
+ ξ2(T c) ≥ ξ2
1114
+
1115
+ (T 0,n−4
1116
+ n,3
1117
+ )c�
1118
+ with equality if and only if T ∼= T 0,n−4
1119
+ n,3
1120
+ .
1121
+ Proof. If T is a tree on n ≥ 4 vertices with diameter 3, then the proof follows from Corollary 4.1.
1122
+ Let T be a tree with diam(T) ≥ 4. Then E(T c) = 2A(T), and hence by Theorem 2.5, we
1123
+ have ξ2(T c) ≥ 2. Note that
1124
+
1125
+ (4n − 7)2 + 144 > 4n − 7 and hence (4n + 1 −
1126
+
1127
+ (4n − 7)2 + 144) <
1128
+ 8.
1129
+ Therefore, ξ2
1130
+
1131
+ (T 0,n−4
1132
+ n,3
1133
+ )c�
1134
+ =
1135
+
1136
+ 4n+1−√
1137
+ (4n−7)2+144
1138
+ 2
1139
+ < 2.
1140
+ Thus, ξ2(T c) > ξ2
1141
+
1142
+ (T 0,n−4
1143
+ n,3
1144
+ )c�
1145
+ .
1146
+ This
1147
+ completes the proof.
1148
+ Next, we characterize the maximal graphs for the second largest E-eigenvalue of complements
1149
+ of trees.
1150
+ Theorem 4.3. Let T be a tree with n ≥ 4 vertices.
1151
+ 1. If T ≇ T s−1,s−1
1152
+ n,5
1153
+ , then ξ2(T) ≤
1154
+
1155
+ 2(n − 3). The equality holds if and only if n = 2s + 3 and
1156
+ T ∼= T s−1,s−1
1157
+ n,4
1158
+ , T s−2,s−1
1159
+ n,5
1160
+ , T s−2,s−2
1161
+ n,6
1162
+ .
1163
+ 2. If T ∼= T s−1,s−1
1164
+ n,5
1165
+ , then ξ2(T) = 2x >
1166
+
1167
+ 2(n − 3), where x is the positive root of the equation
1168
+ x3 + x2 − (s + 1) − s = 0.
1169
+ Proof. If T is a tree on n ≥ 4 vertices with diameter 3, by Corollary 4.2 it follows that
1170
+ ξ2(T c) ≤
1171
+
1172
+
1173
+
1174
+
1175
+
1176
+
1177
+ 4n+1−√72n−63
1178
+ 2
1179
+ if n is even,
1180
+
1181
+ 4n+1−√72n−47
1182
+ 2
1183
+ if n is odd.
1184
+ It is easy to check that
1185
+
1186
+ 4n+1−√72n−47
1187
+ 2
1188
+ <
1189
+
1190
+ 4n+1−√72n−63
1191
+ 2
1192
+ <
1193
+
1194
+ 2(n − 3) for n ≥ 4. Therefore,
1195
+ ξ2(T c) <
1196
+
1197
+ 2(n − 3).
1198
+ Let T be a tree with diam(T) ≥ 4.
1199
+ Then ξ2(T c) = 2λ2(A(T)) and the proof follows from
1200
+ Theorem 2.6.
1201
+ Now, we give a Nordhaus-Gaddum type lower bound for the second largest E-eigenvalue of a
1202
+ tree and its complement, which directly follows from Theorem 2.7 and Theorem 4.2.
1203
+ Theorem 4.4. Let T be a tree of order n ≥ 4. Then
1204
+ ξ2(T) + ξ2(T c) ≥
1205
+ ��
1206
+ 13n − 35 −
1207
+
1208
+ 169n2 − 974n + 1417
1209
+ 2
1210
+ +
1211
+
1212
+ 4n + 1 −
1213
+
1214
+ 16n2 − 56n + 193
1215
+ 2
1216
+
1217
+ with equality if and only if T ∼= T 0,n−4
1218
+ n,3
1219
+ .
1220
+ 11
1221
+
1222
+ 5
1223
+ Extremal problems for E-energy
1224
+ In this section, we characterize the complements of trees with the minimum and maximum E-
1225
+ energy among the complements of all trees on n vertices, respectively.
1226
+ To begin with, in the
1227
+ following lemma, we give an ordering of the complements of trees with diameter 3 according to
1228
+ their E-energy.
1229
+ Theorem 5.1. Let T be a tree with diameter 3, that is, T ∼= T a,b
1230
+ n,3 with a + b = n − 4, b ≥ a ≥ 0.
1231
+ Then the complements of T a,b
1232
+ n,3 can be ordered with respect to their E-energy as follows:
1233
+ EE
1234
+
1235
+ (T 0,n−4
1236
+ n,3
1237
+ )c�
1238
+ < EE
1239
+
1240
+ (T 1,n−3
1241
+ n,3
1242
+ )c�
1243
+ < . . . < EE
1244
+
1245
+ (T
1246
+ ⌊ n−4
1247
+ 2 ⌋,⌈ n−4
1248
+ 2 ⌉
1249
+ n,3
1250
+ )c�
1251
+ .
1252
+ Proof. By Lemma 2.1, it follows that EE
1253
+
1254
+ (T a,b
1255
+ n,3)c�
1256
+ = 2
1257
+
1258
+ ξ1
1259
+
1260
+ (T a,b
1261
+ n,3)c�
1262
+ + ξ2
1263
+
1264
+ (T a,b
1265
+ n,3)c��
1266
+ , where
1267
+ ξ1
1268
+
1269
+ (T a,b
1270
+ n,3)c�
1271
+ =
1272
+
1273
+ 4n + 1 +
1274
+
1275
+ (4n + 1)2 − 64(a + 1)(b + 1)
1276
+ 2
1277
+ and
1278
+ ξ2
1279
+
1280
+ (T a,b
1281
+ n,3)c�
1282
+ =
1283
+
1284
+ 4n + 1 −
1285
+
1286
+ (4n + 1)2 − 64(a + 1)(b + 1)
1287
+ 2
1288
+ .
1289
+ Therefore,
1290
+ EE
1291
+
1292
+ (T a,b
1293
+ n,3)c�
1294
+ = 2
1295
+
1296
+ 4n + 1 + 8
1297
+
1298
+ (a + 1)(b + 1)
1299
+ = 2
1300
+
1301
+ 4n + 1 + 8
1302
+
1303
+ n − 3 + a(n − 4 − a)
1304
+ (since, a + b = n − 4).
1305
+ Now, consider the function f(x) = 4n + 1 + 8
1306
+
1307
+ n − 3 + x(n − 4 − x), where 0 ≤ x ≤ ⌊ n−4
1308
+ 2 ⌋.
1309
+ Therefore,
1310
+ f′(x) =
1311
+ 8(n − 4 − 2x)
1312
+ 4n + 1 + 8
1313
+
1314
+ n − 3 + x(n − 4 − x)
1315
+ ≥ 0.
1316
+ Hence, f(x) is an increasing function of x, where 0 ≤ x ≤ ⌊n−4
1317
+ 2 ⌋. Therefore, EE
1318
+
1319
+ (T a,b
1320
+ n,3)c�
1321
+ = 2
1322
+
1323
+ f(a)
1324
+ is an increasing function for 0 ≤ a ≤ ⌊n−4
1325
+ 2 ⌋. This completes the proof.
1326
+ As a consequence of the above result, we get the following corollaries.
1327
+ Corollary 5.1. If T is a tree with diameter 3, then
1328
+ EE(T c) ≥ 2
1329
+
1330
+ 4n + 1 + 8
1331
+
1332
+ n − 3
1333
+ with equality if and only if T ∼= T 0,n−4
1334
+ n,3
1335
+ .
1336
+ Proof. By Lemma 2.1, it follows that
1337
+ ξ1
1338
+
1339
+ (T 0,n−4
1340
+ n,3
1341
+ )c�
1342
+ =
1343
+
1344
+ 4n + 1 +
1345
+
1346
+ (4n + 1)2 − 64(n − 3)
1347
+ 2
1348
+ ,
1349
+ and
1350
+ ξ2
1351
+
1352
+ (T 0,n−4
1353
+ n,3
1354
+ )c�
1355
+ =
1356
+
1357
+ 4n + 1 −
1358
+
1359
+ (4n + 1)2 − 64(n − 3)
1360
+ 2
1361
+ .
1362
+ 12
1363
+
1364
+ Thus,
1365
+ EE
1366
+
1367
+ (T 0,n−4
1368
+ n,3
1369
+ )c�
1370
+ = 2
1371
+
1372
+ ξ1
1373
+
1374
+ (T 0,n−4
1375
+ n,3
1376
+ )c�
1377
+ + ξ2
1378
+
1379
+ (T 0,n−4
1380
+ n,3
1381
+ )c��
1382
+ = 2
1383
+
1384
+ 4n + 1 + 8
1385
+
1386
+ n − 3.
1387
+ Now, the proof follows from Theorem 5.1.
1388
+ Corollary 5.2. If T is a tree with diameter 3, then
1389
+ EE(T c) ≤
1390
+
1391
+
1392
+
1393
+ 2√8n − 7
1394
+ if n is even,
1395
+ 2
1396
+
1397
+ 4n + 1 + 4
1398
+
1399
+ n2 − 4n + 3
1400
+ if n is odd.
1401
+ The equality holds if and only if T ∼= T
1402
+ ⌊ n−4
1403
+ 2 ⌋,⌈ n−4
1404
+ 2 ⌉
1405
+ n,3
1406
+ .
1407
+ Proof. It follows from Lemma 2.1 that
1408
+ ξ1
1409
+
1410
+ (T
1411
+ ⌊ n−4
1412
+ 2 ⌋,⌈ n−4
1413
+ 2 ⌉
1414
+ n,3
1415
+ )c�
1416
+ =
1417
+
1418
+
1419
+
1420
+ �4n + 1 +
1421
+
1422
+ (4n + 1)2 − 64
1423
+
1424
+ ⌈ n−2
1425
+ 2 ⌉⌊ n−2
1426
+ 2 ⌋
1427
+
1428
+ 2
1429
+ =
1430
+
1431
+
1432
+
1433
+
1434
+
1435
+
1436
+ 4n+1+√72n−63
1437
+ 2
1438
+ if n is even,
1439
+
1440
+ 4n+1+√72n−47
1441
+ 2
1442
+ if n is odd.
1443
+ and
1444
+ ξ2
1445
+
1446
+ (T
1447
+ ⌊ n−4
1448
+ 2 ⌋,⌈ n−4
1449
+ 2 ⌉
1450
+ n,3
1451
+ )c�
1452
+ =
1453
+
1454
+
1455
+
1456
+ �4n + 1 −
1457
+
1458
+ (4n + 1)2 − 64
1459
+
1460
+ ⌈ n−2
1461
+ 2 ⌉⌊ n−2
1462
+ 2 ⌋
1463
+
1464
+ 2
1465
+ =
1466
+
1467
+
1468
+
1469
+
1470
+
1471
+
1472
+ 4n+1−√72n−63
1473
+ 2
1474
+ if n is even,
1475
+
1476
+ 4n+1−√72n−47
1477
+ 2
1478
+ if n is odd.
1479
+ Therefore,
1480
+ EE
1481
+
1482
+ (T
1483
+ ⌊ n−4
1484
+ 2 ⌋,⌈ n−4
1485
+ 2 ⌉
1486
+ n,3
1487
+ )c�
1488
+ = 2
1489
+
1490
+ ξ1
1491
+
1492
+ (T
1493
+ ⌊ n−4
1494
+ 2 ⌋,⌈ n−4
1495
+ 2 ⌉
1496
+ n,3
1497
+ )c�
1498
+ + ξ2
1499
+
1500
+ (T
1501
+ ⌊ n−4
1502
+ 2 ⌋,⌈ n−4
1503
+ 2 ⌉
1504
+ n,3
1505
+ )c��
1506
+ =
1507
+
1508
+
1509
+
1510
+ 2√8n − 7
1511
+ if n is even,
1512
+ 2
1513
+
1514
+ 4n + 1 + 4
1515
+
1516
+ n2 − 4n + 3
1517
+ if n is odd.
1518
+ Now, the proof follows from Theorem 5.1.
1519
+ Next, we find the E-energy of (T 0,n−5
1520
+ n,4
1521
+ )c by computing the E-spectrum of (T 0,n−5
1522
+ n,4
1523
+ )c. This will
1524
+ be used in the proof of Theorem 5.2.
1525
+ Lemma 5.1. For n ≥ 5, the E-energy of EE
1526
+
1527
+ (T 0,n−5
1528
+ n,4
1529
+ )c�
1530
+ is given by
1531
+ EE
1532
+
1533
+ (T 0,n−5
1534
+ n,4
1535
+ )c�
1536
+ = 2
1537
+
1538
+ 4(n − 1) + 8
1539
+
1540
+ 2n − 7.
1541
+ 13
1542
+
1543
+ Figure 3: The tree T 0,n−5
1544
+ n,4
1545
+ .
1546
+ Proof. The eccentricity matrix of (T 0,n−5
1547
+ n,4
1548
+ )c is given by
1549
+ E
1550
+
1551
+ (T 0,n−5
1552
+ n,4
1553
+ )c�
1554
+ =
1555
+ v0
1556
+ v1
1557
+ v2
1558
+ v3
1559
+ v4
1560
+ w1
1561
+ . . .
1562
+ wn−5
1563
+
1564
+
1565
+
1566
+
1567
+
1568
+
1569
+
1570
+
1571
+
1572
+
1573
+
1574
+
1575
+
1576
+
1577
+
1578
+
1579
+
1580
+
1581
+
1582
+
1583
+
1584
+
1585
+
1586
+
1587
+ v0
1588
+ 0
1589
+ 2
1590
+ 0
1591
+ 0
1592
+ 0
1593
+ 0
1594
+ . . .
1595
+ 0
1596
+ v1
1597
+ 2
1598
+ 0
1599
+ 2
1600
+ 0
1601
+ 0
1602
+ 0
1603
+ . . .
1604
+ 0
1605
+ v2
1606
+ 0
1607
+ 2
1608
+ 0
1609
+ 2
1610
+ 0
1611
+ 0
1612
+ . . .
1613
+ 0
1614
+ v3
1615
+ 0
1616
+ 0
1617
+ 2
1618
+ 0
1619
+ 2
1620
+ 2
1621
+ . . .
1622
+ 2
1623
+ v4
1624
+ 0
1625
+ 0
1626
+ 0
1627
+ 2
1628
+ 0
1629
+ 0
1630
+ . . .
1631
+ 0
1632
+ w1
1633
+ 0
1634
+ 0
1635
+ 0
1636
+ 2
1637
+ 0
1638
+ 0
1639
+ . . .
1640
+ 0
1641
+ ...
1642
+ ...
1643
+ ...
1644
+ ...
1645
+ ...
1646
+ ...
1647
+ ...
1648
+ ...
1649
+ ...
1650
+ wn−5
1651
+ 0
1652
+ 0
1653
+ 0
1654
+ 2
1655
+ 0
1656
+ 0
1657
+ . . .
1658
+ 0
1659
+ .
1660
+ It is easy to see that the rank of E
1661
+
1662
+ (T 0,n−5
1663
+ n,4
1664
+ )c�
1665
+ is 4, and hence 0 is an eigenvalue of E
1666
+
1667
+ (T 0,n−5
1668
+ n,4
1669
+ )c�
1670
+ with multiplicity n − 4.
1671
+ If W = {v4, w1, . . . , wn−5}, then Π2 = {v0} ∪ {v1} ∪ {v2} ∪ {v3} ∪ U is an equitable partition of
1672
+ E
1673
+
1674
+ (T 0,n−5
1675
+ n,4
1676
+ )c�
1677
+ with the quotient matrix
1678
+ Q2 =
1679
+
1680
+
1681
+
1682
+
1683
+
1684
+
1685
+
1686
+ 0
1687
+ 2
1688
+ 0
1689
+ 0
1690
+ 0
1691
+ 2
1692
+ 0
1693
+ 2
1694
+ 0
1695
+ 0
1696
+ 0
1697
+ 2
1698
+ 0
1699
+ 2
1700
+ 0
1701
+ 0
1702
+ 0
1703
+ 2
1704
+ 0
1705
+ 2(n − 4)
1706
+ 0
1707
+ 0
1708
+ 0
1709
+ 2
1710
+ 0
1711
+
1712
+
1713
+
1714
+
1715
+
1716
+
1717
+
1718
+ .
1719
+ Now, the eigenvalues of Q2 are
1720
+
1721
+
1722
+ 2n − 2 ± 2
1723
+
1724
+ n2 − 10n + 29,
1725
+ 0,
1726
+ and
1727
+
1728
+ 2n − 2 ± 2
1729
+
1730
+ n2 − 10n + 29.
1731
+ Therefore, by Theorem 2.1, we have
1732
+ SpecE
1733
+
1734
+ (T 0,n−5
1735
+ n,4
1736
+ )c�
1737
+ =
1738
+
1739
+
1740
+
1741
+ 2n − 2 ± 2
1742
+
1743
+ n2 − 10n + 29
1744
+ 0
1745
+
1746
+ 2n − 2 ± 2
1747
+
1748
+ n2 − 10n + 29
1749
+ 1
1750
+ n − 4
1751
+ 1
1752
+
1753
+ .
1754
+ Hence, EE
1755
+
1756
+ (T 0,n−5
1757
+ n,4
1758
+ )c�
1759
+ = 2
1760
+ ��
1761
+ 2n − 2 + 2
1762
+
1763
+ n2 − 10n + 29 +
1764
+
1765
+ 2n − 2 − 2
1766
+
1767
+ n2 − 10n + 29
1768
+
1769
+ .
1770
+ If
1771
+ α =
1772
+
1773
+ 2n − 2 + 2
1774
+
1775
+ n2 − 10n + 29 and β =
1776
+
1777
+ 2n − 2 − 2
1778
+
1779
+ n2 − 10n + 29, then α2 +β2 = 4n−4 and
1780
+ 14
1781
+
1782
+ Vo
1783
+ V1
1784
+ V2
1785
+ V3
1786
+ V4
1787
+ W1
1788
+ Wn-52αβ = 8√2n − 7. Therefore,
1789
+ EE
1790
+
1791
+ (T 0,n−5
1792
+ n,4
1793
+ )c�
1794
+ = 2(α + β)
1795
+ = 2
1796
+ ��
1797
+ α2 + β2 + 2αβ
1798
+
1799
+ = 2
1800
+ ��
1801
+ 4n − 4 + 8
1802
+
1803
+ 2n − 7
1804
+
1805
+ .
1806
+ Theorem 5.2. Let T be a tree of order n ≥ 4. Then
1807
+ EE(T c) ≥ EE
1808
+
1809
+ (T 0,n−4
1810
+ n,3
1811
+ )c�
1812
+ with equality if and only if T ∼= T 0,n−4
1813
+ n,3
1814
+ .
1815
+ Proof. For n = 4, T 0,0
1816
+ n,3 is the only tree with a connected complement. For n = 5 and 6, it is easy
1817
+ to see that EE(T c) > EE
1818
+
1819
+ (T 0,n−4
1820
+ n,3
1821
+ )c�
1822
+ (see Appendix). So, let us assume that T is a tree on n ≥ 7
1823
+ vertices.
1824
+ If T is a tree with diam(T) = 3, then the proof follows from Corollary 5.1. If T is a tree with
1825
+ diam(T) ≥ 4, then, by Lemma 2.2, it follows that
1826
+ EE(T c) = 2EA(T) ≥ 2EA(T 0,n−5
1827
+ n,4
1828
+ ) = EE
1829
+
1830
+ (T 0,n−5
1831
+ n,4
1832
+ )c�
1833
+ .
1834
+ Again, by Lemma 5.1, we have EE
1835
+
1836
+ (T 0,n−5
1837
+ n,4
1838
+ )c�
1839
+ = 2
1840
+
1841
+ 4(n − 1) + 8√2n − 7. Note that 4(n − 1) +
1842
+ 8√2n − 7 > 4n + 1 + 8√n − 3 for n ≥ 7 and hence EE
1843
+
1844
+ (T 0,n−5
1845
+ n,4
1846
+ )c�
1847
+ > EE
1848
+
1849
+ (T 0,n−4
1850
+ n,3
1851
+ )c�
1852
+ for n ≥ 7. This
1853
+ completes the proof.
1854
+ In the following theorem, we characterize the complements of trees with maximum E-energy.
1855
+ Theorem 5.3. Let T be a tree of order n ≥ 4. Then
1856
+ EE(T c) ≤ EE(P c
1857
+ n)
1858
+ with equality if and only if T ∼= Pn.
1859
+ Proof. For n = 4, P4 is the only tree with a connected complement. For n = 5 and 6, it is easy to
1860
+ see that EE(T c) ≤ EE(P c
1861
+ n) with equality if and only if T ∼= Pn (see, Appendix). So, let us assume
1862
+ that T is a tree on n ≥ 7 vertices. For n ≥ 7, it follows from Theorem 2.2 that
1863
+ EE(P c
1864
+ n) = 2EA(Pn) =
1865
+
1866
+
1867
+
1868
+
1869
+
1870
+ 4
1871
+
1872
+ cot
1873
+
1874
+ π
1875
+ 2(n+1)
1876
+
1877
+ − 1
1878
+
1879
+ if n is odd,
1880
+ 4
1881
+
1882
+ csc
1883
+
1884
+ π
1885
+ 2(n+1)
1886
+
1887
+ − 1
1888
+
1889
+ if n is even;
1890
+
1891
+
1892
+ cot
1893
+
1894
+ π
1895
+ 2(n + 1)
1896
+
1897
+ − 1
1898
+
1899
+ .
1900
+ We know that cot x >
1901
+ � 1
1902
+ x +
1903
+ 1
1904
+ x−π
1905
+
1906
+ for 0 < x < π
1907
+ 2 . Since 0 <
1908
+ π
1909
+ 2(n+1) < π
1910
+ 2 , therefore
1911
+ EE(P c
1912
+ n) ≥ 4
1913
+
1914
+ cot
1915
+
1916
+ π
1917
+ 2(n + 1)
1918
+
1919
+ − 1
1920
+
1921
+ > 4
1922
+ �4n(n + 1)
1923
+ (2n + 1)π − 1
1924
+
1925
+ .
1926
+ (1)
1927
+ 15
1928
+
1929
+ Let T be a tree with diam(T) = 3. Therefore, by Corollary 5.2 it follows that
1930
+ EE(T c) ≤
1931
+
1932
+
1933
+
1934
+ 2√8n − 7
1935
+ if n is even,
1936
+ 2
1937
+
1938
+ 4n + 1 + 4
1939
+
1940
+ n2 − 4n + 3
1941
+ if n is odd;
1942
+ ≤ 2
1943
+
1944
+ 8n − 7.
1945
+ By using (1) one can check that
1946
+ EE(P c
1947
+ n) > 4
1948
+ �4n(n + 1)
1949
+ (2n + 1)π − 1
1950
+
1951
+ > 2
1952
+
1953
+ 8n − 7 ≥ EE(T c)
1954
+ for n ≥ 7.
1955
+ Thus, for any tree T with diameter 3, EE(P c
1956
+ n) > EE(T c).
1957
+ Let T be a tree with diam(T) ≥ 4. Therefore, E(T c) = 2A(T) and hence EE(T c) = 2EA(T).
1958
+ Now, by Lemma 2.3 it follows that
1959
+ EE(T c) = 2EA(T) ≤ 2EA(Pn) = EE(P c
1960
+ n)
1961
+ and the equality holds if and only if T ∼= Pn. This completes the proof.
1962
+ Finally, we obtain a Nordhaus-Gaddum type lower bound for the E-energy of a tree and its
1963
+ complement, which directly follows from Theorem 2.8 and Theorem 5.2.
1964
+ Theorem 5.4. Let T be a tree of order n ≥ 4. Then
1965
+ EE(T) + EE(T c) ≥ 2
1966
+ ��
1967
+ 13n − 35 + 8
1968
+
1969
+ n − 3 +
1970
+
1971
+ 4n + 1 + 8
1972
+
1973
+ n − 3
1974
+
1975
+ with equality if and only if T ∼= T 0,n−4
1976
+ n,3
1977
+ .
1978
+ References
1979
+ [1] Andries E. Brouwer and Willem H. Haemers. Spectra of graphs. Universitext. Springer, New
1980
+ York, 2012.
1981
+ [2] Yi-Zheng Fan, Fei-Fei Zhang, and Yi Wang. The least eigenvalue of the complements of trees.
1982
+ Linear Algebra Appl., 435(9):2150–2155, 2011.
1983
+ [3] Ivan Gutman. Acyclic systems with extremal h¨uckel π-electron energy. Theoretica chimica
1984
+ acta, 45(2):79–87, 1977.
1985
+ [4] Xiaocong He and Lu Lu. On the largest and least eigenvalues of eccentricity matrix of trees.
1986
+ Discrete Math., 345(1):Paper No. 112662, 11, 2022.
1987
+ [5] M Hofmeister. On the two largest eigenvalues of trees. Linear algebra and its applications,
1988
+ 260:43–59, 1997.
1989
+ [6] Michael P Knapp. Sines and cosines of angles in arithmetic progression. Mathematics magazine,
1990
+ 82(5):371, 2009.
1991
+ 16
1992
+
1993
+ [7] Xingyu Lei and Jianfeng Wang.
1994
+ Spectral determination of graphs with one positive anti-
1995
+ adjacency eigenvalue. Appl. Math. Comput., 422:Paper No. 126995, 13, 2022.
1996
+ [8] Xingyu Lei, Jianfeng Wang, and Guozheng Li. On the eigenvalues of eccentricity matrix of
1997
+ graphs. Discrete Appl. Math., 295:134–147, 2021.
1998
+ [9] Shuchao Li and Shujing Wang. The least eigenvalue of the signless Laplacian of the comple-
1999
+ ments of trees. Linear Algebra Appl., 436(7):2398–2405, 2012.
2000
+ [10] Yuanjing Li, Rui Qin, and Dan Li. On distance signless Laplacian spectrum of the complements
2001
+ of unicyclic graphs and trees. Linear Algebra Appl., 631:235–253, 2021.
2002
+ [11] Huiqiu Lin and Stephen Drury. The distance spectrum of complements of trees. Linear Algebra
2003
+ Appl., 530:185–201, 2017.
2004
+ [12] L´aszl´o Lov´asz and J´ozsef Pelik´an. On the eigenvalues of trees. Periodica Mathematica Hun-
2005
+ garica, 3(1-2):175–182, 1973.
2006
+ [13] Iswar Mahato, R. Gurusamy, M. Rajesh Kannan, and S. Arockiaraj. Spectra of eccentricity
2007
+ matrices of graphs. Discrete Appl. Math., 285:252–260, 2020.
2008
+ [14] Iswar Mahato, R Gurusamy, M Rajesh Kannan, and S Arockiaraj. On the spectral radius
2009
+ and the energy of eccentricity matrices of graphs. Linear and Multilinear Algebra, pages 1–11,
2010
+ 2021.
2011
+ [15] Iswar Mahato and M. Rajesh Kannan. Eccentricity energy change of complete multipartite
2012
+ graphs due to edge deletion. Spec. Matrices, 10:193–202, 2022.
2013
+ [16] Iswar Mahato and M Rajesh Kannan. Minimizers for the energy of eccentricity matrices of
2014
+ trees. arXiv preprint arXiv:2208.13462, 2022.
2015
+ [17] Iswar Mahato and M. Rajesh Kannan.
2016
+ On the eccentricity matrices of trees: Inertia and
2017
+ spectral symmetry. Discrete Math., 345(11):Paper No. 113067, 2022.
2018
+ [18] Ajay Kumar Patel, Lavanya Selvaganesh, and Sanjay Kumar Pandey. Energy and inertia of
2019
+ the eccentricity matrix of coalescence of graphs. Discrete Math., 344(12):Paper No. 112591,
2020
+ 11, 2021.
2021
+ [19] Jianfeng Wang, Xingyu Lei, Shuchao Li, Wei Wei, and Xiaobing Luo. On the eccentricity
2022
+ matrix of graphs and its applications to the boiling point of hydrocarbons. Chemometrics and
2023
+ Intelligent Laboratory Systems, page 104173, 2020.
2024
+ [20] Jianfeng Wang, Lu Lu, Milan Randi´c, and Guozheng Li. Graph energy based on the eccentricity
2025
+ matrix. Discrete Math., 342(9):2636–2646, 2019.
2026
+ [21] Jianfeng Wang, Mei Lu, Maurizio Brunetti, Lu Lu, and Xueyi Huang. Spectral determinations
2027
+ and eccentricity matrix of graphs. Adv. in Appl. Math., 139:Paper No. 102358, 25, 2022.
2028
+ [22] Jianfeng Wang, Mei Lu, Lu Lu, and Francesco Belardo. Spectral properties of the eccentricity
2029
+ matrix of graphs. Discrete Appl. Math., 279:168–177, 2020.
2030
+ 17
2031
+
2032
+ [23] Wei Wei, Xiaocong He, and Shuchao Li. Solutions for two conjectures on the eigenvalues of
2033
+ the eccentricity matrix, and beyond. Discrete Math., 343(8):111925, 21, 2020.
2034
+ [24] Wei Wei and Shuchao Li. On the eccentricity spectra of complete multipartite graphs. Appl.
2035
+ Math. Comput., 424:Paper No. 127036, 12, 2022.
2036
+ [25] Wei Wei, Shuchao Li, and Licheng Zhang. Characterizing the extremal graphs with respect
2037
+ to the eccentricity spectral radius, and beyond. Discrete Math., 345(2):Paper No. 112686, 29,
2038
+ 2022.
2039
+ [26] Hong Yuan. Sharp lower bounds on the eigenvalues of trees. Linear Algebra Appl., 113:101–105,
2040
+ 1989.
2041
+ Appendix
2042
+ Figure 4: List of trees on 5 and 6 vertices with connected complements.
2043
+ Trees(T)
2044
+ EE(T c)
2045
+ Trees(T)
2046
+ EE(T c)
2047
+ T1
2048
+ ≈ 10.4528
2049
+ T5
2050
+ ≈ 13.798
2051
+ T2
2052
+ ≈ 10.9284
2053
+ T6
2054
+ ≈ 12.3108
2055
+ T3
2056
+ ≈ 11.6372
2057
+ T7
2058
+ ≈ 13.9756
2059
+ T4
2060
+ = 12
2061
+ Table 1: E-energy of complements of the trees T1-T7.
2062
+ 18
2063
+
2064
+ Ti
2065
+ T2
2066
+ T3
2067
+ T4
2068
+ Ts
2069
+ T7