jackkuo committed on
Commit 25aafac · verified · 1 Parent(s): f85675b

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +4 -0
  2. 3dFRT4oBgHgl3EQfoTds/content/tmp_files/2301.13608v1.pdf.txt +1120 -0
  3. 3dFRT4oBgHgl3EQfoTds/content/tmp_files/load_file.txt +0 -0
  4. 4dE4T4oBgHgl3EQf0w2N/content/tmp_files/2301.05285v1.pdf.txt +1002 -0
  5. 4dE4T4oBgHgl3EQf0w2N/content/tmp_files/load_file.txt +0 -0
  6. 4tAyT4oBgHgl3EQfpPhP/content/tmp_files/2301.00521v1.pdf.txt +3696 -0
  7. 4tAyT4oBgHgl3EQfpPhP/content/tmp_files/load_file.txt +0 -0
  8. 79E1T4oBgHgl3EQfTwOd/content/tmp_files/2301.03082v1.pdf.txt +295 -0
  9. 79E1T4oBgHgl3EQfTwOd/content/tmp_files/load_file.txt +216 -0
  10. 9dE1T4oBgHgl3EQfoAQF/content/tmp_files/2301.03314v1.pdf.txt +818 -0
  11. 9dE1T4oBgHgl3EQfoAQF/content/tmp_files/load_file.txt +0 -0
  12. AtFLT4oBgHgl3EQfFC_E/content/tmp_files/2301.11986v1.pdf.txt +2817 -0
  13. AtFLT4oBgHgl3EQfFC_E/content/tmp_files/load_file.txt +0 -0
  14. CtAyT4oBgHgl3EQfR_f1/vector_store/index.pkl +3 -0
  15. D9AzT4oBgHgl3EQfif2Z/content/tmp_files/2301.01501v1.pdf.txt +741 -0
  16. D9AzT4oBgHgl3EQfif2Z/content/tmp_files/load_file.txt +376 -0
  17. EdA0T4oBgHgl3EQfA_-G/content/tmp_files/2301.01970v1.pdf.txt +2269 -0
  18. EdA0T4oBgHgl3EQfA_-G/content/tmp_files/load_file.txt +0 -0
  19. GtE4T4oBgHgl3EQfgQ2M/content/tmp_files/2301.05115v1.pdf.txt +0 -0
  20. GtE4T4oBgHgl3EQfgQ2M/content/tmp_files/load_file.txt +0 -0
  21. H9AyT4oBgHgl3EQfTPcY/content/tmp_files/2301.00100v1.pdf.txt +1251 -0
  22. H9AyT4oBgHgl3EQfTPcY/content/tmp_files/load_file.txt +0 -0
  23. H9AyT4oBgHgl3EQfrvmy/content/2301.00567v1.pdf +3 -0
  24. I9FJT4oBgHgl3EQfGCzl/content/tmp_files/2301.11446v1.pdf.txt +877 -0
  25. I9FJT4oBgHgl3EQfGCzl/content/tmp_files/load_file.txt +0 -0
  26. KdE2T4oBgHgl3EQfpgjD/content/tmp_files/2301.04030v1.pdf.txt +802 -0
  27. KdE2T4oBgHgl3EQfpgjD/content/tmp_files/load_file.txt +0 -0
  28. L9E3T4oBgHgl3EQfYgo-/content/tmp_files/2301.04488v1.pdf.txt +1527 -0
  29. L9E3T4oBgHgl3EQfYgo-/content/tmp_files/load_file.txt +0 -0
  30. OtFKT4oBgHgl3EQffy6V/content/2301.11831v1.pdf +3 -0
  31. PdE3T4oBgHgl3EQfCglF/content/tmp_files/2301.04276v1.pdf.txt +902 -0
  32. PdE3T4oBgHgl3EQfCglF/content/tmp_files/load_file.txt +477 -0
  33. PdFKT4oBgHgl3EQfgi6G/content/tmp_files/2301.11834v1.pdf.txt +1274 -0
  34. PdFKT4oBgHgl3EQfgi6G/content/tmp_files/load_file.txt +0 -0
  35. QNE0T4oBgHgl3EQfkQEN/vector_store/index.faiss +3 -0
  36. RtE2T4oBgHgl3EQfWQe7/content/tmp_files/2301.03832v1.pdf.txt +1216 -0
  37. RtE2T4oBgHgl3EQfWQe7/content/tmp_files/load_file.txt +0 -0
  38. W9FQT4oBgHgl3EQfcjYS/content/2301.13327v1.pdf +3 -0
  39. _dFAT4oBgHgl3EQfqx1t/content/tmp_files/2301.08649v1.pdf.txt +1101 -0
  40. _dFAT4oBgHgl3EQfqx1t/content/tmp_files/load_file.txt +0 -0
  41. bNAzT4oBgHgl3EQfLPu9/content/tmp_files/2301.01112v1.pdf.txt +1206 -0
  42. bNAzT4oBgHgl3EQfLPu9/content/tmp_files/load_file.txt +0 -0
  43. bdAyT4oBgHgl3EQf-PoQ/content/tmp_files/2301.00887v1.pdf.txt +303 -0
  44. bdAyT4oBgHgl3EQf-PoQ/content/tmp_files/load_file.txt +286 -0
  45. bdFAT4oBgHgl3EQfXh2G/content/tmp_files/2301.08534v1.pdf.txt +910 -0
  46. bdFAT4oBgHgl3EQfXh2G/content/tmp_files/load_file.txt +0 -0
  47. c9E3T4oBgHgl3EQfeQoX/vector_store/index.pkl +3 -0
  48. edE3T4oBgHgl3EQfewrM/content/tmp_files/2301.04547v1.pdf.txt +0 -0
  49. edE3T4oBgHgl3EQfewrM/content/tmp_files/load_file.txt +0 -0
  50. edFRT4oBgHgl3EQfVDdZ/content/tmp_files/2301.13538v1.pdf.txt +1095 -0
.gitattributes CHANGED
@@ -154,3 +154,7 @@ etE2T4oBgHgl3EQfGgZ8/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 9tAyT4oBgHgl3EQfqPjX/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
 n9E5T4oBgHgl3EQfjw-t/content/2301.05658v1.pdf filter=lfs diff=lfs merge=lfs -text
 o9FMT4oBgHgl3EQf7jFB/content/2301.12464v1.pdf filter=lfs diff=lfs merge=lfs -text
+W9FQT4oBgHgl3EQfcjYS/content/2301.13327v1.pdf filter=lfs diff=lfs merge=lfs -text
+H9AyT4oBgHgl3EQfrvmy/content/2301.00567v1.pdf filter=lfs diff=lfs merge=lfs -text
+OtFKT4oBgHgl3EQffy6V/content/2301.11831v1.pdf filter=lfs diff=lfs merge=lfs -text
+QNE0T4oBgHgl3EQfkQEN/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
3dFRT4oBgHgl3EQfoTds/content/tmp_files/2301.13608v1.pdf.txt ADDED
@@ -0,0 +1,1120 @@
arXiv:2301.13608v1 [quant-ph] 25 Jan 2023

Orbit quantization in a retarded harmonic oscillator

Alvaro G. Lopez
Nonlinear Dynamics, Chaos and Complex Systems Group,
Departamento de Física, Universidad Rey Juan Carlos,
Tulipán s/n, 28933 Móstoles, Madrid, Spain
(Dated: February 1, 2023)

Abstract

We study the dynamics of a damped harmonic oscillator in the presence of a retarded potential with state-dependent time-delayed feedback. In the limit of small time-delays, we show that the oscillator is equivalent to a Liénard system. This allows us to analytically predict the value of the first Hopf bifurcation, unleashing a self-oscillatory motion. We compute bifurcation diagrams for several model parameter values and analyse multistable domains in detail. Using the Lyapunov energy function, two well-resolved energy levels represented by two coexisting stable limit cycles are discerned. Further exploration of the parameter space reveals the existence of a superposition limit cycle, encompassing two degenerate coexisting limit cycles at the fundamental energy level. When the system is driven very far from equilibrium, a multiscale strange attractor displaying intrinsic and robust intermittency is uncovered.
I. INTRODUCTION

The importance of time-delayed feedback has been extensively confirmed across many disciplines in science, ranging from mechanical physical systems [1], to chemical complex reactions [2], or complex biological systems, as for example cardiac oscillations [3], the propagation of impulses through the nervous system [4] or the modelling of the cell cycle [5]. Time delays are also inescapable to understand climate phenomena, such as El Niño-Southern Oscillation [6]. In epidemiology and population dynamics retardations can be crucial as well [7], just as much as they are in the modelling of economic cycles [8] or transmission lines [9]. Indeed, whenever the forces between two interacting physical bodies are mediated by a medium or a field, or whenever large causal chains in a network of connections are reduced in a model, time-delays must be present. Consequently, the existence of retardations in differential equations describing the evolution of dynamical systems should be taken more as the rule than as the exception.

However, this contrasts with the standard practice, where ordinary differential equations are much preferred for their simplicity, both from an analytical and a numerical point of view. Furthermore, not much attention has been dedicated to the study of dynamical systems where the time retardation is state-dependent [10–13], especially in the field of fundamental physics. Recent findings in the study of extended electrodynamic bodies using the retarded Liénard-Wiechert potential have shown that these particles can experience nonlinear oscillations due to self-forces, with a frequency similar to the "zitterbewegung" frequency [13, 14]. The crucial importance of time-delay in atomic physics had initially been stressed by C. K. Raju [15]. Later on, the expression "Atiyah's hypothesis" was coined, after Sir Michael Atiyah claimed the necessity of functional differential equations to faithfully represent the dynamics of microscopic bodies, in a lecture entitled "The nature of space", which was the first annual Einstein Public Lecture, delivered in 2005 [16].

By the same year, Yves Couder and his collaborators demonstrated empirically the potential of hydrodynamic quantum analogs, consisting of silicone oil droplets bouncing on a vibrating bath, to describe quantum phenomena [17, 18]. These pilot-wave mechanical systems present striking similarities with the quantum mechanics of electromagnetically charged bodies, such as orbit quantization [19], diffraction and interference phenomena through slits [20], tunneling over barriers [21], or the entanglement of particles [22]. In mathematical models where the fluid is not explicitly represented [23], these features translate into a time-delayed feedback arising from the self-affection of the particle through the fluid medium. Just as occurs in electrodynamics, perturbations produced in the past by the particle can affect it at the present time, introducing memory effects that can trigger its self-propulsion [23].

In the present work we demonstrate that the phase space orbits of a harmonic oscillator with state-dependent time-delayed feedback are quantized and organized conforming a two-level system. We also report new compelling dynamical phenomena, as for example the superposition of quantized orbits and the existence of robust intermittency. The paper is organized as follows. In Sec. 2 we introduce the mathematical model of our oscillator and explain its origin. Then, in Sec. 3 we analytically bridge state-dependent time-delayed oscillators and Liénard systems, proving that the periodic motion corresponds to a self-oscillation [24]. In the following section we show that there exist quantized orbits, which are well-resolved in the energy landscape given by the harmonic external potential. Secs. 4 and 5 are dedicated to introducing two new phenomena, which might be crucial to understand other aspects of microscopic physics, such as the superposition of orbits and the passage of particles over external potential barriers. Finally, in the conclusions, we summarize the main results of the present work and the future perspectives.
II. MODEL

We use an apparently simple model consisting of a harmonic oscillator with linear damping, according to Stokes's law of dissipation [25], an external quadratic potential V(x) representing Hooke's law and another quadratic potential with state-dependent time-delay Q(xτ), where xτ ≡ x(t − τ(x)). Following traditional studies in classical electrodynamics, we borrow the concept of retarded potential [26] hereafter to denote this self-excited contribution. Therefore, we can write our dynamical equation of motion in the form

m ẍ + µ ẋ + dV/dx + dQ/dxτ = 0,    (1)

where m is the mass of the oscillator and µ represents the rate of dissipation.

This model is a simplified version of an oscillator recently encountered in the study of the dynamics of extended electromagnetic bodies, for which the presence of self-forces produces self-oscillations through a Hopf bifurcation [13, 14]. Thus, if desired, we can physically interpret to some extent this new time-delay term as the result of some complicated mass self-interactions in a mass-spring system, arising from the mass's structure. A similar, though more sophisticated, model has been previously used in the literature to study the effect of state-dependence of the delay on the phenomenon of vibrational resonance [11]. In summary, we can mathematically express the external potential as V(x) = kx²/2, and the same holds for Q(xτ) = αxτ²/2, yielding the differential equation

m ẍ(t) + µ ẋ(t) + k x(t) + α x(t − τ(x)) = 0.    (2)

For convenience and without loss of generality, we shall consider m = 1, k = 1 and µ = 0.1 hereafter. We intend to describe the dynamics of our oscillator in the phase space representation, as is traditionally done in the study of nonlinear dynamical systems, especially regarding mechanical and electronic oscillators. However, we notice that, rigorously speaking, the true phase space of our dynamical system is infinite-dimensional, since history functions have to be provided to integrate Eq. (2), instead of mere initial conditions [27]. In the phase space (x, y) we can write the differential equation as follows

ẋ = y,    (3)
ẏ = −0.1 y − x − α xτ.    (4)

One of the crucial issues of the present work is the nature of the function τ(x). Some constraints on this function must be provided to ensure that the system is well-behaved. For example, we want the trajectories to remain bounded in the external well for x → ±∞, so that the state-dependent delay decays to zero asymptotically. It is also reasonable to demand that the delay function τ(x) remains bounded all over its domain, guaranteeing that the feedback coming from the past history of the dynamics does not extend to minus infinity. In this manner, we bound the memory of this non-Markovian system to a finite domain of its temporal past. Finally, symmetry with respect to spatial reflections (x → −x) is also present in the original model [13]. Moreover, as we show ahead, we can exploit the degeneracy introduced by this symmetry to obtain intriguing new dynamical phenomena. A simple function that has been used in previous works is the Gaussian distribution [11], which accounts for these three requirements. In conclusion, we assume that τ(x) = τ0 e^(−x²/2σ²), and fix σ = 1/√2 unless otherwise stated. The parameter τ0 represents the maximum value of the time-delay feedback, attained at the centre of the potential well. It constitutes one of the two key parameters investigated in the present study.
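For readers who wish to reproduce the trajectories, a minimal fixed-step integrator for Eqs. (3)-(4) can be sketched as follows. This is an illustrative Python sketch with our own choices (a Heun predictor-corrector step and linear interpolation of the stored history); it is not the residual-order MATLAB integrator used later in the paper.

```python
import numpy as np

# Parameters of Eq. (2): m = 1, k = 1, mu = 0.1; alpha and tau0 are the two free parameters.
MU, K, ALPHA, TAU0, SIGMA = 0.1, 1.0, 0.5, 1.0, 1.0 / np.sqrt(2.0)

def tau(x):
    # State-dependent delay tau(x) = tau0 * exp(-x^2 / (2 sigma^2)).
    return TAU0 * np.exp(-x ** 2 / (2.0 * SIGMA ** 2))

def integrate(history, t_max=200.0, dt=0.01):
    """March Eqs. (3)-(4) forward with a Heun (predictor-corrector) step.

    `history(t)` returns x(t) for t <= 0; since tau(x) <= tau0, the stored grid
    starts at -tau0 and the delayed value x(t - tau(x)) is read back from the
    stored trajectory by linear interpolation (brute force, not optimized).
    """
    n_hist = int(np.ceil(TAU0 / dt)) + 1
    t = np.concatenate([np.linspace(-TAU0, 0.0, n_hist),
                        dt * np.arange(1, int(t_max / dt) + 1)])
    x = np.empty_like(t)
    x[:n_hist] = history(t[:n_hist])
    y = (x[n_hist - 1] - x[n_hist - 2]) / dt      # velocity of the history at t = 0

    def accel(j, xj, yj):
        x_delayed = np.interp(t[j] - tau(xj), t[:j + 1], x[:j + 1])
        return -MU * yj - K * xj - ALPHA * x_delayed

    for i in range(n_hist - 1, len(t) - 1):
        a1 = accel(i, x[i], y)
        yp = y + dt * a1                           # Euler predictor for the velocity
        x[i + 1] = x[i] + 0.5 * dt * (y + yp)      # trapezoidal update for the position
        y = y + 0.5 * dt * (a1 + accel(i + 1, x[i + 1], yp))
    return t, x

# Example: a periodic history x(t) = A sin(w t + phi), as used in the paper.
t, x = integrate(lambda s: 1.0 * np.sin(2.0 * s + 0.5))
```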
All things considered, we have a time-delayed nonlinear oscillator with two independent parameters, α and τ0. Interestingly, we note that the system's nonlinearity comes entirely from the retardation, since both potentials have been assumed harmonic. Even though this system has been designed following previous findings in electrodynamics, we would like to stress all the simplifications performed. Firstly, the delay in the functional differential equation appearing in Ref. [13] depends both on the speed and the acceleration of the particle. Secondly, such a differential equation is of the advanced type [13], since the acceleration and the speed appearing in the Liénard-Wiechert potentials are retarded themselves. Unfortunately, numerical schemes to integrate advanced differential equations with state-dependent delays are lacking. This has motivated the authors to develop the present approximated model. Finally, some speed-dependent nonlinearities appearing in the dissipation term and also in the restoring force term have been neglected. They are related to the Lorentz gamma factor, which is required to comply with the principle of relativity in classical electrodynamics. Consequently, the present model remains somewhat abstract. It is not our purpose to rigorously fit it to any specific physical system. We just use it to illustrate some physical phenomena that are frequently believed to belong exclusively to the atomic realm of physics.

III. RELATED LIÉNARD SYSTEM

Given the fact that there is dissipation in the system, in the absence of retardation (α = 0 or τ0 = 0) it can be immediately proved, using the Lyapunov energy function E(x, y) = (x² + y²)/2, that the rest state at the equilibrium x = 0 is the only globally stable fixed point of the system, which asymptotically attracts all the initial conditions in the phase space [28]. We recall that this function only comprises the conservative part of the energetic content of our dynamical system. However, when the retarded potential is activated for α > 0, as we increase τ0 beyond a critical value, a Hopf bifurcation appears, destabilizing such an equilibrium point. A fundamental energy level appears with non-zero energy fluctuations, in which the system performs limit cycle oscillations. Therefore, the orbit becomes quantized as a consequence of the time-delayed feedback. The system becomes unstable and locally active [29], performing a periodic self-oscillatory motion around the minimum of the square well potential. Of course, this is only possible at the expense of an energy input in the system, which must come from external field sources [13, 24]. Therefore, the present dynamical system must be regarded as a non-equilibrium open physical system, whose nonlinear periodic motion can be interpreted as a cyclic thermodynamic engine [30]. Due to the existence of energy losses, these dynamical systems are frequently named dissipative structures. This contrasts with conservative dynamical systems, which are generally equipped with a symplectic structure [31].

We now prove that the dynamics of the system is a self-oscillation, triggered by the well-known Hopf bifurcation. To compute the value of τ0 at the bifurcation point, we approximate this system by a Liénard system. Expanding the retarded potential in a Taylor series to second order yields

x(t − τ(x)) = x(t) − τ(x) ẋ(t) + (1/2) τ²(x) ẍ(t) + O(τ³).    (5)

When the time-delay is small, we can neglect the third and higher order terms and substitute the two leading order contributions in Eq. (2), which (for m = 1) can be rearranged as (1 + ατ²(x)/2) ẍ + (µ − ατ(x)) ẋ + (k + α) x = 0. Dividing by the coefficient of ẍ, we obtain

ẍ + f(x) ẋ + g(x) = 0,    (6)

where the functions f(x) = (µ − ατ(x))/(1 + ατ²(x)/2) and g(x) = (k + α)/(1 + ατ²(x)/2) have been defined.
Thus, as we can see, small retardations have two fundamental physical consequences. Firstly, an antidamping correction to the drag force appears to first order. Secondly, the inertia of the mass becomes dependent on the dynamical state through its evolution along the trajectory. To demonstrate that this Liénard system, with f(x) and g(x) as defined, fulfils the conditions required to produce the Hopf bifurcation, we appeal to Liénard's theorem. Given the importance of this theorem, we first enunciate it, so that the reader is aware of all the technical details [32]. For this purpose it is convenient to introduce the primitive function F(x) = ∫₀ˣ f(s) ds, since it is alluded to in the theorem, which reads

Theorem 1 (Liénard, 1928) Under the assumptions that the functions F(x), g(x) ∈ C¹(R) are odd, xg(x) > 0 for x ≠ 0, F(0) = 0, F′(0) < 0 and F has one single root at x = a, beyond which it increases monotonically to infinity, it follows that there exists only one limit cycle and that it is stable.

For a proof of the theorem we refer the reader to Ref. [33]. It is immediate to confirm that all the requirements for the existence of the Hopf bifurcation are accomplished. Indeed, the function F fulfils F(0) = 0, has a root at a ∈ R⁺, and for x ≥ a we find that it increases monotonically towards infinity (see Fig. 1(a)). This follows from an analytical estimation of F(x), which can be provided by neglecting the positive term τ²(x) ≪ 2/α in the denominator of f(x), yielding the function F(x) = µx − √(2π) ατ0 erf(x/√2), with erf(x) the error function. It is also confirmed by the numerical solution of the integral, which has been computed using the trapezoidal rule, i.e. by approximating the region under the graph of the integrand as a sequence of trapezoids and summing their areas. The condition xg(x) > 0 is trivially verified, while the condition F′(0) < 0 can be used to find the value of the Hopf bifurcation, since F′(0) = (µ − ατ0)/(1 + ατ0²/2), which entails that τ0 > µ/α. For the parameter values here considered and α = 1/2, the Hopf bifurcation therefore takes place at approximately τ0 = 1/5. This analytical result holds nicely, as depicted in Fig. 1(b). In the following section we show that, as we push the system even further from thermodynamic equilibrium [30] by increasing the effects of the time-delay feedback, the oscillator undergoes further bifurcation phenomena, producing a second quantized excited orbit at a higher energy level.
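As a quick numerical cross-check of the threshold τ0c = µ/α, one can tabulate F(x) with the trapezoidal rule and compare it with the erf-based approximation quoted above. This is a sketch with arbitrary grid choices, not the routine behind Fig. 1:

```python
import math
import numpy as np

MU, K, ALPHA = 0.1, 1.0, 0.5
SIGMA = 1.0 / math.sqrt(2.0)

def f(x, tau0):
    # Antidamping coefficient of the related Lienard system, Eq. (6).
    tau = tau0 * np.exp(-np.asarray(x, dtype=float) ** 2 / (2.0 * SIGMA ** 2))
    return (MU - ALPHA * tau) / (1.0 + ALPHA * tau ** 2 / 2.0)

def F_trapz(x, tau0, n=2000):
    # F(x) = integral_0^x f(s) ds, evaluated with the trapezoidal rule.
    s = np.linspace(0.0, x, n)
    fs = f(s, tau0)
    return 0.5 * np.sum((fs[1:] + fs[:-1]) * np.diff(s))

def F_erf(x, tau0):
    # Closed-form estimate quoted in the text (tau^2 dropped in the denominator of f).
    return MU * x - math.sqrt(2.0 * math.pi) * ALPHA * tau0 * math.erf(x / math.sqrt(2.0))

print(F_trapz(20.0, 1.0), F_erf(20.0, 1.0))    # both grow linearly (slope mu) for large x
# Hopf condition F'(0) = f(0) < 0, i.e. tau0 > mu/alpha = 0.2 for these parameters:
for tau0 in (0.15, 0.25):
    print(tau0, f(0.0, tau0) < 0.0)             # False below threshold, True above
```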
IV. ENERGY LEVELS: MULTISTABILITY

Once we have demonstrated that the time retardation can destabilize the rest state, generating a fundamental energy level with zero-point fluctuations, it is worth asking whether, by pushing the system even further from equilibrium, i.e. by increasing the delay feedback, orbits with larger amplitude, representing excited energy levels, can appear. For this purpose we have computed the bifurcation diagrams of the related maxima map of the system. As is well known, this map can be constructed by computing the local maxima of the temporal series. Together with its related minima map, this is the simplest general way to discretize the dynamics of delayed differential equations. We insist again that, strictly speaking, the phase space of retarded differential equations is infinite-dimensional. An alternative possibility is to build an embedding from the temporal series and to construct a Poincaré section out of it. However, this technique is computationally more intensive and does not produce better insights into the dynamics of the system.

To compute the bifurcation diagrams we have to integrate Eq. (2). This requires considering history functions [27]. Since in the absence of time retardation the system is harmonic, we consider that the most natural choice of history functions are periodic solutions. Therefore, we take the functions x(t) = A sin(ωt + ϕ) for t < 0. Moreover, this choice can be used later to ascertain relevant dynamical aspects of the system under finite-time external periodic drivings, which can be physically interpreted as brief pulses exerted on the oscillator. As expected, external perturbations acting on the system can produce transitions between the energy levels.

Figure 1: Hopf bifurcation. The conditions for Liénard's theorem are shown. (a) The function F for τ0 = 1 and α = 1/2. It clearly verifies F(0) = 0 and F′(0) < 0, has a single root at a ≈ 10 and increases monotonically without bound thereafter. An analytical approximation using the error function is also shown. (b) The bifurcation diagram, with the maximum value of x along the orbit resulting from the numerical solution of the retarded differential equation (red curve), showing a Hopf bifurcation at the value τ0c = 0.195, very close to the analytical prediction of the related Liénard system, which corresponds to τ0c = 0.2. The rest state becomes unstable beyond the critical bifurcation point, entailing the zero-point fluctuations of the fundamental energy level.

Because our aim is to figure out whether there exist multistable parameter regimes, represented by two or more coexisting stable limit cycles, we throw ten different initial conditions randomly chosen in the ranges A ∈ [0, 3], ω ∈ [−π, π] and ϕ ∈ [0, π]. We compute the trajectories in the temporal interval t ∈ [0, 2000] using a residual order integrator implemented in MATLAB. Transients as long as seven-tenths of the whole temporal series, or even longer, are discarded, since time-delayed systems usually display long transient phenomena [34]. Finally, we obtain the maxima map and represent these points for 1200 varying parameter values of the maximum time-delay in the range τ0 ∈ [0, 11]. Recalling that several initial conditions can lead to the same asymptotic limit cycle, we have coloured the bifurcation diagrams in two colors, to clearly distinguish the two energy levels, whenever they exist.
+ and as detailed in the previous section, for α = 1/2, as the time-delay is increased from
330
+ zero, a first Hopf bifurcation reveals at τ0 = 1/5. Then, if we increase further the maximum
331
+ delay τ0, the fundamental orbit first enlarges reaching a maximum amplitude of xmax = 6.0
332
+ for τ0 close to 2.0, then shrinks again and, finally, it disappears. This is the well-studied
333
+ 9
334
+
335
+ AD
336
+ MS
337
+ 6
338
+ MS
339
+ 8
340
+ 6
341
+ 4
342
+ mac
343
+ 4
344
+ Cmac
345
+ 2
346
+ 2
347
+ α= 0.5
348
+ 0
349
+ α = 0.9
350
+ 0
351
+ 2
352
+ 4
353
+ 6
354
+ 8
355
+ 10
356
+ 0
357
+ 2
358
+ 4
359
+ 6
360
+ 8
361
+ 10
362
+ To
363
+ To
364
+ (a)
365
+ (b)10
366
+ 8
367
+ 6: 2. .:
368
+ 8
369
+ 104
370
+ 2
371
+ 0
372
+ 2
373
+ 0
374
+ 2
375
+ 4
376
+ 6AD
377
+ MS
378
+ 6
379
+ Cmac
380
+ 2
381
+ 0
382
+ 4
383
+ 6
384
+ 8
385
+ 10
386
+ 2
387
+ To6
388
+ 58
389
+ 104
390
+ 3
391
+ 2
392
+ 0
393
+ 0
394
+ 2
395
+ 4
396
+ 6phenomena of amplitude death (AD), frequently displayed by time-delayed differential equa-
397
+ tions [35]. However, for values beyond τ0 = 5.25 a Hopf bifurcation shows anew, which is
398
+ now followed by a secondary Hopf bifurcation, giving rise to quasiperiodic motion. For an
399
+ approximate critical value of the maximum time-delay τ0c = 6.1, a new periodic limit cycle
400
+ of higher amplitude is born, rendering a multistable (MS) two-level system. The second
401
+ quantized excited state shall persist all along the bifurcation diagram and remains periodic,
402
+ although its amplitude shrinks as the retardation increases. Then, the fundamental energy
403
+ level experiences further bifurcations through a quasiperiodic route to chaos [36], ending in a
404
+ chaotic strange attractor (see Figs. 3(a) and (b)). The chaotic attractor experiences a crisis
405
+ at approximately τ0 = 9.0, yielding two coexisting periodic limit cycles, which are depicted
406
+ in Fig. 3(c). For α = 0.9 similar results are observed in Fig. 2(b), except for the fact that the
407
+ amplitude death region is missing, and also the multistable regions appear and disappear
408
+ intermittently along the bifurcation diagram through several crises. Some new interesting
409
+ dynamical features are also discerned, as for example the coexistence of two quasiperiodic
410
+ attractors for τ0 = 9.5. Naturally, whenever a strange attractor disappears through a cri-
411
+ sis, transient chaos phenomena [37] can be observed, where a trajectory can spend large
412
+ transients in the fundamental level, and then spiral away towards the first excited level.
413
+ We now investigate if the two energy levels are well-resolved across the different energy
414
+ shells. For this purpose, and also for aesthetic purposes, we have used a value of α = 0.9
415
+ and τ0 = 5.87 to illustrate this two-level system. For these parameter values, we can find
416
+ two stable symmetric degenerate coexisting orbits at the fundamental level, as shown in
417
+ Fig. 4(a). This degeneracy is a consequence of the fact that the Eq. (2) is invariant under
418
+ spatial reflections, and the splitting of these two orbits constitutes a typical phenomenon
419
+ of symmetry breaking at the fundamental energy level. We recall that symmetry breaking
420
+ is an ordinary phenomenon frequently observed in nonlinear self-excited systems [29]. In
421
+ Fig. 4(b) we have plotted the harmonic external potential in red. We have used the Lyapunov
422
+ energy function E(x, y) = (x2 + y2)/2 to compute the energy of the particle along the
423
+ limit cycles [33], and numerically integrated its average value along these periodic orbits,
424
+ using the trapezoidal rule once more. The average energy has been plotted in the energy
425
+ diagram in dashed lines, together with the energy fluctuations that the stable quantized
426
+ orbits experience along their periodic motions. As we can clearly appreciate, despite the
427
+ fact that the fluctuations are substantial and the oscillator performs excursions out of shell
428
+ 10
429
+
430
+ Figure 3: Multistability. Three phase space portraits along a quasiperiodic route to chaos in the
431
+ multistable region for α = 0.5 (see first bifurcation diagram in Fig. 2). (a) Two doubly-degenerate
432
+ quasiperiodic attractors coexist with a higher amplitude periodic limit cycle surrounding them.
433
+ (b) The quasiperiodic attractors have merged into a single chaotic attractor as the delay increases,
434
+ while the most exterior limit cycle has enlarged. (c) The chaotic orbit disappears through a crisis
435
+ for even higher time-delays, yielding two coexisting periodic limit cycles of different amplitude.
436
+ with respect the average energy, the two levels are well differentiated and they do not overlap
437
+ in the energy diagram. Consequently, it can be safely stated that the present system displays
438
+ quantized stable orbits at two independent energies, which can be denoted as E1 = 0.4 and
439
+ E2 = 26.
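The time-averaged Lyapunov energy along a computed orbit can be estimated with the same trapezoidal rule. A short sketch (the helper name is ours; t, x, y are assumed to hold a converged stretch of the trajectory):

```python
import numpy as np

def average_energy(t, x, y):
    """Time average of the Lyapunov energy E(x, y) = (x^2 + y^2)/2 along an orbit."""
    t = np.asarray(t, dtype=float)
    e = 0.5 * (np.asarray(x) ** 2 + np.asarray(y) ** 2)
    # Trapezoidal time average over the sampled window.
    return np.sum(0.5 * (e[1:] + e[:-1]) * np.diff(t)) / (t[-1] - t[0])
```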
Figure 4: Energy levels. A two-level system for α = 0.9 and τ0 = 5.87. The fundamental level E1 is doubly degenerate, with two coexisting symmetric (under reflection) limit cycles, E1,+ and E1,−. (a) Limit cycles representing the quantization of orbits, with two different average energies, one corresponding to the fundamental level, and the other to the first and last excited level. (b) The harmonic potential is represented in red, while the average energy of the limit cycles is represented with dashed lines. In gray we can see the detour of the orbit through different energy shells. The fluctuations are considerable, although the two levels are well resolved.

Figure 5: Unpredictability. The basins of attraction in the history space of the three stable attractive orbits for α = 0.9 and τ0 = 5.87. The two energy levels are clearly mixed in the phase space of initial histories, rendering the basins their fractal nature. Thus, arbitrarily small perturbations in the initial histories can lead to different asymptotic energy levels. (a) The basin of attraction for A = 0.43 and varying frequency in the phase space of the periodic histories. (b) A blow-up of the basins, evincing the sensitivity of the system to initial conditions, which entails unpredictability at all scales of precision.

To conclude this section, we have also studied the basins of attraction of the system for this particular situation, to ascertain whether there exists sensitivity to external perturbations. This is of crucial importance, for if an external perturbation is effected on this system, we may wonder which of the possible asymptotic limit cycles is attained in the end. Or, equivalently, we may ask about the ultimate energy of the oscillator when it is perturbed from the outside. In Fig. 5 we show the basins of attraction in the history subspace of periodic functions. We have used a resolution of 300 × 300, fixed an amplitude of A = 0.43, and computed trajectories until they get close enough to one of the three attractors. Depending on which attractor is approached, each initial history is plotted in the parameter space with a different color. As we can see, the basins are fractalized, which introduces unpredictability at all scales of precision [38]. However, this basin does not possess the Wada property [27]. In general, unless infinite experimental accuracy is accessible, the best that we can say is that there exists some probability that the system might end in one of the two energy levels. This probability can be roughly approximated by merging the two basins of the respective orbits at the fundamental level, and by computing the size of the resulting basins of attraction in the parameter space. The fraction of the volume of each basin in relation to the total volume of the investigated region of parameter space allows us to introduce the concept of basin stability [39]. In addition, the asymptotic uncertainty can be further studied through the concept of basin entropy, which offers a more concise probabilistic account of the hidden structure of the basins [40].
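Both notions can be estimated from the grid of attractor labels behind Fig. 5. The sketch below is self-contained but uses a random placeholder grid instead of the actual 300 × 300 classification, and the box size is an arbitrary choice:

```python
import numpy as np

def basin_stability(labels):
    """Fraction of the sampled histories ending on each attractor label."""
    vals, counts = np.unique(labels, return_counts=True)
    return dict(zip(vals.tolist(), (counts / labels.size).tolist()))

def basin_entropy(labels, box=10):
    """Average Gibbs entropy of the attractor labels over box-by-box sub-grids."""
    n = (labels.shape[0] // box) * box
    entropies = []
    for i in range(0, n, box):
        for j in range(0, n, box):
            _, counts = np.unique(labels[i:i + box, j:j + box], return_counts=True)
            p = counts / counts.sum()
            entropies.append(-np.sum(p * np.log(p)))
    return float(np.mean(entropies))

# Placeholder 300 x 300 grid with three attractor labels (standing in for E1,+ / E1,- / E2).
rng = np.random.default_rng(0)
labels = rng.integers(0, 3, size=(300, 300))
print(basin_stability(labels), basin_entropy(labels))
```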
V. LIMIT CYCLE SUPERPOSITION

The present section is dedicated to describing a new dynamical phenomenon that we have encountered for α < 0, which is reminiscent of phenomena typically appearing in microscopic physics. In fact, Eq. (2) with α < 0 more closely resembles the electrodynamic self-oscillator encountered in previous works [13]. Specifically, we refer to the existence of states of superposition of orbits. In the present case, this corresponds to a quasiperiodic limit cycle encompassing two smaller symmetric degenerate limit cycles. This phenomenon can only be detected when the effects of the retarded potential are comparable to the magnitude of the external potential. Here we have selected a value of α = −0.9 to illustrate the phenomenon, which in absolute value is rather close to the value k = 1.

In the first place we plot the bifurcation diagram. It has been computed following exactly the same recipe described in the previous section. As the reader can see in Fig. 6, for α < 0 we cannot find a corresponding Liénard system that experiences a Hopf bifurcation for small values of the maximum time-delay τ0. This occurs because the change in the sign of α precludes the antidamping effect produced in the first derivative of x appearing in Eq. (6). However, as τ0 is further increased, a Hopf bifurcation again reveals itself at the approximate maximum delay critical value τ0c = 2.4. Thus, now, the instability occurs when the system is posed quite far from the original equilibrium. It must be the result of high-order terms in the Taylor expansion of the delayed potential, involving the jerk, the jounce and other derivatives of higher order. Later on, at the critical value τ0c = 2.7, a pitchfork bifurcation ensues, which then transits to the chaotic regime as we keep increasing the retardation. As far as we have computed, a period-three orbit coexisting with the two period-one orbits suddenly appears. As we zoom in on the bifurcation diagram, we can see that these period-3 orbits then experience a period doubling bifurcation. Nevertheless, the cascade cannot be clearly distinguished, since it includes very complicated dynamics with truly large chaotic transients, involving heterogeneous alternating motions.

Figure 6: Bifurcation diagram (α < 0). The bifurcation diagrams of the maxima map of x are represented for increasing values of the maximum delay τ0 and α = −0.9. A total of ten randomly chosen initial histories have been used and depicted with two different colors to represent the asymptotic sets. A first Hopf bifurcation (arrow) now appears for τ0 = 2.4, followed by a pitchfork bifurcation (green and blue branches). We can distinguish a region where limit cycle superposition (LCS) is detected (red background). For τ0 > 9.2 a strange attractor with robust intermittency (RI) appears.

For higher values of the maximum time-delay, around τ0 = 8.5, we can find a window of parameter values in the bifurcation diagram where a limit cycle superposition can be found. We describe this new phenomenon in detail. In this region, we have numerically detected at least five different coexisting limit cycles, by scrutinizing the history parameter space. Two of them are symmetric and have lower amplitude. They could describe a first fundamental level, but this time unresolved from the second, which is the one of concern now. For simplicity, we omit them from our analysis. Then, another two limit cycles of larger amplitude have also been found, which consist of two complicated period-6 stable symmetric degenerate orbits. By varying initial histories in the parameter space (A, ω, ϕ), one can find many past histories leading to either of these two limit cycles, just as shown in Fig. 5 for E1,±. But, to our surprise, we have also found a superposition limit cycle travelling along both limit cycles (see Fig. 7(a)). This orbit spends some time going close to one of the degenerate stable periodic orbits, and then switches to the other one, alternating between them in a regular fashion.

The new limit set corresponds to an apparently quasiperiodic stable attractor, and it can also be accessed from many parameter values (A, ω, ϕ) in the parameter space chosen as initial histories. Since this superposition limit cycle resembles its encompassed orbits, it can be numerically shown that its average energy is close to, although slightly below, the average energy of the other two period-6 orbits. The small difference arises because the superposition limit set visits regions of the phase space with lower energy (closer to the origin of the square well), which are not covered by the periodic trajectories. Thus, as far as we are concerned, we describe here for the first time a stable limit cycle that can be partly constructed from two smaller stable orbits, by nearly taking their union in the phase space. This would be impossible in a finite-dimensional dynamical system represented by some set of ordinary differential equations, as frequently used to describe conventional mechanical conservative systems: two orbits cannot cross in the phase space of a finite-dimensional continuous system. Of course, if we interpret the true phase space of our retarded oscillator as infinite-dimensional, neither do they cross here.

Figure 7: Limit cycle superposition. (a) We can see two symmetric degenerate periodic limit cycles at the fundamental level (red and blue orbits) for τ0 = 8.5 and α = −0.9. Another limit cycle (green orbit) encompassing the previous two orbits can be appreciated. (b) Power spectra of the periodic orbits. (c) Power spectra of the superposition limit cycle encompassing the periodic orbits, where the lower frequencies (arrows) are different, rendering this attractor its quasiperiodic nature.

To conclude our analysis, because the superposition state takes after the two encompassed smaller cycles, we have computed the power spectra (see Figs. 7(b) and (c)) of the temporal series of the quantized periodic orbits and their superposition orbit, to ascertain the periodicity of the latter. As expected, the power spectra of both orbits take after one another, since their average energy is similar. However, we can see that differences appear in the lower frequency domain of the spectrum, which render the superposition limit cycle quasiperiodic or, in the worst case, of a very high period, as compared to the other orbits. Nevertheless, without taking advantage of spectral analysis, it is really striking to see how this quasiperiodic orbit resembles the underlying periodic limit cycles.
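Power spectra such as those in Figs. 7(b)-(c) can be obtained from the computed series with a plain FFT. The sketch below (with a Hann taper, our own choice) is illustrative rather than the exact routine used for the figures:

```python
import numpy as np

def power_spectrum(x, dt):
    """One-sided power spectrum of a real, evenly sampled series x(t)."""
    x = np.asarray(x, dtype=float)
    x = (x - x.mean()) * np.hanning(len(x))   # remove the mean, taper the ends
    X = np.fft.rfft(x)
    freqs = np.fft.rfftfreq(len(x), d=dt)
    return freqs, np.abs(X) ** 2 / len(x)

# freqs, power = power_spectrum(x, dt=0.01)   # compare the three orbits of Fig. 7
```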
VI. ROBUST INTERMITTENCY

We now investigate an interesting dynamical phenomenon that is encountered in our retarded oscillator for α = −0.9 when the maximum time-delay is substantially increased (see Fig. 8(a)). This phenomenon consists in a multiscale strange attractor that appears to be robust [41] and which also exhibits intrinsic intermittency in a double sense.

To understand it properly, we first show some complicated symmetric degenerate limit cycles with two intrinsic scales. By intrinsic we mean a property that results from the structure of the limit cycles, and not as a consequence of some crisis at a bifurcation point. As shown in Fig. 8(a), for τ0 = 3.35, these attracting orbits spiral out of the rest state and are then reinjected back into the limit cycle, drifting slowly towards the equilibrium point without oscillating at all. They clearly evoke a saddle-focus structure, as appearing in Shilnikov's bifurcation [42], especially when embedded in a higher-dimensional subspace of the full infinite-dimensional true phase space (see below). Their frequency spectrum is very rich, having two maxima and many frequencies at different scales. Interestingly, by implementing a continuous wavelet transform method, we can capture dynamical phenomena that are not displayed by conventional stationary spectral analysis. As can be appreciated in Fig. 8(b), this time-multiscale method uses several time-windows, showing how the frequency spectrum evolves in time, and evincing the alternation in the system between oscillatory dynamics and low-speed silent drifts. This dynamics is somewhat reminiscent of relaxation oscillators, although these limit cycles are way more sophisticated in the present case [43].

Figure 8: Multiscale limit cycles. (a) Two degenerate symmetric quasiperiodic attractors for α = −0.9 and τ0 = 3.35. The trajectories are reminiscent of a saddle-focus projected on the 2D phase space. (b) The magnitude of the continuous wavelet transform is represented (colorbar), using the analytic Morse wavelet with the symmetry parameter equal to 3 and a time-bandwidth product equal to 60. We can already appreciate in the temporal evolution of the spectrum a complex on-off periodic oscillatory behaviour. The inset shows the total power spectra, with a bimodal distribution displaying a rich frequency content.
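The scalograms in Figs. 8(b) and 9(b) were obtained with MATLAB's analytic Morse wavelet (symmetry parameter 3, time-bandwidth product 60). A rough Python substitute, assuming PyWavelets is available and swapping in a complex Morlet wavelet, might look like this sketch:

```python
import numpy as np
import pywt

def scalogram(x, dt, num_scales=128):
    """Magnitude of a continuous wavelet transform of the series x(t).

    Uses a complex Morlet wavelet as a stand-in for the analytic Morse wavelet
    employed in the paper; returns |coefficients| and the associated frequencies.
    """
    scales = np.geomspace(2.0, len(x) / 8.0, num_scales)
    coefs, freqs = pywt.cwt(x, scales, 'cmor1.5-1.0', sampling_period=dt)
    return np.abs(coefs), freqs

# mag, freqs = scalogram(x, dt=0.01)
# plt.pcolormesh(t, freqs, mag) would produce a plot akin to Fig. 8(b).
```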
Figure 9: Intrinsic intermittency. (a) The two degenerate symmetric quasiperiodic attractors have merged into a chaotic strange attractor in the 2D phase space. This attractor possesses two dynamical and well-differentiated scales. (b) The continuous wavelet transform is represented (colorbar), using again the analytic Morse wavelet with the symmetry parameter equal to 3 and a time-bandwidth product equal to 60. We can again appreciate in the temporal evolution of the spectrum a complex behaviour that switches between two oscillatory motions with different amplitude. (c) The time series of x and its derivative y in the phase space. A sequence of bursts is clearly appreciated. Note how the trajectories can be reinserted into the attractor through two different arms, making the phenomenon doubly intermittent.

For higher parameter values, as for example for τ0 = 9.5, these two complex limit cycles have merged into a strange chaotic attractor, as shown in Fig. 9(a). Now we find that the system alternates between two different states of chaotic oscillation, one with low amplitude and another with a higher amplitude (Fig. 9(c)). In this sense, we can affirm that the system displays intermittent behaviour, switching between these two nonperiodic modes of oscillation. Comparing this dynamics with the dynamics along the underlying multiscale limit cycles previously described, we can say that the low-speed drift towards the original equilibrium of the system without retardation has now become an oscillation of small amplitude around it. Note also how the system is reinjected into the domain through two possible routes: the lower branch and the higher branch of the residual multiscale attractors, rendering a second form of intermittency. Importantly, this doubly intermittent behavior is intrinsic to the complex heterogeneous nature of the attractor. Simply put, it does not require a fine-tuning of the parameter τ0, as opposed to conventional intermittency phenomena, which occur close to bifurcation critical points [44]. Moreover, it can be shown that this chaotic attractor does not disappear as we move across the parameter space of τ0. Thus it is robust under parameter perturbations.

Figure 10: Robustness. (a) The largest Lyapunov exponent has been computed by using embedding techniques across different values of the time-delay for α = −0.9. A value of 0.05 has been chosen as a threshold to determine if the motion is chaotic. We see that its positive value rarely goes below the threshold, which entails great robustness of the attractor to parameter perturbations. (b) The attractor embedded in D = 3 dimensions, reconstructed with an embedding delay τ = 10. We see how it unfolds in this higher-dimensional space, so that the hidden saddle-focus structure is more clearly appreciated. Its projected shadow manifestly resembles the attractor in the 2D phase space.

Fascinated by this dynamical behavior and by the fact that the attractor seems to be robust, in the sense that no periodic windows appear as we zoom in on the bifurcation diagram around some value of τ0, we have computed the largest Lyapunov exponent (LLE) across a continuous interval of parameter values of the maximum time-delay τ0. Since MATLAB's integrator does not allow computing the LLE dynamically, we have taken advantage of embedology and used the entire time series. We follow a method proposed by Rosenstein et al. to efficiently compute the LLE from experimental time series [45]. These computations have been carried out using an embedding dimension of D = 3 and an embedding time-delay for the series of τ = 10. The mean period T considered to compute the LLE can be obtained from spectral analysis (see Ref. [45]). We have used a value of T = 35, which is an upper bound obtained for many parameter values of the attractor. The integration time has been taken as t ∈ [0, 3000] and the maximum number of iterations for the algorithm was set to 1500, keeping our conservative attitude (see again Ref. [45]). The 3D embedding is depicted in Fig. 10(b).
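A compact, brute-force rendition of the Rosenstein et al. estimator with the parameter choices quoted above (D = 3, lag of 10 samples, Theiler window equal to the mean period) is sketched below; it is an illustration, not the exact code behind Fig. 10(a):

```python
import numpy as np

def rosenstein_lle(x, dim=3, lag=10, mean_period=35, n_iter=1500, dt=0.01):
    """Largest Lyapunov exponent from a scalar series, following Rosenstein et al. [45].

    Builds a delay embedding, pairs every point with its nearest neighbour at a
    temporal separation larger than the mean period, tracks the average log
    divergence of the pairs, and fits a line to its initial growth.
    """
    x = np.asarray(x, dtype=float)
    m = len(x) - (dim - 1) * lag
    emb = np.column_stack([x[i * lag:i * lag + m] for i in range(dim)])

    # Nearest neighbour of each embedded point, excluding temporally close points.
    idx = np.arange(m)
    neighbors = np.empty(m, dtype=int)
    for i in range(m):                      # brute-force O(m^2) search
        d = np.linalg.norm(emb - emb[i], axis=1)
        d[np.abs(idx - i) < mean_period] = np.inf
        neighbors[i] = np.argmin(d)

    # Average logarithmic divergence of the pairs over n_iter steps.
    steps = np.arange(1, n_iter)
    div = np.full(len(steps), np.nan)
    for k, s in enumerate(steps):
        ok = (idx + s < m) & (neighbors + s < m)
        if ok.sum() == 0:
            break
        dist = np.linalg.norm(emb[idx[ok] + s] - emb[neighbors[ok] + s], axis=1)
        dist = dist[dist > 0]
        if len(dist):
            div[k] = np.mean(np.log(dist))

    good = ~np.isnan(div)
    # The LLE is the slope of the initial linear region of the divergence curve.
    fit_len = min(300, int(good.sum()))
    t_fit = steps[good][:fit_len] * dt
    return np.polyfit(t_fit, div[good][:fit_len], 1)[0]
```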
909
+ In Fig. 10(a) we can see the value of the maximum Lyapunov exponent for α = −0.9,
910
+ starting with a periodic orbit at τ0 = 9.1, where the value of the Lyapunov exponents is very
911
+ small or negative, as it should be for a periodic stable motion. When the chaotic attractor
912
+ is born, a sudden jump to positive high values of the exponent is computed. We have set
913
+ a threshold of λmax = 0.05 as the limiting value below which we cannot safely affirm that
914
+ a sensitivity to initial histories occurs. This value is a conservative choice consistent with
915
+ the temporal series of the periodic window, before the chaotic dynamics is triggered. As
916
+ shown in Fig. 10(a), we have performed magnifications at several scales whenever downward
917
+ peak fluctuations in the LLE exponent are present. The threshold limit is rarely exceeded.
918
+ Furthermore, whenever the exponent drops bellow the value of 0.05, we have systematically
919
+ computed bifurcation diagrams to see if the chaotic behavior vanishes. However, we have
920
+ not found any periodic windows, and if periodic orbits exist, they coexist with the chaotic
921
+ attractor. Thus we can conclude that the chaotic attractor is very robust in the present
922
+ dynamical system, even though an analytical proof of robustness can not be easily provided
923
+ in this case, as in previous works [41]. Since the intermittency arises as a consequence of the
924
+ complicated nature of the attractor, which is robust, it is reasonable to say that, in addition
925
+ to being intrinsic, it is robust, as well.
926
+ 20
927
+
928
+ VII.
929
+ CONCLUSIONS
930
+ In the present work we have developed a very simple retarded oscillator with state-
931
+ dependent delays, uncovering crucial dynamical behaviour that is frequently believed to be
932
+ impossible in classical physics. Firstly, we have shown that orbits can be quantized in the
933
+ phase space, producing one or more energy levels. We believe that the fact that these levels
934
+ are produced in a finite number, as compared to having an infinite spectra of energy levels,
935
+ is due to the fact that our delayed differential equations are not of the advanced type, as
936
+ encountered in electrodynamics [46]. Secondly, we have found sensitivity to initial condi-
937
+ tions in the history space, what introduces unpredictability in a simple fashion, making the
938
+ concept of randomness redundant, in principle [47]. Are the apparent random fluctuations
939
+ of fundamental physical systems just a byproduct of the complicated, even heterogeneous
940
+ and high-dimensional [48], chaotic dynamics introduced by the dynamics of fields and the
941
+ subsequent retardation effects in functional differential equations? [46]. Finally, we have
942
+ uncovered a robust intermittency in the absence of multistable external wells, simply caused
943
+ by the inherent multiscale nature of our chaotic system. Of course, this is possible because
944
+ retardation introduces more dimensions in the dynamical system, ultimately approaching
945
+ its center or slow manifold. In this respect, a deep connection between Lorenz-like chaotic
946
+ dynamical systems and walking droplets has been recently proved [49].
947
+ Interestingly, other related phenomena commonly attributed to the microscopic realm,
948
+ such as tunneling through external potential barriers (or in multistable external potentials)
949
+ can be easily demonstrated with our retarded potential by introducing an external Duffing
950
+ potential in place of the harmonic well used here [50]. A similar situation occurs when
951
+ studying the flow of electrons through potential barriers, where this paradoxical phenomenon
952
+ is explained when interpreted in terms of the quantum potential, which appears in
953
+ the Hamilton-Jacobi equation of the quantum system, and which is frequently disregarded
954
+ when interpreting physical phenomena [51]. For a connection between retarded potentials
955
+ and the quantum potential we refer the reader to previous works [13]. In other words, we
956
+ are suggesting that the switch between different wells leading to an intermittent behavior
957
+ can be interpreted in terms of the robust intermittency phenomenon. This dynamics is due
958
+ to the nonlinear resonances that allow the particle to jump back and forth over the potential
959
+ barrier [50].
960
+ Another important phenomenon that might be studied with our oscillator is the existence
963
+ of entangled states, which can be explained in terms of synchronization of oscillations [13].
964
+ These states have already been predicted in previous works in classical electrodynamics to
965
+ arise as a consequence of delay-coupling τi(xi, xj) and synchronization between systems of
966
+ self-oscillating bodies. Synchronization phenomena have already been found to produce
967
+ entanglement in theoretical models of bouncing silicone oil droplets [22], although not with
968
+ dynamical setups closing the locality loophole so far. Synchronization is more complicated
969
+ for fluids, because the dissipation is higher at the scale of macroscopic fluid dynamics,
970
+ especially when compared to electrodynamic fields, where light travels mostly unimpeded
971
+ when particles communicate through the electrovacuum. This can entail loopholes produced
972
+ by the long-range correlations in the background fields [52].
973
+ Importantly, time-delays are frequently considered constant, so that their dynamical na-
974
+ ture is disregarded.
975
+ Fortunately, thanks to the development of numerical methods and
976
+ computational techniques, an increasing number of works in the literature of dynamical
977
+ systems is being dedicated to the dynamical evolution of time-delays [53]. We have shown
978
+ that the state-dependence of delays can produce very complicated behavior, entailing non-
979
+ linear oscillations through the ubiquitous Hopf bifurcation, and producing counterintuitive
980
+ new complex dynamical chaotic behavior. The connection between state-dependent time-
981
+ delayed differential equations and Liénard systems had barely been suggested [24]. A much
982
+ deeper exploration has been provided here. It was certainly lacking in the literature, and
983
+ opens forefront possibilities to study new physical nonlinear phenomena.
984
+ In summary, we have provided new evidence in support of Raju-Atiyah’s hypothesis,
985
+ claiming that physical phenomena in the microscopic physical realm can be understood by
986
+ using functional differential equations to study dynamical phenomena produced by time
987
+ retardation in non-Markovian systems. Importantly, we highlight that the dissipation and
988
+ the time-delay, which both constitute genuine radiative phenomena, introduce an arrow of
989
+ time in physical systems [54]. Thus perhaps the time-reversal symmetry of conservative field
990
+ theories might be broken when oscillating and radiating solitons are formed in these fields
991
+ [55]. Partly, the abusive neglect of delayed feedback in physics stems from the tradition of
992
+ Newtonian mechanics, where action at a distance is artificially introduced to simplify forces
993
+ of interaction.
994
+ Certainly, this approximation has rendered many accurate and valuable
995
+ results, allowing great progress in the knowledge of many macroscopic physical systems,
996
+ which would have been impossible otherwise. By contrast, the principle of causality
999
+ in classical field theories produces memory effects that are always present whenever physical
1000
+ entities communicate through a background field with themselves, and among each other.
1001
+ VIII. ACKNOWLEDGMENT
1003
+ The author would like to thank Mattia Coccolo for valuable comments on the elaboration
1004
+ of the present manuscript, the discussion of some of its ideas and the computation of the
1005
+ basins of attraction.
1006
+ [1] Airy, G. B. (1830). On certain conditions under which perpetual motion is possible. Trans.
1007
+ Cambridge Phil. Soc. 3, 369-372.
1008
+ [2] Schell, M., Ross, J. (1986). Effects of time delay in rate processes. J. Chem. Phys. 85, 6489-
1009
+ 6503.
1010
+ [3] Mackey, M. C., Glass, L. (1977). Oscillation and chaos in physiological control systems. Science
1011
+ 197, 287-289.
1012
+ [4] Hansen, M., Protachevicz, P. R., Iarosz, K. C., Caldas, I. L., Batista, A. M., Macau, E. E. N.
1013
+ (2022). The effect of time delay for synchronisation suppression in neuronal networks. Chaos,
1014
+ Solitons & Fractals 164, 112690.
1015
+ [5] Ferrell Jr, J. E., Tsai, T. Y. C., Yang, Q. (2011). Modeling the cell cycle: Why do certain
1016
+ circuits oscillate? Cell 144, 874-885.
1017
+ [6] Boutle, I., Taylor, R. H., Römer, R. A. (2007). El Niño and the delayed action oscillator. Am.
1018
+ J. Phys. 75, 15-24.
1019
+ [7] Salpeter, E. E., Salpeter, S. R. (1998). Mathematical model for the epidemiology of tuberculo-
1020
+ sis, with estimates of the reproductive number and infection-delay function. Am. J. Epidemiol.
1021
+ 147, 398–406.
1022
+ [8] Kalecki, M. (1935). A macrodynamic theory of business cycles. Econometrica 3, 327-344.
1023
+ [9] Kolmanovskii, V. B., Nosov, V. R. (1986). Stability of functional differential equations (Vol.
1024
+ 180). Elsevier.
1025
+ [10] Insperger, T., St´ep´an, G. Turi, J. (2007). State-dependent delay in regenerative turning pro-
1028
+ cesses. Nonlinear Dyn. 47, 275–283.
1029
+ [11] Jeevarathinam, C., Rajasekar, S. (2015). Vibrational resonance in the Duffing oscillator with
1030
+ state-dependent time-delay. IJARPS 2, 1-8.
1031
+ [12] Mart´ınez-Llin`as, J., Porte, X., Soriano, M. et al. (2015). Dynamical properties induced by
1032
+ state-dependent delays in photonic systems. Nat. Commun. 6, 7425
1033
+ [13] L´opez, A. G. (2020). On an electrodynamic origin of quantum fluctuations. Nonlinear Dyn.
1034
+ 102, 621-634.
1035
+ [14] L´opez, A. G. (2021). Stability analysis of the uniform motion of electrodynamic bodies. Phys.
1036
+ Scr. 96, 015506.
1037
+ [15] Raju, C. K. (2004). The electrodynamic 2-body problem and the origin of quantum mechanics.
1038
+ Found. Phys. 34, 937-963.
1039
+ [16] Johnson, G. W., Walker, M. E. (2006). Sir Michael Atiyah’s Einstein Lecture: “The Nature
1040
+ of Space”. Notices of the AMS 53, 674-678.
1041
+ [17] Couder, Y., Proti`ere, S., Fort, E., Boudaoud, A. (2005). Dynamical phenomena: walking and
1042
+ orbiting droplets. Nature 437, 208.
1043
+ [18] Proti`ere, S., Boudaoud, A., Couder, Y. (2006). Particle-wave association on a fluid interface.
1044
+ J. Fluid Mech. 544, 85-108.
1045
+ [19] Fort, E., Eddi, A., Boudaoud, A., Moukhtar, J., Couder, Y. (2010). Path-memory induced
1046
+ quantization of classical orbits. Proc. Natl. Acad. Sci. 107, 17515-17520.
1047
+ [20] Pucci, G., Harris, D., Faria, L., Bush, J. (2018). Walking droplets interacting with single and
1048
+ double slits. J. Fluid Mech. 835, 1136-1156.
1049
+ [21] Eddi, A., Fort, E., Moisy, F., Couder, Y. (2009). Unpredictable tunneling of a classical wave-
1050
+ particle association. Phys. Rev. Lett. 102, 240401.
1051
+ [22] Papatryfonos, K., Vervoort, L., Nachbin, A., Matthieu, L., Bush, J. W. M. (2022). Bell test
1052
+ in a classical pilot-wave system. arXiv:2208.08940 [physics.flu-dyn]
1053
+ [23] Turton S. E., Couchman, M. M. P., Bush, J. W. M. (2018). A review of theoretical modeling
1054
+ of walking droplets: toward a generalized pilot-wave framework. Chaos 28, 096111.
1055
+ [24] Jenkins, A. (2013). Self-oscillation. Phys. Rep. 525, 167-222.
1056
+ [25] Stokes, G. G. (1851). On the effect of internal friction of fluids on the motion of pendulums.
1057
+ Trans. Cambridge Philos. Soc. 9, 8–106.
1058
+ [26] Liénard, A. (1898). Champ électrique et magnétique produit par une charge concentrée en un
1061
+ point et animée d'un mouvement quelconque. L'Éclairage électrique 16, 5-14.
1062
+ [27] Daza, A., Wagemakers, A., Sanju´an, M.A.F. (2017). Wada property in systems with delay.
1063
+ Commun. Nonlinear Sci. Numer. Simulat. 43, 220-226.
1064
+ [28] Alligood, K. T., Sauer, T. D., Yorke, J. A. (1996). Chaos: An Introduction to Dynamical
1065
+ Systems. Springer, New York.
1066
+ [29] Mainzer, K., Chua, L. O. (2013). Local Activity Principle: the Cause of Complexity and
1067
+ Symmetry Breaking. Imperial College Press, London.
1068
+ [30] L´opez, ´A. G., Benito, F., Sabuco, J., Delgado-Bonal, A. (2022). The thermodynamic efficiency
1069
+ of the Lorenz system. arXiv:2202.07653 [cond-mat.stat-mech]
1070
+ [31] Abraham, R., Marsden, J. E. (1987). Foundations of Mechanics (Second Ed.). Addison-Wesley
1071
+ Publishing Company, Inc., Redwood City, CA.
1072
+ [32] Liénard, A. (1928). Étude des oscillations entretenues. Revue générale de l'électricité 23,
1073
+ 901–912.
1074
+ [33] Perko, L. (1991). Differential Equations and Dynamical Systems (Third ed.). New York,
1075
+ Springer. pp. 254–257.
1076
+ [34] Lakshmanan, M., Senthilkumar, D. V. (2011). Dynamics of nonlinear time-delay systems.
1077
+ Springer Science & Business Media. pp. 37-41.
1078
+ [35] Ramana Reddy, D. V., Sen, A., Johnston, G. L. (1998). Time delay induced death in coupled
1079
+ limit cycle oscillators. Phys. Rev. Lett. 80, 5109.
1080
+ [36] Elhadj, Z., Sprott, J. C. (2008). A minimal 2-D quadratic map with quasi-periodic route to
1081
+ chaos. Int. J. Bif. Chaos 18, 1567-1577.
1082
+ [37] Lai, Y. -C. and T´el, T. (2011). Transient Chaos: Complex Dynamics on Finite-Time Scales.
1083
+ Springer, New York.
1084
+ [38] Aguirre, J., Viana, R. L., and Sanju´an, M. A. F. (2009). Fractal structures in nonlinear
1085
+ dynamics. Rev. Mod. Phys. 81, 333-386.
1086
+ [39] Menck, P., Heitzig, J., Marwan, N. et al. (2013). How basin stability complements the linear-
1087
+ stability paradigm. Nature Phys. 9, 89–92.
1088
+ [40] Daza, A., Wagemakers, A., Georgeot, B. et al. (2016). Basin entropy: a new tool to analyze
1089
+ uncertainty in dynamical systems. Sci. Rep. 6, 31416.
1090
+ [41] Banerjee, S., Yorke, J. A., Grebogi, C. (1998). Robust chaos. Phys. Rev. Lett. 80, 3049.
1091
+ [42] Shilnikov, L. P. (1967). The existence of a denumerable set of periodic motions in four-
1094
+ dimensional space in an extended neighborhood of a saddle-focus. Soviet Math. Dokl. 8, 54–58.
1095
+ [43] Van der Pol, B. (1920). A theory of the amplitude of free and forced triode vibrations. Radio
1096
+ Review 1, 701-710.
1097
+ [44] Pomeau, Y., Manneville, P. (1980). Intermittent transition to turbulence in dissipative dy-
1098
+ namical systems. Commun. Math. Phys. 74, 189–197.
1099
+ [45] Rosenstein, M. T., Collins, J. J., De Luca, C. J. (1993). A practical method for calculating
1100
+ largest Lyapunov exponents from small data sets. Phys. D: Nonlinear Phenom. 65, 117-134.
1101
+ [46] L´opez, ´A. G. (2022). The electrodynamic origin of the wave-particle duality. In: Banerjee, S.,
1102
+ Saha, A. (eds) Nonlinear Dynamics and Applications. Springer Proceedings in Complexity.
1103
+ Springer, Cham.
1104
+ [47] Sprott, J. C. (2007). A simple chaotic delay differential equation. Phys. Lett. A 366, 397-402.
1105
+ [48] Saiki, Y., Takahasi, H., Yorke, J. A. (2021). Piecewise linear maps with heterogeneous chaos.
1106
+ Nonlinearity 34, 5744.
1107
+ [49] Valani, R. N. (2022). Lorenz-like systems emerging from an integro-differential trajectory
1108
+ equation of a one-dimensional wave–particle entity. Chaos 32, 023129.
1109
+ [50] Coccolo, M., Zhu, B., Sanju´an, M.A.F. et al. (2018). Bogdanov–Takens resonance in time-
1110
+ delayed systems. Nonlinear Dyn. 91, 1939–1947.
1111
+ [51] Bohm, D. (1952). A suggested interpretation of the quantum theory in terms of “hidden”
1112
+ variables. I. Phys. Rev. 85, 166-179.
1113
+ [52] Morgan, P. (2006). Bell inequalities for random fields. Phys. A: Math. Gen. 39, 7441.
1114
+ [53] M¨uller, D., Otto, A., Radons, G. (2018). Laminar chaos. Phys. Rev. Lett. 120, 084102.
1115
+ [54] Mackey, M. C. (1992). Time’s arrow: the origins of thermodynamic behaviour. Springer, New
1116
+ York. pp. 101-102.
1117
+ [55] Fodor, G., Forg´acs, P., Grandcl´ement, P, R´acz, I. (2006). Oscillons and quasibreathers in the
1118
+ φ4 Klein-Gordon model. Phys. Rev. D 74, 124003.
1119
3dFRT4oBgHgl3EQfoTds/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4dE4T4oBgHgl3EQf0w2N/content/tmp_files/2301.05285v1.pdf.txt ADDED
@@ -0,0 +1,1002 @@
1
+ Laser Inter-Satellite Link Setup Delay:
2
+ Quantification, Impact, and Tolerable Value
3
+ Dhiraj Bhattacharjee1, Aizaz U. Chaudhry1, Halim Yanikomeroglu1, Peng Hu2, and Guillaume Lamontagne3
4
+ 1Department of Systems and Computer Engineering, Carleton University, Ottawa, ON K1S 5B6, Canada
5
+ 2National Research Council Canada (NRC), 1200 Montreal Road, Ottawa, ON K1A 0R6, Canada
6
+ 3MDA, Sainte-Anne-de-Bellevue, QC H9X 3R2, Canada
7
+ 1{dhirajbhattacharjee, auhchaud, halim}@sce.carleton.ca, 2peng.hu@nrc-cnrc.gc.ca, 3guillaume.lamontagne@mda.space
8
+ Abstract—Dynamic laser inter-satellite links (LISLs) provide
9
+ the flexibility of connecting a pair of satellites as required (dynam-
10
+ ically) while static LISLs need to be active continuously between
11
+ the energy-constrained satellites. However, due to the LISL estab-
12
+ lishment time (termed herein as LISL setup delay) being in the
13
+ order of seconds, realizing dynamic LISLs is currently unfeasible.
14
+ Towards the realization of dynamic LISLs, we first study the
15
+ quantification of LISL setup delay; then we calculate the end-
16
+ to-end latency of a free-space optical satellite network (FSOSN)
17
+ with the LISL setup delay; subsequently, we analyze the impact
18
+ of LISL setup delay on the end-to-end latency of the FSOSN.
19
+ We also provide design guidelines for the laser communication
20
+ terminal manufacturers in the form of maximum tolerable value
21
+ of LISL setup delay for which the FSOSN based on Starlink’s
22
+ Phase I satellite constellation will be meaningful to use for low-
23
+ latency long-distance inter-continental data communications.
24
+ Index Terms—dynamic laser inter-satellite links, free-space
25
+ optical satellite networks, laser inter-satellite link setup delay,
26
+ Starlink.
27
+ I. INTRODUCTION
28
+ In recent advancements of wireless communication in the 6G era,
29
+ satellite networks have been seen as an integral part, along with
30
+ terrestrial networks, for global broadband coverage, especially
31
+ for enabling broadband Internet in rural and remote areas [1],
32
+ low-latency long-distance inter-continental data communications
33
+ [2], and IoT based monitoring and remote surveillance [3]. From
34
+ the 3GPP definition, satellite payloads could either be transparent
35
+ or regenerative [4]. In the transparent scenario, inter-continental
36
+ communication has to go up (ground station to satellite) and
37
+ down (satellite to ground station) frequently to reach from
38
+ source to destination. With regenerative payload, communication
39
+ between satellites over inter-satellite links (ISLs) could be a
40
+ better option in such long-distance communication. Compared
41
+ to RF-based ISLs, laser ISLs (LISLs) have the advantage of
42
+ higher bandwidth, smaller antenna size, higher directivity, less
43
+ power consumption, less chance of interception and interference,
44
+ etc [5]. Exploiting these LISLs in low Earth orbit (LEO) or very
45
+ low Earth orbit (VLEO) satellite mega constellations, free-space
46
+ optical satellite networks (FSOSNs) can be realized in space [6].
47
+ On the basis of an LISL’s active duration, LISLs can be
48
+ classified into two types: static LISLs and dynamic LISLs. Static
49
+ LISLs are those LISLs which are kept active all the time, e.g.,
50
+ SpaceX’s Starlink will have four static LISLs per satellite which
51
+ will be operating all the time [7]. In contrast, dynamic LISLs
52
+ can be established dynamically between satellites (which are
53
+ within the LISL range) at any time on demand depending upon
54
+ data communication requirements. To realize such dynamic
55
+ LISLs instantaneously, we need to have a very precise and
56
+ efficient pointing, acquisition, and tracking (PAT) system [8].
57
+ Before two satellites start communicating via LISLs,
58
+ the transmitting satellite needs to position its laser beam within
59
+ the field of view of the receiver satellite (pointing). Then the
60
+ receiver satellite needs to align itself towards the arriving beam
61
+ (acquisition). Finally, transmitter and receiver continue this
62
+ process as the communication goes on (tracking) [9]. Now, we
63
+ define LISL setup delay as the time taken by the PAT system to
64
+ establish the LISL, i.e., the sum of pointing time and acquisition
65
+ time. This delay will be introduced to the end-to-end latency
66
+ from a source ground station to destination ground station when
67
+ the path over an FSOSN changes. Note that when the path
68
+ changes, it could lead to one or multiple new LISLs. However,
69
+ LISL setup delay will be introduced only once as multiple
70
+ LISLs can be established simultaneously during a time slot.
71
+ Satellites are driven by onboard battery and solar power, and
72
+ satellite battery power is a very precious resource, which should
73
+ be used intelligently. In that regard, static LISLs are always
74
+ active whether they are being used or not. This will drain the
75
+ satellite battery and satellites could be dead more often and they
76
+ need to be de-orbited and new satellites have to be launched.
77
+ This in turn will increase the maintenance expenditure. On the
78
+ other hand, dynamic LISLs will be an energy efficient approach
79
+ where LISLs are only established as required. With dynamic
80
+ LISLs, two neighbour satellites could connect whenever they
81
+ are within LISL range and this will provide more routing
82
+ options. These links between neighbour satellites could be
83
+ inter-orbital plane, crossing orbital plane, inter-shell, and even
84
+ inter-constellation (e.g., between Starlink and OneWeb). Also, as
85
+ the LEO/VLEO satellites are mobile, communications between
86
+ satellites and ground stations will always be through dynamic
87
+ laser links. Furthermore, in an operating satellite constellation, if
88
+ one or many satellites fail, dynamic LISLs will instantaneously
89
+ reroute the traffic by avoiding the dead satellite(s).
90
+ LISL setup delay for current laser communication terminals
91
+ (LCTs) varies from a few seconds to tens of seconds [10]. This
92
+ prevents us from realizing dynamic LISLs in next-generation
93
+ FSOSNs (NG-FSOSNs) in late 2020s. In next-next-generation
94
+ FSOSNs (NNG-FSOSNs) (in 2030s), due to advancement in
95
+ satellite PAT technology, LISL setup delay could be reduced to
96
+ the order of a few milliseconds and dynamic LISLs could become
97
+ a reality. In this context, we study the quantification of LISL setup
98
+ delay in the FSOSN based on Starlink’s Phase I constellation [7].
99
+ We calculate the end-to-end latency of this FSOSN using different
100
+ values of the LISL setup delay in different inter-continental
101
+ connection scenarios and different LISL ranges for satellites.
102
+ We investigate the impact of LISL setup delay on overall
103
+ latency and provide design guidelines for LCT manufacturers
104
+ to leverage full potential of NNG-FSOSNs via dynamic LISLs.
105
+ To the best of our knowledge, there exists no study on LISL
106
+ setup delay that examines its quantification, and its impact on
109
+ end-to-end latency along with its maximum tolerable values.
110
+ The authors of [11] state that for terrestrial distances
111
+ larger than 3000 km, FSOSNs could provide a better latency
112
+ performance as compared to the optical fiber terrestrial network
113
+ (OFTN). In high-frequency trading of stocks, even 1 ms
114
+ improvement in latency could generate $100 million of revenue
115
+ per year [12]. Thus, in such long-distance communication,
116
+ FSOSNs could be a better solution compared to the OFTN.
117
+ With this objective, we come up with maximum tolerable values
118
+ of LISL setup delay for which latency performance of the
119
+ FSOSN based on Starlink’s Phase I constellation will be better
120
+ than the OFTN. This maximum value of LISL setup delay
121
+ can be a design guideline for LCT manufacturers to leverage
122
+ advantages of dynamic LISLs in NNG-FSOSNs.
123
+ The paper organization is as follows. We discuss related
124
+ work on network latency of satellite networks and examine
125
+ LISL setup delays of current LCT manufacturers in Section
126
+ II. In Section III, we elaborate on how we quantify LISL setup
127
+ delay, calculate end-to-end latency, and define performance
128
+ metrics. We present our results in Section IV, discuss insights
129
+ and design guidelines in Section V, and conclude our discussion
130
+ with some possible future extensions in Section VI.
131
+ II. RELATED WORK
132
+ Currently, Mynaric’s LCT CONDOR needs 30 seconds to
133
+ establish an LISL between two satellites for the first time. Once
134
+ the orbital parameters are exchanged between satellites, it takes 2
135
+ seconds to set up an LISL each subsequent time [10]. Tesat [13] and
136
+ General Atomics [14] have LCTs for LEO/VLEO constellations
137
+ which have LISL setup delay in the range of tens of seconds.
138
+ In any communications network, the end-to-end latency from
139
+ source to destination typically has four components: propagation
140
+ delay, transmission delay, queuing delay, and processing delay
141
+ [15]. Based on this latency model, authors of [2] compared the
142
+ latency performance of FSOSNs and OFTNs. As stated earlier,
143
+ latency-wise, FSOSNs can be a better alternative to the OFTN for
144
+ longer communication distances [11]. In that regard, the authors of
145
+ [12] and [16] have come up with the concept of crossover distance
146
+ to determine, for a certain terrestrial distance, which of the FSOSN
147
+ and the OFTN provides better latency performance.
148
+ The authors of [17] have suggested ground stations as relays as a
149
+ substitute for ISLs where satellites have transparent payloads. With
150
+ this network architecture, they proved that constellations like Star-
151
+ link can provide better latency performance compared to OFTN.
152
+ In addition to that, idle user terminals can also be used which will
153
+ provide further improvement in latency performance. However,
154
+ [18] shows that exploiting ISLs can reduce variation in latency
155
+ performance as well as reduce the effects of weather impairments.
156
+ To analyze network delay, authors of [19] have modeled each
157
+ satellite node as an M/M/1 queue in a multihop scenario where
158
+ each satellite can receive packets from a ground station as well as
159
+ other satellite nodes. The authors of [20] highlighted the importance
160
+ of temporary LISLs (defined as LISLs which are established
161
+ temporarily with satellites that are within LISL range) in order
162
+ to achieve better latency performance compared to static LISLs
163
+ in FSOSNs. They showed that with temporary LISLs, there exist
164
+ more LISLs, which provide better routing options.
165
+ They also reported that temporary LISLs are more useful at lower
166
+ LISL ranges. The authors of [12] and [20] mentioned LISL
167
+ setup delay in FSOSNs. In that respect, in FSOSNs, along
168
+ with the other four end-to-end latency components discussed
169
+ earlier, we introduce LISL setup delay to the latency model.
170
+ III. METHODOLOGY
171
+ To quantify LISL setup delay, calculate end-to-end latency
172
+ from source to destination with the LISL setup delay, investigate
173
+ the impact of LISL setup delay on overall latency, and present
174
+ design guidelines for LCT as a form of maximum tolerable value
175
+ of LISL setup delay, we simulate Starlink’s Phase I Version 2 con-
176
+ stellation in AGI’s Systems Tool Kit (STK) platform [21]. This
177
+ constellation has a total of 1584 satellites consisting 24 orbital
178
+ planes with each of them having 66 satellites [7]. The orbits are
179
+ at an inclination of 53° with respect to the equator and satellites
180
+ are at an altitude of 550 km. With these constellation parameters,
181
+ we generate this constellation’s satellites in STK with a certain
182
+ LISL range (i.e., the range over which a satellite in an FSOSN
183
+ can establish an LISL with any other satellite within this range)
184
+ along with ground stations at New York, London, Istanbul, and
185
+ Hanoi. Next we extract the data from STK (e.g., vertices, edges,
186
+ length of edges, etc.) at every second over a one-hour simulation
187
+ period to the Python platform. Then we apply Dijkstra's shortest
188
+ path algorithm [22] to find the shortest path at every time slot (equal
189
+ to one second in duration) for the source to destination pairs:
190
+ New York–London, New York–Istanbul, and New York–Hanoi.
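As a minimal illustration of this per-slot procedure (not the authors' code), the sketch below builds a graph from one time slot's edge list, assumed to have been exported from STK, and runs Dijkstra's algorithm [22]. The use of networkx and the node labels are assumptions; the paper only states that Python and Dijkstra's shortest path algorithm were used.

```python
import networkx as nx  # assumed graph library; the paper only specifies Python + Dijkstra [22]

def shortest_path_for_slot(edges, src="GS_NewYork", dst="GS_Istanbul"):
    """edges: iterable of (node_u, node_v, length_km) for a single time slot."""
    G = nx.Graph()
    for u, v, length_km in edges:
        G.add_edge(u, v, weight=length_km)
    path = nx.dijkstra_path(G, src, dst, weight="weight")
    length_km = nx.dijkstra_path_length(G, src, dst, weight="weight")
    return path, length_km
```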
191
+ In our investigation, we consider 4 different values of LISL
192
+ range: 1500 km, 1700 km, 2500 km, and 5016 km. The minimum
193
+ range to have communication with nearest neighbor at the imme-
194
+ diate left and right orbital planes is 1500 km in Starlink’s Phase
195
+ I Version 2 constellation. At this range, a satellite can connect to
196
+ two satellites in front and two at the rear in the same orbital plane,
197
+ making a total of 6 connections. At the 1700 km range, a satellite can
198
+ connect to three immediate neighbors on the left, three immediate
199
+ neighbors on the right, and four intra-orbital plane neighbors
200
+ making a total of 10 possible connections. The maximum possible
201
+ LISL range for Starlink’s Phase 1 constellation can be calculated
202
+ as 5016 km [6]. The 2500 km LISL range is taken as an
203
+ intermediate value between 1700 km and 5016 km.
204
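For reference, the 5016 km figure can be reproduced from simple geometry. The sketch below assumes, following the approach of [6], that the maximum LISL range corresponds to a link between two satellites at 550 km altitude that grazes an altitude of about 80 km; the Earth radius and grazing altitude are assumptions made here, not values restated from this paper.

```python
# Rough geometric check of the maximum LISL range for satellites at 550 km altitude.
from math import sqrt

R_EARTH_KM = 6378.0   # assumed Earth radius
H_SAT_KM = 550.0      # Starlink Phase I Version 2 altitude
H_GRAZE_KM = 80.0     # assumed minimum grazing altitude of the beam, as in [6]

def max_lisl_range_km(h_sat=H_SAT_KM, h_graze=H_GRAZE_KM, r=R_EARTH_KM):
    # The chord between two satellites at the same altitude is tangent to the
    # sphere of radius (r + h_graze).
    return 2.0 * sqrt((r + h_sat) ** 2 - (r + h_graze) ** 2)

# max_lisl_range_km() evaluates to roughly 5016 km, matching the value quoted above.
```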
+ A. Quantification of LISL Setup Delay
205
+ We define the LISL setup delay indicator (a binary variable)
206
+ as follows: if the shortest paths of the (i − 1)th time slot and the
207
+ ith time slot are exactly the same, no LISL setup delay is to be
208
+ included in the end-to-end latency, and the LISL setup delay
209
+ indicator αi is 0. If the shortest path changes from the (i−1)th to the
210
+ ith time slot, αi is 1. Considering ηs as the LISL setup delay, we
211
+ denote the end-to-end latency without and with LISL setup delay
212
+ as ηLE(without ηs) and ηLE(with ηs), respectively. In Table
213
+ I, we show the shortest paths (the satellite naming convention follows
214
+ [6]) and the corresponding values of ηLE(without ηs), αi, and
215
+ ηLE(with ηs) for the first 6 time slots over the FSOSN for the New
216
+ York to Istanbul inter-continental connection at an LISL range
217
+ of 1500 km. From Table I, we can see that the shortest path can
218
+ change from time to time. This is because, as LEO
219
+ satellites are moving at high orbital speeds, either a shortest path
220
+ at one time instance may no longer exist at the next time instance
221
+ (due to one or multiple satellites moving out of range) or a new
222
+ shortest path may become available. ηLE(without ηs) has
223
+
224
+ Table I. ηLE(without ηs), αi, ηs, and ηLE(with ηs) of the shortest paths at first 6 time slots over the FSOSN for New York–Istanbul inter-continental connection.
+ Time Slot | Shortest Path | ηLE(without ηs) (ms) | αi | ηs (ms) | ηLE(with ηs) (ms)
+ 1 | GS at New York, satellite x10919, x11115, x11312, x11509, x11609, x11611, x12166, GS at Istanbul | 38.09 | 0 | 0 | 38.09
+ 2 | GS at New York, satellite x11503, x11505, x11507, x11509, x11609, x11611, x12166, GS at Istanbul | 38.08 | 1 | 100 | 138.08
+ 3 | GS at New York, satellite x11503, x11505, x11507, x11509, x11609, x11611, x12166, GS at Istanbul | 38.07 | 0 | 0 | 38.07
+ 4 | GS at New York, satellite x11503, x11505, x11507, x11509, x11609, x11611, x12166, GS at Istanbul | 38.06 | 0 | 0 | 38.06
+ 5 | GS at New York, satellite x11503, x11505, x11507, x11509, x11609, x11611, x12166, GS at Istanbul | 38.05 | 0 | 0 | 38.05
+ 6 | GS at New York, satellite x11503, x11505, x11507, x11508, x11608, x11903, x12166, GS at Istanbul | 38.00 | 1 | 100 | 138.00
277
+ two major components: propagation delay and node delay (the sum
278
+ of the processing and queuing delays is the node delay). We calculate
279
+ the propagation delay as the sum of the lengths of all the laser links in
280
+ the shortest path divided by the speed of light in vacuum, and we
281
+ consider the node delay to be 1 ms [23]. From Table I, it is evident that
282
+ the shortest paths are not the same for the 1st and 2nd time slots, so α2 = 1.
283
+ Considering ηs = 100 ms, ηLE(with ηs) will be 38.08 + 100, i.e.,
284
+ 138.08 ms at time slot 2. The shortest path remains unchanged
285
+ from time slots 3 to 5, i.e., α3 = α4 = α5 = 0, and the corresponding
286
+ ηLE(with ηs) values remain the same as ηLE(without ηs). Then,
287
+ again at time slot 6, the shortest path changes, which makes α6 = 1.
288
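As a minimal sketch of the quantification just described (not the authors' code), the snippet below flags a path change between consecutive time slots and adds ηs only in those slots; treating the 1 ms node delay as a per-node quantity and counting ground stations as nodes are assumptions consistent with, but not stated verbatim in, the text.

```python
# Sketch of the LISL setup delay indicator alpha_i and the resulting latency.
C_VACUUM_KM_S = 299_792.458    # speed of light in vacuum, km/s
NODE_DELAY_MS = 1.0            # node delay from the text [23], assumed per node here

def latency_without_setup_ms(path_length_km, num_nodes):
    """Propagation delay over the laser links plus the node delays."""
    return path_length_km / C_VACUUM_KM_S * 1e3 + NODE_DELAY_MS * num_nodes

def latencies_with_setup_ms(paths, lengths_km, eta_s_ms):
    """paths: per-slot node sequences; lengths_km: matching total link lengths."""
    out = []
    for i, (path, length) in enumerate(zip(paths, lengths_km)):
        alpha_i = 1 if i > 0 and path != paths[i - 1] else 0
        out.append(latency_without_setup_ms(length, len(path)) + alpha_i * eta_s_ms)
    return out
```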
+ B. Path Change Rate
289
+ We simulate for 3600 time slots, one time slot being equal
290
+ to 1 second in duration, and we define the path change rate
291
+ λ as the average fraction of time slots in which the shortest path from
292
+ source to destination changes (expressed as a percentage);
293
+ mathematically, it can be calculated as
294
+ λ = (1/3600) · Σ_{i=1}^{3600} αi × 100%.                (1)
302
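Equation (1) is a simple average over the per-slot indicators; a trivial helper (the alphas list is the hypothetical output of the sketch above) could read:

```python
# Path change rate (1): percentage of the simulated slots in which the path changed.
def path_change_rate(alphas):
    return 100.0 * sum(alphas) / len(alphas)
```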
+ C. End-to-End Latency
303
+ Averaging ηLE(with ηs) and ηLE(without ηs) over the 3600
304
+ time slots, we get the average end-to-end latency with and without
305
+ ηs as ηLE(with ηs) and ηLE(without ηs), respectively. They
306
+ are related to λ and ηs as follows:
307
+ ηLE(with ηs) = ηLE(without ηs) + (λ/100) · ηs.                (2)
310
+ D. Impact of ηs
311
+ To measure the impact of the LISL setup delay ηs on the average end-
312
+ to-end latency, we define β as the percentage of delay introduced due
313
+ to ηs in the average end-to-end latency and calculate it as follows:
314
+ β = [ηLE(with ηs) − ηLE(without ηs)] / ηLE(with ηs) × 100%.                (3)
318
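Equation (3) in code form, shown only to make the normalization explicit (a trivial helper, not from the paper):

```python
# Impact of the setup delay, equation (3): share of the average latency due to eta_s.
def beta_percent(avg_with_setup_ms, avg_without_setup_ms):
    return (avg_with_setup_ms - avg_without_setup_ms) / avg_with_setup_ms * 100.0
```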
+ E. Tolerable Value of ηs
319
+ For an inter-continental connection, it is meaningful to use
320
+ the FSOSN only when ηLE(with ηs) is less than the end-to-end
321
+ latency of the OFTN, ηLE,OFTN. Using (2), the following can
322
+ be written:
+ ηLE(without ηs) + (λ/100) · ηs ≤ ηLE,OFTN.                (4)
326
+ Figure 1. Path change rate.
327
+ Now we define the maximum tolerable value of the LISL setup
328
+ delay, ηs,max, as the maximum value of ηs such that the average
329
+ end-to-end latency of the FSOSN is less than or equal to that of
330
+ the OFTN, and calculate it from (4) as follows:
331
+ ηs,max = [ηLE,OFTN − ηLE(without ηs)] / (λ/100).                (5)
335
+ To calculate ηLE,OFTN, we first determine the distance
336
+ from the source ground station to the destination ground
337
+ station along the surface of the Earth using the Haversine formula
338
+ [24] from the latitudes and longitudes of the source and destination
339
+ ground stations. Then we find ηLE,OFTN as that distance
340
+ divided by the speed of light in optical fiber (refractive
341
+ index = 1.4675), i.e., 204,287,876 m/s.
342
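A minimal sketch (not the authors' code) of this last step: the OFTN latency via the Haversine distance [24] and the maximum tolerable LISL setup delay from (5). The Earth radius is an assumption; the fiber speed follows the text.

```python
# OFTN latency via the Haversine distance and eta_s,max from equation (5).
from math import radians, sin, cos, asin, sqrt

C_FIBER_M_S = 204_287_876        # speed of light in fiber (refractive index 1.4675)
EARTH_RADIUS_KM = 6371.0         # assumed mean Earth radius

def haversine_km(lat1, lon1, lat2, lon2):
    """Great-circle distance between two points on the Earth's surface."""
    p1, p2 = radians(lat1), radians(lat2)
    dp, dl = radians(lat2 - lat1), radians(lon2 - lon1)
    a = sin(dp / 2) ** 2 + cos(p1) * cos(p2) * sin(dl / 2) ** 2
    return 2 * EARTH_RADIUS_KM * asin(sqrt(a))

def eta_oftn_ms(lat1, lon1, lat2, lon2):
    return haversine_km(lat1, lon1, lat2, lon2) * 1e3 / C_FIBER_M_S * 1e3

def eta_s_max_ms(eta_oftn, eta_without, lam_percent):
    """Equation (5); returns None when the FSOSN cannot beat the OFTN."""
    if lam_percent == 0 or eta_oftn <= eta_without:
        return None
    return (eta_oftn - eta_without) / (lam_percent / 100.0)

# With the paper's New York-Istanbul numbers at the 1500 km LISL range,
# eta_s_max_ms(39.55, 37.9, 37.5) gives about 4.4 ms, matching Fig. 4(b).
```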
+ IV. RESULTS
343
+ We consider three inter-continental connections: New York to
344
+ London (low inter-continental distance connection with terrestrial
345
+ distance=5593 km), New York to Istanbul (mid inter-continental
346
+ distance connection with terrestrial distance=8079 km), and
347
+ New York to Hanoi (high inter-continental distance connection
348
+ with terrestrial distance=13164 km). For the metrics λ, ηLE,
349
+ and β, we show bar plots for the four LISL ranges. To clearly
350
+ show both high and low values in the same figure, we use a log
351
+ scale on the y-axis in Figs. 1 to 3.
352
+ A. Path Change Rate
353
+ In Fig. 1, we plot λ with LISL range varying along x-axis for
354
+ the three inter-continental connections. For any inter-continental
355
+ connection, we can observe that λ reduces as LISL range
356
+ increases. Also note that for a particular LISL range, the more
357
+ the inter-continental distance, the higher the value of λ.
358
+ B. End-to-End Latency
359
+ Fig. 2 shows end-to-end latency for the three inter-continental
360
+ connections averaged over one hour of simulation period
361
+ without considering ηs and with four ηs values. As LISL range
362
+ increases along x-axis, both ηLE(withoutηs) (black bars) and
363
+ ηLE(withηs) (other bars) decrease. For a certain LISL range, the
364
+
365
+ [Figure 1 plot content omitted: bar chart of the path change rate λ (%) on a log scale vs. LISL Range (km) ∈ {1500, 1700, 2500, 5016} for the New York to London, New York to Istanbul, and New York to Hanoi connections.]
+ (a) New York to London.
388
+ (b) New York to Istanbul.
389
+ (c) New York to Hanoi.
390
+ Figure 2. Average end-to-end latency performance.
391
+ more the value of ηs, the more the overall latency. For example, in
392
+ Fig. 2a with LISL range of 1700 km, ηLE(withηs) is 123.9 ms
393
+ for ηs=1 sec and it reduces to 35.7 ms when ηs is considered to
394
+ be 100 ms. Also, for a certain LISL range with a certain ηs value,
395
+ the more the inter-continental distance, the more the end-to-end
396
+ latency for both the cases: ηLE(withoutηs) and ηLE(withηs).
397
+ It is interesting to note that with the increase of LISL range,
398
+ ηLE(with ηs) reduces faster compared to ηLE(without ηs).
399
+ For example, considering Fig. 2a, ηLE(withoutηs) drops from
400
+ 25.9 ms to 24.6 ms when LISL range increases from 1700
401
+ km to 2500 km. If we take the ratio and term the ratio as
402
+ reduction ratio, for this case it will be 25.9
403
+ 24.6 =1.053. Similarly,
404
+ for ηLE(with ηs), the reduction ratio will be 123.9
405
+ 92.1 = 1.345
406
+ which is greater than that of ηLE(withoutηs).
407
+ C. Impact of ηs
408
+ In Fig. 3, we show the variation of β with LISL range for
409
+ four ηs values in the three inter-continental connections. As we
410
+ see, β reduces as LISL range increases for a certain ηs value.
411
+ Also, at a certain LISL range, β reduces as ηs reduces. For
412
+ example, in Fig. 3b with LISL range of 2500 km, β is 71%
413
+ for ηs=1 sec. However, when ηs reduces to 100 ms, β reduces
414
+ to 19.7%. In addition, for a certain LISL range with a particular
415
+ ηs value, β reduces as inter-continental distance increases. For
416
+ example, assuming 1700 km of LISL range and ηs as 1 sec, β
417
+ reduces from 77% to 73.1% when inter-continental connection
418
+ changes from New York–Istanbul to New York–Hanoi.
419
+ D. Tolerable Value of ηs
420
+ In Fig. 4, we plot ηLE(withηs) and ηLE,OF T N against ηs.
421
+ Note that, ηLE(withηs) is a straight line with a constant slope
422
+ and as LISL range increases, the slope reduces. The significance
423
+ of this figure is where ηLE(withηs) for a certain LISL range
424
+ cuts ηLE,OF T N, the x-coordinate value of the intersection point
425
+ is ηs,max as beyond that point, ηLE(with ηs) will be greater
426
+ than ηLE,OF T N. To show the intersection points clearly, we
427
+ only present ηs values on the x-axis varying from 1 ms to
428
+ 100 ms where we mention the coordinates of the intersection
429
+ points. If we substitute ηLE,OF T N =39.55 ms (from Fig. 4b),
430
+ ηLE(without ηs) = 37.9 ms (from Fig. 2b), and λ = 37.5%
431
+ (from Fig. 1) for New York to Istanbul inter-continental
432
+ connection with 1500 km LISL range in (5), we get ηs,max
433
+ as 4.4 ms which matches with Fig. 4b. Also, we should observe
434
+ from Fig. 4 that, as LISL range increases, ηs,max also increases.
435
+ An interesting point to note in Fig. 4c is that it only shows two
436
+ intersection points because ηLE(withηs) for 1500 km and 1700
437
+ km LISL range straight lines (black and blue lines) never intersect
438
+ with ηLE,OF T N for ηs >1 ms values. For 1500 km LISL range,
439
+ ηLE(withoutηs)=66.5 ms (from Fig. 2c) and ηLE,OF T N=64.44
440
+ ms (from Fig. 4c). Putting these values in (5), we get negative
441
+ ηs,max value which does not exist. Similarly, for 1700 km LISL
442
+ range, ηLE(withoutηs)≈ηLE,OF T N which makes ηs,max ≈0.
443
+ V. INSIGHTS AND DESIGN GUIDELINES
444
+ A. Insights
445
+ 1) Path Change Rate
446
+ • As LISL range increases, there will be fewer hops, i.e., a smaller
447
+ number of satellites for the signal to traverse from source to
448
+ destination. For example, in New York to Istanbul inter-
449
+ continental connection, average number of hops drops from
450
+ 7 to 6 when LISL range increases from 1500 km to 1700 km.
451
+ The lesser the number of hops, the lesser is the chance of a
452
+ new shortest path. This in turn reduces λ. Also, when the LISL
453
+ range increases, two satellites remain in communication range
454
+ for a longer time span. One of the reasons for the shortest
455
+ path to change is satellites going out of LISL range, and a
456
+ shortest path tends to change less often with a longer LISL range.
457
+ Due to these two reasons, λ reduces as LISL range increases.
458
+ • For a certain LISL range, the longer the inter-continental
459
+ distance, the higher the average number of hops which leads
460
+ to more chances of a new shortest path, and this increases
461
+ the path change rate, λ.
462
+ 2) End-to-End Latency
463
+ • Increase in LISL range reduces the number of hops which
464
+ reduces total node delays. This decreases ηLE(withoutηs)
465
+ with the increase in LISL range. Now, as both λ and
466
+ ηLE(withoutηs) decrease with the increase of LISL range,
467
+ from (2), it is clear that ηLE(withηs) also decreases.
468
+ • From (2), we can see that if ηs reduces, ηLE(with ηs)
469
+ reduces for a certain LISL range.
470
+ • For a certain LISL range with a certain ηs value, when
471
+ inter-continental distance increases, λ increases. From (2), we
472
+ can say that ηLE(withηs) will increase with the increase of
473
+ λ. For longer inter-continental connections, propagation delay
474
+ as well as node delay (due to a larger number of hops) is larger,
475
+ which increases ηLE(without ηs) for longer inter-continental
476
+ connections.
477
+ [Figure 2 plot content omitted: average end-to-end latency ηLE (ms) on a log scale vs. LISL Range (km) ∈ {1500, 1700, 2500, 5016}, without ηs and with ηs = 1 sec, 100 ms, 10 ms, and 1 ms, for the three connections.]
+ Figure 3 ((a) New York to London, (b) New York to Istanbul, (c) New York to Hanoi). Impact of ηs on end-to-end latency.
+ Figure 4 ((a) New York to London, (b) New York to Istanbul, (c) New York to Hanoi). Maximum tolerable value of ηs.
+ • We consider that at LISL ranges la and lb (lb > la), the path
+ change rates are λa and λb, respectively. Also, the average
+ end-to-end latencies without ηs are ηLEa(without ηs)
588
+ and ηLEb(without ηs), respectively. From the values in Figs. 1 and 2
+ we observe that λa/λb > ηLEa(without ηs)/ηLEb(without ηs). For example,
+ considering the New York to Istanbul inter-continental connection and
+ assuming la = 1500 km and lb = 1700 km, λa/λb = 37.5/12.3 = 3.049
+ and ηLEa(without ηs)/ηLEb(without ηs) = 37.9/36.9 = 1.027. Now, we can write the
+ following:
+ λa/λb > ηLEa(without ηs)/ηLEb(without ηs),                (6)
+ (λa ηs/100) / ηLEa(without ηs) > (λb ηs/100) / ηLEb(without ηs),                (7)
+ 1 + (λa ηs/100) / ηLEa(without ηs) > 1 + (λb ηs/100) / ηLEb(without ηs),                (8)
+ [ηLEa(without ηs) + (λa/100) ηs] / ηLEa(without ηs) > [ηLEb(without ηs) + (λb/100) ηs] / ηLEb(without ηs).                (9)
+ Assuming the average end-to-end latency with ηs as ηLEa(with ηs) and ηLEb(with ηs) for LISL ranges la and lb,
+ respectively, and using (2), we can rewrite (9) as follows:
+ ηLEa(with ηs) / ηLEb(with ηs) > ηLEa(without ηs) / ηLEb(without ηs).                (10)
637
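A quick numerical check of (6)–(10) with the New York–Istanbul values quoted above (λa = 37.5%, λb = 12.3%, ηLEa(without ηs) = 37.9 ms, ηLEb(without ηs) = 36.9 ms); the sampled ηs values are arbitrary.

```python
# Verify inequality (10) for the 1500 km vs. 1700 km comparison using equation (2).
lam_a, lam_b = 37.5, 12.3            # path change rates, %
base_a, base_b = 37.9, 36.9          # average latencies without eta_s, ms
for eta_s in (1.0, 10.0, 100.0, 1000.0):   # ms
    with_a = base_a + lam_a / 100.0 * eta_s
    with_b = base_b + lam_b / 100.0 * eta_s
    assert with_a / with_b > base_a / base_b   # inequality (10) holds
```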
+ 3) Impact of ηs
638
+ • We have seen that ηLE(with ηs) reduces faster compared
+ to ηLE(without ηs) as LISL range increases. Thus, the ratio
+ ηLE(without ηs)/ηLE(with ηs) increases as LISL range increases. From (3),
+ since β is proportional to [1 − ηLE(without ηs)/ηLE(with ηs)],
+ β reduces with the increase of LISL range.
+ • ηLE(with ηs) reduces when ηs reduces but ηLE(without ηs)
+ remains the same, which causes the ratio ηLE(without ηs)/ηLE(with ηs) to
+ increase. As β is proportional to [1 − ηLE(without ηs)/ηLE(with ηs)], it
+ decreases when ηs reduces.
661
+ • Let us consider that for inter-continental distances dx and dy
+ (dy > dx), the path change rates are λx and λy, respectively. Also, the
+ average end-to-end latencies without ηs are ηLEx(without ηs)
+ and ηLEy(without ηs), respectively. From the values in Figs. 1 and 2,
+ we also observe that λx/λy > ηLEx(without ηs)/ηLEy(without ηs) (note that
+ in this discussion, we are varying inter-continental distance, not
+ LISL range). For example, at the 1700 km LISL range, for the New
+ York to Istanbul and New York to Hanoi inter-continental con-
+ nections, λx, λy, ηLEx(without ηs), and ηLEy(without ηs)
+ are 12.3%, 17.5%, 36.9 ms, and 64.4 ms, respectively, from
+ which we get λx/λy = 0.703 and ηLEx(without ηs)/ηLEy(without ηs) = 0.573. Using
+ the approach in (6)–(10), we can come to the conclusion that
+ ηLEx(without ηs)/ηLEx(with ηs) < ηLEy(without ηs)/ηLEy(with ηs), where ηLEx(with ηs) and
+ ηLEy(with ηs) are the average end-to-end latencies considering ηs
+ for inter-continental distances dx and dy; i.e., ηLE(without ηs)/ηLE(with ηs) in-
+ creases as inter-continental distance increases, which reduces β.
688
+ 4) Tolerable Value of ηs
689
+ [Figure 3 plot content omitted: β (%) on a log scale vs. LISL Range (km) ∈ {1500, 1700, 2500, 5016} for ηs = 1 sec, 100 ms, 10 ms, and 1 ms, for the three connections.]
+ [Figure 4 plot content omitted: ηLE (ms) vs. ηs (ms) for the FSOSN at LISL ranges 1500, 1700, 2500, and 5016 km, together with the OFTN latency; the marked intersection points are (2.30, 27.38), (15.10, 27.38), (40.88, 27.38), (70.34, 27.38) for New York–London, (4.40, 39.55), (21.54, 39.55), (45.40, 39.55), (76.87, 39.55) for New York–Istanbul, and (24.58, 64.44), (80.63, 64.44) for New York–Hanoi.]
+ • (2) represents the equation of a straight line with slope
+ proportional to λ, considering ηLE(with ηs) as the y variable and
+ ηs as the x variable. As LISL range increases, λ decreases,
+ which makes the slope of the straight lines reduce. In
+ addition to λ, ηLE(without ηs) also reduces with the
+ increase of LISL range, and from (5) we can say that ηs,max
843
+ Ns (ms)increase of LISL range, and from (5) we can say that ηs,max
844
+ increases with increase in LISL range.
845
+ B. Design Guidelines
846
+ The values we get from (5) are exactly same as we
847
+ get from intersection points shown in Fig. 4. Given that
848
+ ηLE,OF T N, ηLE(withoutηs), and λ are known for a particular
849
+ inter-continental connection for a certain LISL range, (5) can
850
+ be used to design LCTs in order to exploit full potential of
851
+ NNG-FSOSNs. For example:
852
+ • For New York to London inter-continental connection with
853
+ 1500 km of LISL range, ηLE,OF T N, ηLE(withoutηs), and λ
854
+ are 27.38 ms, 26.6 ms, and 33.9%, respectively. Putting these
855
+ values in (5), we get ηs,max as 2.3 ms (same as in Fig. 4a).
856
+ • With 1700 km LISL range for New York to Istanbul
857
+ inter-continental connection, putting ηLE,OF T N=39.55 ms,
858
+ ηLE(without ηs)=36.9 ms, and λ=12.3% in (5), we get
859
+ ηs,max=21.54 ms, i.e., exactly as shown in Fig. 4b.
860
+ • Considering New York to Hanoi inter-continental connection
861
+ with 5016 km of LISL range, values of ηLE,OF T N,
862
+ ηLE(withoutηs), and λ are 64.44 ms, 56.7 ms, and 9.6%,
863
+ respectively. Using these values in (5), we get ηs,max equal
864
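The three examples above can be checked numerically with equation (5); the values below are the ones quoted in the text, and the small print loop is only an illustration, not the authors' code.

```python
# Quick numerical check of the design-guideline examples (values from the text).
examples = [
    ("New York-London, 1500 km",   27.38, 26.6, 33.9),   # expected ~2.3 ms
    ("New York-Istanbul, 1700 km", 39.55, 36.9, 12.3),   # expected ~21.5 ms
    ("New York-Hanoi, 5016 km",    64.44, 56.7, 9.6),    # expected ~80.6 ms
]
for name, eta_oftn, eta_without, lam in examples:
    eta_s_max = (eta_oftn - eta_without) / (lam / 100.0)   # equation (5)
    print(name, round(eta_s_max, 2), "ms")
```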
+ to 80.63 ms (same as in Fig. 4c).
865
+ VI. CONCLUSION AND FUTURE WORK
866
+ Dynamic LISLs are essential to leverage the full potential
867
+ of NNG-FSOSNs due to their on-demand flexibility. However,
868
+ whenever a new LISL is established, LISL setup delay is added to
869
+ the end-to-end latency. To model the end-to-end latency including
870
+ LISL setup delay, we study the quantification of LISL setup delay,
871
+ and calculate the end-to-end latencies for low, medium, and high
872
+ inter-continental distance connections for different LISL setup
873
+ delay values. We find that the end-to-end latency depends on path
874
+ change rate which reduces as LISL range increases but increases
875
+ as inter-continental distance increases. We also highlight the
876
+ impact of LISL setup delay on total end-to-end latency which
877
+ clearly indicates that LISL setup delay cannot be ignored. We
878
+ observe that the impact of LISL setup delay reduces as LISL
879
+ range or inter-continental distance increases. We also deduce
880
+ the formula to find maximum tolerable value of LISL setup
881
+ delay which represents design guidelines for LCT manufacturers
882
+ so that FSOSNs can have better latency performance compared
883
+ to OFTN. We see that for some LISL range, there does not
884
+ exist any such value of ηs,max. An interesting takeaway point is
885
+ that higher LISL range has two major benefits. Firstly, highest
886
+ possible LISL range has the best latency performance. Secondly,
887
+ it has the highest value of ηs,max which can be attainable.
888
+ However, with high LISL range, the penalty is more satellite
889
+ transmission power and energy consumption.
890
+ It is evident that due to change of shortest path with time
891
+ slots, LISL setup delay is introduced which negatively impacts
892
+ the latency of an FSOSN using dynamic LISLs. In order to
893
+ minimize end-to-end latency, we need to minimize the path
894
+ change rate so that LISL setup delay is introduced less often.
895
+ In future, we plan to develop algorithms to minimize the path
896
+ change rate for a better latency performance.
897
+ ACKNOWLEDGMENT
898
+ This work was supported by the High Throughput and Secure
899
+ Networks Challenge Program at the National Research Council
900
+ of Canada. The authors would also like to acknowledge Dr.
901
+ Pablo Madoery for his technical help and feedback.
902
+ REFERENCES
903
+ [1] T. Ahmmed, A. Alidadi, Z. Zhang, A. U. Chaudhry, and H. Yanikomeroglu,
904
+ “The Digital Divide in Canada and the Role of LEO Satellites in Bridging the
905
+ Gap,” IEEE Communications Magazine, vol. 60(6), pp. 24–30, Jun. 2022.
906
+ [2] A. U. Chaudhry and H. Yanikomeroglu, “Optical Wireless Satellite
907
+ Networks versus Optical Fiber Terrestrial Networks: The Latency
908
+ Perspective–Invited Paper,” in Proc. 30th Biennial Symposium on
909
+ Communications, Saskatoon, Canada, 2021, pp. 1–6.
910
+ [3] D. Bhattacharjee, T. Acharya, and S. Chakravarty, “Energy Efficient Data
911
+ Gathering in IoT Networks With Heterogeneous Traffic for Remote Area
912
+ Surveillance Applications: A Cross Layer Approach,” IEEE Transactions
913
+ on Green Communications and Networking, vol. 5(3), pp. 1165–1178,
914
+ Sep. 2021.
915
+ [4] 3GPP, “Technical Specification Group Radio Access Network, Study on
916
+ New Radio (NR) to Support Non-Terrestrial Networks (NTN),” Oct. 2020,
917
+ TR 38.811, v15.4.0, Release 15.
918
+ [5] A. U. Chaudhry and H. Yanikomeroglu, “Free Space Optics for
919
+ Next-Generation Satellite Networks,” IEEE Consumer Electronics
920
+ Magazine, vol. 10(6), pp. 21–31, Nov. 2021.
921
+ [6] A. U. Chaudhry and H. Yanikomeroglu, “Laser Intersatellite Links in
922
+ a Starlink Constellation: A Classification and Analysis,” IEEE Vehicular
923
+ Technology Magazine, vol. 16(2), pp. 48–56, Jun. 2021.
924
+ [7] SpaceX FCC update, 2018, "SpaceX Non-Geostationary Satellite System, Attachment A, Technical Information to Supplement Schedule S," [Online]. Available: https://licensing.fcc.gov/myibfs/download.do?attachment key=1569860, accessed on Oct. 2, 2022.
938
+ [8] H. Kaushal, V. Jain, and S. Kar, “Acquisition, Tracking, and Pointing,”
939
+ in Free Space Optical Communication.
940
+ New Delhi: Springer-Verlag,
941
+ 2017, pp. 119–137.
942
+ [9] Y. Kaymak, R. Rojas-Cessa, J. Feng, N. Ansari, M. Zhou, and T. Zhang,
943
+ “A Survey on Acquisition, Tracking, and Pointing Mechanisms for Mobile
944
+ Free-Space Optical Communications,” IEEE Communications Surveys
945
+ & Tutorials, vol. 20(2), pp. 1104–1123, 2018.
946
+ [10] C. Carrizo, M. Knapek, J. Horwath, D. D. Gonzalez, and P. Cornwell,
947
+ “Optical Inter-Satellite Link Terminals for Next Generation Satellite
948
+ Constellations,” in Proc. Society of Photo-Optical Instrumentation
949
+ Engineers (SPIE), vol. 11272, 2020, pp. 1–11.
950
+ [11] M. Handley, “Delay is Not an Option: Low Latency Routing in Space,”
951
+ in Proc. 17th ACM Workshop on Hot Topics in Networks, Redmond, WA,
952
+ USA, 2018, pp. 85–91.
953
+ [12] A. U. Chaudhry and H. Yanikomeroglu, “When to Crossover from Earth to
954
+ Space for Lower Latency Data Communications?” IEEE Transactions on
955
+ Aerospace and Electronic Systems, vol. 58(5), pp. 3962–3978, Mar. 2022.
956
+ [13] TESAT, "Laser products." [Online]. Available: https://www.tesat.de/products#laser, accessed on Oct. 2, 2022.
962
+ [14] General Atomics, “General Atomics Partners with Space Development
963
+ Agency to Demonstrate Optical Intersatellite Link,” Jun. 2020, [Online].
964
+ Available:
965
+ https://www.ga.com/general-atomics-partners-with-space-
966
+ development-agency-to-demonstrate-optical-intersatellite-link, accessed
967
+ on Oct. 2, 2022.
968
+ [15] J. F. Kurose and K. W. Ross, Computer Networks: A Top Down Approach
969
+ Featuring the Internet, Boston: Addison-Wesley, 2010.
970
+ [16] A. U. Chaudhry and H. Yanikomeroglu, “On Crossover Distance for Optical
971
+ Wireless Satellite Networks and Optical Fiber Terrestrial Networks,” in Proc.
972
+ 2022 IEEE Future Networks World Forum, Montreal, Canada, 2022, pp. 1–6.
973
+ [17] M. Handley, “Using Ground Relays for Low-Latency Wide-Area Routing
974
+ in Megaconstellations,” in Proc. 18th ACM Workshop on Hot Topics in
975
+ Networks, New York, NY, USA, 2019, pp. 125–132.
976
+ [18] Y. Hauri, D. Bhattacherjee, M. Grossmann, and A. Singla, ““Internet from
977
+ Space” without Inter-Satellite Links?” in Proc. 19th ACM Workshop on
978
+ Hot Topics in Networks, New York, NY, USA, 2020, pp. 205–211.
979
+ [19] B. Soret, S. Ravikanti, and P. Popovski, “Latency and Timeliness
980
+ in Multi-Hop Satellite Networks,” in Proc. 2020 IEEE International
981
+ Conference on Communications (ICC), Dublin, Ireland, pp. 1–6.
982
+ [20] A. U. Chaudhry and H. Yanikomeroglu, “Temporary Laser Inter-Satellite
983
+ Links in Free-Space Optical Satellite Networks,” IEEE Open Journal
984
+ of the Communications Society, vol. 3, pp. 1413–1427, Aug. 2022.
985
+ [21] AGI, "Systems Tool Kit (STK)," [Online]. Available: https://www.agi.com/products/stk, accessed on Oct. 2, 2022.
993
+ [22] E. W. Dijkstra, “A Note on Two Problems in Connexion with Graphs,”
994
+ Numerische Mathematik, vol. 1, pp. 269–271, Dec. 1959.
995
+
996
+ [23] R. Hermenier, C. Kissling, and A. Donner, “A Delay Model for Satellite
997
+ Constellation Networks with Inter-Satellite Links,” in Proc. 2009
998
+ International Workshop on Satellite and Space Communications, Siena,
999
+ Italy, 2009, pp. 3–7.
1000
+ [24] C. C. Robusto, “The Cosine-Haversine Formula,” The American
1001
+ Mathematical Monthly, vol. 64(1), pp. 38–40, Jan. 1957.
1002
+
4dE4T4oBgHgl3EQf0w2N/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4tAyT4oBgHgl3EQfpPhP/content/tmp_files/2301.00521v1.pdf.txt ADDED
@@ -0,0 +1,3696 @@
A RL-based Policy Optimization Method Guided by Adaptive Stability Certification

Shengjie Wang 1, Fengbo Lan 1, Xiang Zheng 2, Yuxue Cao 3, Oluwatosin Oseni 4, Haotian Xu 1, Yang Gao 5, Tao Zhang 1

arXiv:2301.00521v1 [cs.RO] 2 Jan 2023

Abstract

In contrast to control-theoretic methods, the lack of a stability guarantee remains a significant problem for model-free reinforcement learning (RL) methods. Jointly learning a policy and a Lyapunov function has recently become a promising approach to equipping the whole system with a stability guarantee. However, the classical Lyapunov constraints introduced in prior work cannot stabilize the system during sampling-based optimization. Therefore, we propose the Adaptive Stability Certification (ASC), which makes the system reach sampling-based stability. Because the ASC condition can search for the optimal policy heuristically, we design the Adaptive Lyapunov-based Actor-Critic (ALAC) algorithm based on the ASC condition. Meanwhile, our algorithm avoids the optimization problem of current approaches in which a variety of constraints are coupled into the objective. When evaluated on ten robotic tasks, our method achieves lower accumulated cost and fewer stability constraint violations than previous studies.

1 Department of Automation, Tsinghua University; 2 Department of Computer Science, City University of Hong Kong; 3 Beijing Institute of Control Engineering; 4 Covenant University; 5 Institute for Interdisciplinary Information Sciences, Tsinghua University. Correspondence to: Tao Zhang <taozhang@tsinghua.edu.cn>.

[Figure 1: schematic of the state space with the nested sets {s | cπ(s) ≤ b} and {s | cπ(s) = 0}, the start state s0, and four example trajectories: diverging, unstable, sub-optimal stable and optimal stable.]

Figure 1. Intuitive example showing the mean cost stability according to Definition 3.1. The figure shows the relationship between three sets: the whole state space, {s | cπ(s) ≤ b} and {s | cπ(s) = 0}. The red line represents a diverging trajectory. The yellow line represents a trajectory without mean cost stability. The trajectories coloured cyan and purple remain stable, whereas the state in the purple trajectory reaches the set {s | cπ(s) = 0} more quickly. The task aims to obtain the policy that can generate the purple trajectory.

1. Introduction

Learning-based (especially reinforcement-learning-based) controllers have become increasingly popular and have achieved excellent performance in non-linear dynamic systems (Hwangbo et al., 2019; Andrychowicz et al., 2020). However, the lack of certain safety notions introduces additional risks to the agents and environments. Stability is a crucial notion of safety, whose violation can cause unsafe behaviours (Jin et al., 2020). Fortunately, control-theoretic approaches provide an effective tool to assess stability: Lyapunov functions. Lyapunov functions can be designed for a linear system with specific criteria in the form of a quadratic positive-definite function, but how to find a suitable Lyapunov function remains an open challenge for non-linear dynamic systems (La Salle & Lefschetz, 2012).

Thanks to the emergence of deep learning, researchers noticed that a neural Lyapunov function has strong representation ability, thereby making it possible to search for a feasible Lyapunov function. According to Lyapunov's second method for stability, the neural Lyapunov function is trained by forcing the updating direction toward decreasing the Lyapunov function along an episode's state trajectories (Chang et al., 2019). Early on, these methods were only suitable for the model-based setting, because the difference of the Lyapunov function with respect to time requires a dynamics model (Berkenkamp et al., 2017; Richards et al., 2018). In contrast to model-based methods, model-free reinforcement learning methods have achieved superior performance on many complex robotic systems (Hwangbo et al., 2019; Andrychowicz et al., 2020). Therefore, researchers proposed several methods to overcome the above issue. Firstly, they applied the discrete Lyapunov stability condition directly in the optimization. The Lyapunov conditions can then be viewed as multiple constraints, and prior studies added those constraints into the objective function. Specifically, the objective function enables the system to reach optimality and stability together. Furthermore, it is worth noting that current RL-based methods with a stability guarantee have been applied successfully to practical problems, such as monitoring the security of interconnected microgrids (Huang et al., 2021), power system control (Zhao et al., 2021), automatic assembly (Li et al., 2022) and motion planning of autonomous vehicles (Zhang et al., 2021).

Admittedly, RL-based approaches aided by a Lyapunov function achieve promising performance. However, the discrete Lyapunov stability condition should be satisfied over the whole state space, which means infinitely many constraints for continuous control tasks. Due to the sampling-based optimization in RL, current approaches sample data pairs randomly, which causes a serious problem: the discrete stability condition is not convincing. Furthermore, reaching optimality and stability together remains a challenge for their objective functions. Because the Lyapunov stability condition introduces various constraints into the objective function, it is hard to balance the optimality and stability of the system. In practical experiments, we find that previous constraints are either so loose that they yield ineffective Lyapunov functions or so tight that the policy gets trapped at a sub-optimal point. Therefore, to improve on current research, we hope the Lyapunov-based constraint can facilitate the policy to reach the optimum within the set of sampling-based stability. To better understand the principle, we give an intuitive example in Figure 1.

To address the above issues, we propose the Adaptive Lyapunov-based Actor-Critic algorithm (ALAC). Unlike the discrete Lyapunov stability condition, we propose a novel sampling-based stability condition called Adaptive Stability Certification (ASC). Meanwhile, the certification can guide the policy to search for the optimal point heuristically. Thus, based on the ASC condition, we design the Adaptive Lyapunov-based Actor-Critic algorithm (ALAC) to reach the optimality and stability of the system. Thanks to supervised learning of a Lyapunov candidate and Lagrangian-based policy optimization, our method eliminates the coupling between the various constraints and the objective function. Experiments show that our method provides promising results under stability constraints on a range of robotic control problems, such as walking of legged robots and trajectory planning of a free-floating space robot. Another interesting finding is that, in contrast to traditional RL algorithms, our method helps the controller to enhance the robustness, generalization, and efficiency of the whole system.1

1 See our project page at https://sites.google.com/view/adaptive-lyapunov-actor-critic.
2. Related works

Due to the difficulty of manual design, constructing a Lyapunov neural network has become increasingly popular for non-linear dynamic systems. For the model-known situation, existing approaches jointly learn a Lyapunov function and a controller (Richards et al., 2018; Chang et al., 2019; Mittal et al., 2020; Dai et al., 2020; Lechner et al., 2021; Donti et al., 2020; Gaby et al., 2021). But this restricts the application to complex systems whose accurate models are hard to obtain. Therefore, some researchers present model-learned methods with a stability guarantee, in which a Gaussian process or a neural network approximates the model. Model-learned methods can be separated into two types. The first learns dynamics models guided by a learnable Lyapunov function, in which policies are inherently included or learned by the LQR method (Kashima et al., 2022; Zhou et al., 2022; Chen et al., 2021; Lawrence et al., 2020; Schlaginhaufen et al., 2021). Another approach is to construct a learnable policy network updated by a neural Lyapunov function, thereby satisfying the stability of the system (Berkenkamp et al., 2017; Dawson et al., 2022; Zhou et al., 2022; Dai et al., 2021; Lale et al., 2022). However, we notice that most model-learned methods are only verified in relatively easy environments. A possible reason is that the coupling of the Lyapunov function and the dynamics model makes learning unstable or incompatible due to interdependency.

A promising direction is to study model-free methods with a stability guarantee. Recently, a large variety of methods have been proposed to address the issue. One approach updates the policy with a mixed objective involving the neural Lyapunov function and the Q function. POLYC (Chang & Gao, 2021) introduced the necessary conditions required by the Lyapunov function into the objectives used to optimize the policy network. LBPO (Sikchi et al., 2021) applied a logarithmic barrier function based on the form of the Lyapunov function. TNLF (Xiong et al., 2022) constructed Lyapunov V and Q functions trained by the stability certification. The other form is policy optimization with Lyapunov constraints. Chow et al. (2018) designed a constrained RL algorithm to project a policy into a trust region with Lyapunov stability. Han et al. (2020a) provided a novel constrained RL-based approach called LAC, using the primal-dual method to modify the constraint. Their later work verified it in both on-policy and off-policy settings (Han et al., 2021; 2020b). In these previous studies, there still exist two main drawbacks to obtaining the optimal policy and a suitable Lyapunov function. The discrete Lyapunov condition they used does not meet the demand for a sampling-based stability guarantee in RL. Furthermore, a combined objective function built via simple addition causes a sub-optimal policy or an invalid Lyapunov function, especially for multiple and complex constraints. In contrast, our Adaptive Lyapunov-based Actor-Critic algorithm satisfies the sampling-based stability and searches for the optimal policy heuristically.
3. Problem Formulation

The first step in implementing RL-based algorithms is to formulate robotic control problems as a Markov Decision Process (MDP). In our paper, an MDP mainly consists of four elements: S, A, P, and C. Here, S is the state space, A is the action space, P is the dynamic transition function, and C is the cost function. At timestep t, s_t ∈ S represents the state the robot observes, and a_t ∈ A is the action executed by the agent (robot). Note that a_t is sampled from the agent's policy π(a_t|s_t). According to P(s_{t+1}|s_t, a_t), the state of the system transfers to the next state s_{t+1} with a certain probability. At the same time, the agent receives the cost cπ(s_t) = E_{a∼π} C(s_t, a_t). Besides, the distribution of the starting state is denoted s0 ∼ ρ. We can then define the state distribution T:

T(s | ρ, π, t + 1) = ∫_S ∫_A π(a_t|s_t) P(s_{t+1}|s_t, a_t) da T(s | ρ, π, t) ds    (1)

Note that T(s | ρ, π, 0) = ρ holds. The objective is to find the optimal policy π∗ that minimizes Eπ[Σ_{t=0}^∞ γ^t cπ(t) | s0 = s], where γ is a discounting factor. An MDP system corresponds to a deterministic, continuous-state, discrete-time dynamical system s_{t+1} = f(s, a) with state space S and action space A. Therefore, the system's stability can be verified with a classical tool, Lyapunov's stability function (see Appendix A.3). If a Lyapunov function exists, a discrete-time system can achieve stability in the sense of Lyapunov, as described in Appendices A.1 and A.2.

In order to search for a Lyapunov function, current approaches attempt to train a Lyapunov network by minimizing the requirements in Appendix A.3. However, optimization-based methods meet a common problem: the trained Lyapunov function remains close to 0 along whole trajectories, resulting in ineffective guidance for the policy. More importantly, the classical definition is based on the whole state space, so it is unsuitable for RL-based methods that rely on sampling-based optimization.

Therefore, we should extend stability to a case that is reasonable in RL. We notice that for most robotic tasks, cost functions are related to the stability of the closed-loop system. For example, the goal of stabilization tasks is to eventually drive the norm of the state to zero, where the cost function is C(s, a) = E_{P(·|s_t,a_t)} ∥s_{t+1}∥. Another main task type is the tracking task, which aims to reach a reference state r; the cost function can be denoted as C(s, a) = E_{P(·|s_t,a_t)} ∥s_{t+1} − r∥. In this context, we introduce a Mean Cost Stability to connect stability and cost.

Definition 3.1 (Mean Cost Stability). A robotic system remains stable in mean cost when it satisfies the following equation, where b is a constant (Han et al., 2020a):

lim_{t→∞} E_{s_t∼T} cπ(s_t) = 0,  cπ(s0) ≤ b    (2)

More importantly, the above definition corresponds to mean square stability when the cost function is chosen to be the norm of the state; it is also equivalent to partial stability in tracking tasks (Han et al., 2020a).

In addition, a stability definition expressed in terms of cost avoids an inconsistent scale between the objective and the constraint. Specifically, the problem can be formulated as follows:

min_π E_{ρ,π,P}[Σ_{t=0}^∞ γ^t cπ(s_t)]   s.t.   lim_{t→∞} E_{s_t∼T} cπ(s_t) = 0    (3)
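As a concrete illustration of Definition 3.1 (our own sketch, not part of the original paper), the snippet below evaluates the per-step cost of a tracking task and uses the average cost over the tail of a finite rollout as a practical proxy for the limit in Equation (2); the rollout array, the reference `r`, the tail length and the tolerance are assumptions made for this example.

```python
import numpy as np

def tracking_cost(next_states: np.ndarray, r: np.ndarray) -> np.ndarray:
    """Per-step cost c_pi(s_t) = ||s_{t+1} - r|| for a tracking task."""
    return np.linalg.norm(next_states - r, axis=-1)

def empirically_mean_cost_stable(states: np.ndarray, r: np.ndarray,
                                 tail: int = 50, tol: float = 1e-2) -> bool:
    """Finite-horizon proxy for lim_{t->inf} E[c_pi(s_t)] = 0:
    the mean cost over the last `tail` steps should fall below `tol`."""
    costs = tracking_cost(states[1:], r)        # c_pi depends on the next state
    return float(costs[-tail:].mean()) < tol

# Example with a synthetic rollout that decays toward the reference.
r = np.zeros(3)
rollout = np.array([np.ones(3) * 0.9 ** t for t in range(200)])
print(empirically_mean_cost_stable(rollout, r))  # True
```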
4. Adaptive Lyapunov-based Actor-Critic Algorithm

To target the above problem, we propose the Adaptive Lyapunov-based Actor-Critic algorithm (ALAC) in this section. The main contents are as follows: 1) we design an Adaptive Stability Certification (ASC), a union of two objectives that guarantees a system's mean cost stability and guides the policy to search for the optimal point heuristically (Section 4.1); 2) we construct an Actor-Critic optimization framework based on ASC and use a supervised learning method to update the parameters of the Lyapunov critic network (Section 4.2); 3) we apply the primal-dual method to update the parameters of the policy network and to tune the parameters of ASC adaptively (Section 4.3).

4.1. Adaptive Stability Certification

In order to achieve the mean cost stability in Definition 3.1, we first give a reasonable assumption ensuring that the starting state is sampled in the region of attraction, which is represented as the starting state space.

Assumption 4.1 (Region of Attraction). There exists a positive constant b such that ρ(s) > 0, ∀s ∈ {s | cπ(s) ≤ b}.

Another assumption is the existence of the stationary state distribution, which is generally exploited in the RL literature.

Assumption 4.2 (Ergodic Property). The Markov chain driven by the policy π is ergodic, which means the following equation holds:

ωπ(s) = lim_{t→∞} T(s | ρ, π, t)    (4)

Furthermore, we define a new variable Uπ for the subsequent derivation:

Uπ = lim_{T→∞} (1/T) Σ_{t=0}^{T} T(s | ρ, π, t)    (5)

Based on these mild assumptions, we formalize the sampling-based Lyapunov stability that meets the mean cost stability in Theorem 4.3 below, proven in Appendix B.1.

Theorem 4.3 (Sampling-based Lyapunov Stability). An MDP system is stable with regard to the mean cost if there exists a function L : S → R that meets the following conditions:

α cπ(s) ≤ L(s) ≤ β cπ(s)    (6)

L(s) ≥ cπ(s) + λ E_{s′∼Pπ} L(s′)    (7)

E_{s∼Uπ}[E_{s′∼Pπ} L(s′) − L(s)] ≤ −k E_{s∼Uπ}[L(s) − λ E_{s′∼Pπ} L(s′)]    (8)

where α, β, λ and k are positive constants, and Pπ(s′|s) = ∫_A π(a|s) P(s′|s, a) da.

In practice, our method constructs sampling-based requirements for the Lyapunov function. Taking advantage of the sampling-based stability, we can learn a policy that guarantees the system's stability within the RL framework. Moreover, finding a Lyapunov function directly remains a challenge when the search space becomes large due to the increasing dimension of the state. To mitigate the issue, we use a Lyapunov candidate related to the sum of costs, i.e., the value function, which has been proven to be a valid Lyapunov candidate for stability analysis in a previous study (Mayne et al., 2000). Thus, we can denote the Lyapunov candidate as:

Lπ(s) = Eπ[Σ_{t=0}^∞ γ^t cπ(s_t) | s0 = s]    (9)

It is noteworthy that the Lyapunov candidate naturally meets the constraints in Equations (6) and (7) when λ ≤ γ holds; see Appendix A.2 for a detailed demonstration.
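For intuition, the following sketch (illustrative only; the batch tensors and the Lyapunov function `L_fn` are placeholders we introduce here) estimates the empirical version of condition (8) on a batch of sampled transitions; a non-positive value indicates that the sampling-based constraint holds on that batch.

```python
import torch

def asc_violation(L_fn, s, a, s_next, a_next, lam: float, k: float) -> torch.Tensor:
    """Empirical estimate of E[L(s') - L(s)] + k * E[L(s) - lam * L(s')]
    over a sampled batch (left side minus right side of condition (8))."""
    L_s = L_fn(s, a)                    # L_theta(s, a)
    L_s_next = L_fn(s_next, a_next)     # L_theta(s', pi(s'))
    delta = (L_s_next - L_s) + k * (L_s - lam * L_s_next)
    return delta.mean()

# Usage (shapes are arbitrary placeholders):
# viol = asc_violation(critic, s, a, s_next, policy(s_next), lam=0.99, k=0.5)
# constraint_satisfied = bool(viol <= 0)
```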
[Figure 2: schematic of the sequences {W(t)} and {R(t)} between steps t and t+1, showing W(t+1) dropping by k(W(t) − R(t)) and both sequences shifting downward as λ decreases.]

Figure 2. Illustrative example of how the ASC condition can minimize L(s). Note that {W(t)}_{t=0}^∞ = {E L(s)}_{t=0}^∞ and {R(t)}_{t=0}^∞ = {λ E L(s′)}_{t=0}^∞. The blue point denotes W(t) and the orange point denotes R(t), respectively. W(t + 1) should decrease by k(W(t) − R(t)) according to the ASC condition. Besides, as {R(t)}_{t=0}^∞ decreases with λ, {W(t)}_{t=0}^∞ decreases together. When λ remains at its minimum value, the ASC condition helps the policy minimize L(s).
Recalling the optimization problem (3), we find that the constraint part equals Equation (8) with respect to Lπ(s), and the objective part can be rewritten as min_π E_{s∼ρ} Lπ(s). The Lyapunov candidate thus builds a bridge between the constraint and the objective. A common approach is to use a Lagrangian-based method to achieve the trade-off. However, softening the constraints poses a challenge to the guarantee of mean cost stability. Accordingly, we want a method that guides the policy toward the optimal point on the premise of the stability guarantee. Fortunately, we find that Equation (8) can achieve this goal when the minimum value of λ is found adaptively. We call this the Adaptive Stability Certification (ASC); it provides a promising way of searching for the optimal policy heuristically without violating stability. In the following, we illustrate the underlying reasons with a lemma for a continuous-time system.

Lemma 4.4 (Finite Tracking Time). In a continuous-time system, let a trajectory W(t) track the reference R(t). W(t) can track the reference within a finite time T, such that R(t) = W(t) for t ≥ T, if the following condition holds:

∇_t W(t) ≤ −k (W(t) − R(t)), ∀t ∈ [0, T]    (10)

Note that the gradient of R(t) should be bounded, i.e., ∇_t R(t) ≤ µ. See Appendix B.2 for the proof.

It can be seen that Equation (8) is very similar to Equation (10). Although the discrete-time condition does not yield a finite tracking time, unlike the continuous-time setting, the same principle comes from feedback control. To some extent, we can view Equation (8) as a special tracking task in which the sequence {W(t)}_{t=0}^∞ = {E L(s)}_{t=0}^∞ tracks the reference sequence {R(t)}_{t=0}^∞ = {λ E L(s′)}_{t=0}^∞. To explain it intuitively, Figure 2 depicts an illustrative example. As we can see, the value of W(t + 1) needs to decrease by k(W(t) − R(t)) at time t + 1. If λ decreases, {R(t)}_{t=0}^∞ forces {W(t)}_{t=0}^∞ to minimize L(s) along the whole sequence. Because of the introduction of the Lyapunov candidate, minimizing L(s) equals the previous objective part in Equation (3). Therefore, we can find the optimal policy based on the certification by finding the minimum value of λ adaptively. To sum up, the optimization problem is modified to finding the minimum value of λ while making Equation (8) hold. Note that we do not provide an in-depth theoretical analysis of this heuristic; showing that meeting ASC corresponds to finding the optimal policy is left for future work.
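A minimal numerical sketch of this tracking intuition (our own example, with arbitrary values of W(0), k and λ): if the ASC inequality is met with equality and R(t) = λ W(t), then W(t+1) = W(t) − k(W(t) − λ W(t)) contracts geometrically, and the smaller λ is, the faster and lower the sequence is driven.

```python
def track(w0: float, k: float, lam: float, steps: int = 50) -> float:
    """Iterate W(t+1) = W(t) - k * (W(t) - lam * W(t)) and return W(steps)."""
    w = w0
    for _ in range(steps):
        w = w - k * (w - lam * w)   # contraction factor: 1 - k * (1 - lam)
    return w

print(track(10.0, k=0.5, lam=0.99))  # decays slowly toward 0
print(track(10.0, k=0.5, lam=0.10))  # decays much faster
```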
4.2. Lyapunov Critic Learning

We leverage a traditional Actor-Critic framework to solve the optimization problem mentioned above. First, we construct two neural networks, namely the actor πφ(a|s) and the Lyapunov critic Lθ(s, a), where φ and θ represent the parameters of the two networks, respectively. The actor πφ(a|s) maps a given state s to a distribution over actions. The action distribution is modelled as a Gaussian with a state-dependent mean µφ(s) and diagonal covariance matrix Σφ(s). Unlike traditional Actor-Critic methods, our critic network is related to the Lyapunov candidate Lπ(s): Lπ(s) is the expectation of Lθ(s, a) over the distribution of actions, i.e., Eπ Lθ(s, a) = Lπ(s). Given this property, the above theoretical results about Lπ(s) also apply to our critic network. For the training of Lθ(s, a), because we choose the value function as the Lyapunov candidate in Equation (9), we can update θ according to the TD error:

θ_{k+1} = θ_k + α_θ ∇_θ (Lθ(s, a) − (cπ + γ L′(s′, π′(·|s′))))²    (11)

where k is the number of iterations, and L′ and π′ are the target networks parameterized by θ′ and φ′, respectively. In the Actor-Critic method, the parameters θ′ and φ′ are usually updated through an exponential moving average of weights controlled by a hyper-parameter σ ∈ (0, 1):

θ′_{k+1} ← σ θ_k + (1 − σ) θ′_k;   φ′_{k+1} ← σ φ_k + (1 − σ) φ′_k    (12)

Furthermore, we introduce a mechanism on the Lyapunov critic network to speed up the learning process. Admittedly, the Lyapunov candidate Lπ(s) already satisfies some of the requirements, as shown in Appendix A.2, which means the output of the critic network can eventually guarantee them through TD-like updating. In order to encourage accurate and efficient learning, we construct a constrained critic network. Concretely, we denote the output of a neural network as f(s, a), and Lθ(s, a) is then described by:

Lθ(s, a) = (G_s(f(s, a)))(G_s(f(s, a)))⊤    (13)

where G_s is a linear transformation which guarantees G_{s=s_e}(f(s = s_e, a)) = 0 (s_e is an equilibrium point defined in Definition A.1). Note that G_s contains no parameters to be learned, so the operator does not harm the representation ability of the neural network; for details, see Appendix B.3. To this end, the constrained network ensures that the output is non-negative and that the Lyapunov value is zero when the state is an equilibrium point.
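The sketch below is one possible PyTorch rendering of such a constrained critic (our own illustrative code; layer sizes, the equilibrium-centering choice and the training details are assumptions, not the authors' released implementation). The output is a sum of squares of features that are forced to vanish at the equilibrium state, so it is non-negative and zero at s_e, and it is regressed toward the TD target c + γ L′(s′, a′); target networks are blended with a Polyak-style average as in Equation (12).

```python
import torch
import torch.nn as nn

class LyapunovCritic(nn.Module):
    """L_theta(s, a) = ||g||^2 with g forced to zero at the equilibrium state s_e."""
    def __init__(self, s_dim: int, a_dim: int, s_eq: torch.Tensor, hidden: int = 64):
        super().__init__()
        self.f = nn.Sequential(
            nn.Linear(s_dim + a_dim, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden),
        )
        self.register_buffer("s_eq", s_eq)

    def forward(self, s: torch.Tensor, a: torch.Tensor) -> torch.Tensor:
        g = self.f(torch.cat([s, a], dim=-1))
        # One simple parameter-free way to zero the output at the equilibrium,
        # playing the role that G_s plays in Eq. (13): subtract the features
        # evaluated at (s_e, a).
        g_eq = self.f(torch.cat([self.s_eq.expand_as(s), a], dim=-1))
        g = g - g_eq
        return (g * g).sum(dim=-1)          # non-negative, zero at s = s_e

def critic_loss(critic, target_critic, target_actor, batch, gamma: float):
    s, a, c, s_next = batch                 # cost-based transition tuple
    with torch.no_grad():
        a_next = target_actor(s_next)
        target = c + gamma * target_critic(s_next, a_next)   # TD target, Eq. (11)
    return ((critic(s, a) - target) ** 2).mean()

def polyak_update(net, target_net, sigma: float):
    """theta' <- sigma * theta + (1 - sigma) * theta', as in Eq. (12)."""
    for p, tp in zip(net.parameters(), target_net.parameters()):
        tp.data.mul_(1.0 - sigma).add_(sigma * p.data)
```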
4.3. Lagrangian-based Policy Learning

Policy learning searches for feasible parameters of the policy network so that the output of the Lyapunov network meets the requirements of the ASC condition.

For optimizing the policy πφ(a|s), we denote the specific constrained condition as follows, according to Equation (8):

∆L_{πφ}(s, a) = Lθ(s′, πφ(· | s′)) − Lθ(s, a) + k [Lθ(s, a) − λ Lθ(s′, πφ(· | s′))] ≤ 0    (14)

The current policy's parameters can be updated thanks to the embedding of πφ. Meanwhile, because of the sampling-based stability theorem (Theorem 4.3), it is convenient to implement random sampling from the replay buffer D that stores the interaction data. Moreover, the optimal policy can be obtained when we maximize ∆L_{πφ}(s, a) by finding the minimum λ according to the ASC condition. Specifically, the optimization problem in Equation (3) can be reformulated as follows:

max_{λ, πφ} E_D ∆L_{πφ}(s, a)   s.t.   E_D ∆L_{πφ}(s, a) ≤ 0    (15)

First, we focus on the sub-problem of finding a πφ that satisfies the constraint for an arbitrary λ:

find πφ   s.t.   E_D ∆L_{πφ}(s, a) ≤ 0    (16)

Applying the Lagrangian-based method (Stooke et al., 2020), the parameters of πφ are updated by:

φ_{k+1} = φ_k + α_φ (λ_l ∇_a ∆L_{πφ}(s, a) ∇_φ πφ(s, a))    (17)

where α_φ is the learning rate of φ and λ_l represents the Lagrange multiplier of the constraint. During training, λ_l is updated by gradient ascent to maximize ∆L_{πφ}(s, a):

λ_l^{k+1} = λ_l^k + α_{λ_l} ∆L_{πφ}(s, a)    (18)

Note that λ_l should always be positive; α_{λ_l} is its learning rate. It is worth noting that λ_l is clipped to [0, 1] to bound its value. In addition, to improve exploration efficiency, we add a constraint on the minimum entropy, as in maximum entropy RL algorithms.
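A compact sketch of this update loop is given below (illustrative code we wrote for this description; the reparameterized policy call, optimizer handling, clipping scheme and the sign convention of the policy loss, which here simply drives ∆L toward satisfying the constraint, are assumptions rather than the authors' exact implementation).

```python
import torch

def policy_and_multiplier_step(actor, critic, batch, lam: float, k: float,
                               lambda_l: torch.Tensor, actor_opt, lr_lambda: float):
    """One Lagrangian-style update built around Delta_L from Eq. (14)."""
    s, a, s_next = batch
    a_next = actor(s_next)                                   # reparameterized action
    L_sa = critic(s, a)
    L_next = critic(s_next, a_next)
    delta_L = (L_next - L_sa) + k * (L_sa - lam * L_next)    # Eq. (14), want <= 0

    # Policy step: weight the constraint term by the multiplier lambda_l and
    # push Delta_L down toward satisfying the constraint.
    actor_loss = (lambda_l.detach() * delta_L).mean()
    actor_opt.zero_grad()
    actor_loss.backward()
    actor_opt.step()

    # Dual step (Eq. (18)): gradient ascent on lambda_l, then clip to [0, 1].
    with torch.no_grad():
        lambda_l += lr_lambda * delta_L.mean().detach()
        lambda_l.clamp_(0.0, 1.0)
    return lambda_l
```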
4.3.1. THE CHOICE OF λ

How to maximize ∆L_{πφ}(s, a) in Equation (15) still needs to be addressed. Finding the maximum value of ∆L_{πφ}(s, a) is equivalent to finding the minimum value of λ. First, the range of λ is from γ down to 0 in the ASC condition, and γ is very close to 1 in practical usage. Moreover, we find that the Lagrange multiplier λ_l ranges from 0 to 1 and decreases as the constraint becomes satisfied. Therefore, we can update λ by the following rule:

λ ← min(λ_l, γ)    (19)

To be specific, when the constraint is satisfied, λ decreases toward 0 to maximize ∆L_{πφ}(s, a). When λ settles at a stable level, the minimum value of λ realizes the aim of maximizing ∆L_{πφ}(s, a). With this, we have constructed the whole training process for the optimization problem in Equation (15).
4.3.2. THE CHOICE OF k

For the choice of k, we also use a heuristic approach to adjust its value dynamically: k ← 1 − λ_l. This approach has an essential advantage. As we can see, λ_l is close to 1 at the early stage of training; thus k is small and leads to slow tracking, according to Lemma 4.4. When λ_l decreases, it indicates that the current constraint is loose; k then increases instead, and the constraint forces the tracking process to proceed more quickly. It also offers another benefit, which we explain in Section 4.4.

We have thus designed the complete ALAC algorithm; the pseudo-code of the proposed algorithm is given in Appendix C.
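Putting Equation (19) and the rule k ← 1 − λ_l together, the adaptive schedule amounts to a few lines (a sketch only; the variable names are ours):

```python
def adapt_asc_parameters(lambda_l: float, gamma: float) -> tuple[float, float]:
    """Tie both ASC parameters to the current Lagrange multiplier:
    lambda follows Eq. (19) and k = 1 - lambda_l."""
    lam = min(lambda_l, gamma)   # lambda shrinks as the constraint is satisfied
    k = 1.0 - lambda_l           # tracking gain grows as the constraint loosens
    return lam, k

# e.g. early in training: adapt_asc_parameters(0.95, 0.99) -> (0.95, 0.05)
#      later in training: adapt_asc_parameters(0.10, 0.99) -> (0.10, 0.90)
```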
4.4. Theoretical Analysis

A fact we notice is that there exists a bias between practical computation and the theoretical analysis of Uπ in Theorem 4.3. To estimate the distribution Uπ, we need an infinite number of trajectories with infinite time steps, whereas in practice only M trajectories of T time steps are accessible. To better illustrate the issue, we define a finite sampling distribution U_π^T, where U_π^T = (1/T) Σ_{t=0}^{T} T(s | ρ, π, t). Besides, we introduce a new variable ∆Lπ(s) based on the Lyapunov candidate to simplify the derivation:

∆Lπ(s) = E_{s_{t+1}∼Pπ} Lπ(s_{t+1}) − Lπ(s_t) + k [Lπ(s_t) − λ E_{s_{t+1}∼Pπ} Lπ(s_{t+1})]    (20)

First, we provide a quantitative bound on the difference between the expectations under Uπ and U_π^T.

Theorem 4.5. Suppose that the length of the sampled trajectories is T; then the bound can be expressed as:

|E_{s∼Uπ} ∆Lπ(s) − E_{s∼U_π^T} ∆Lπ(s)| ≤ (2(k + 1) c̄π / (1 − γ)) T^{q−1}    (21)

where c̄π is the maximum of the cost and q is a constant in (0, 1). For the proof see Appendix B.4.

Next, we take the number of trajectories into consideration and derive a probabilistic bound on the difference between ∆Lπ(s) estimated under the U_π^T distribution and from M trajectories.

Theorem 4.6. Suppose that the length of the sampled trajectories is T and the number of trajectories is M; then the following upper bound holds:

P(| (1/(MT)) Σ_{m=1}^{M} Σ_{t=1}^{T} ∆Lπ(s_t^m) − E_{s∼U_π^T} ∆Lπ(s)| ≥ α) ≤ 2 exp(− M α² (1 − γ)² / (((1 − kλ)² + (k − 1)²) c̄π²))    (22)

where s_t^m represents the state in the m-th trajectory at timestep t. For the proof see Appendix B.5.

Theorem 4.5 suggests that if k is close to 0, the gap becomes small in practice. Conversely, Theorem 4.6 indicates that k is better chosen close to 1. That means the best choice of k lies between 0 and 1. Thus, the updating method for k in Section 4.3.2 efficiently balances these two effects. Furthermore, the two theorems quantify the theoretical gap between infinite and finite samples in practical usage, making Theorem 4.3 more complete.
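The quantity bounded in Theorem 4.6 is simply the Monte-Carlo average of ∆Lπ over M rollouts of length T; a sketch of that estimator (illustrative, with a user-supplied `delta_L` function) is:

```python
import numpy as np

def empirical_delta_L(delta_L, trajectories) -> float:
    """(1 / (M * T)) * sum_m sum_t Delta_L(s_t^m) over M trajectories of length T."""
    values = [delta_L(s) for traj in trajectories for s in traj]
    return float(np.mean(values))
```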
5. Experiments

In this section, we provide empirical evidence that ALAC captures an improved trade-off between optimality and stability compared to the baseline approaches. We test our method and the baselines in ten robotic control environments: Cartpole-cost, Point-circle-cost, Halfcheetah-cost, Swimmer-cost, Ant-cost, Humanoid-cost, Minitaur-cost, Spacereach-cost, Spacerandom-cost and Spacedualarm-cost. Details of the environments are given in Appendix D.1. Furthermore, we benchmark the ALAC method against five algorithms with a neural Lyapunov function. The algorithms can be separated into two categories: optimizing the policy with a mixed objective and with Lyapunov-like constraints. The first category contains POLYC (Chang & Gao, 2021), LBPO (Sikchi et al., 2021) and TNLF (Xiong et al., 2022). The second includes SPPO (Chow et al., 2019) and LAC (Han et al., 2020a).2 We also take the SAC-cost (Haarnoja et al., 2018) method into account because it is very close to our method without the ASC condition. For the detailed hyper-parameter settings see Appendices D.2.1 and D.2.2.

2 We find that LAC with a large α3 (see Appendix D.2.1) performs better, so we call it LAC∗ to distinguish between them.
Table 1. Performance evaluation of the accumulated costs and stability constraint violations on ten environments compared with six baselines. All quantities are provided on a scale of 0.1. Standard errors are provided in brackets. (If the mean constraint violation is less than 0.2 the sign is ↓, else ↑; '-' indicates the algorithm does not contain the stability constraints.)

| Task | Metric | ALAC | SAC-cost | SPPO | LAC | LAC* | POLYC | LBPO | TNLF |
|---|---|---|---|---|---|---|---|---|---|
| Cartpole-cost | Cost Return | 26.2(7.0) | 22.7(12.6) | 102.3(59.3) | 31.0(10.1) | 31.5(5.1) | 104.8(70.7) | 205.3(27.0) | 33.5(24.5) |
| | Violation | | - | | | | | - | |
| Point-circle-cost | Cost Return | 111.1(4.5) | 111.8(2.4) | 247.9(58.2) | 958.6(15.5) | 112.0(5.0) | 207.0(62.4) | 722.1(126.1) | 145.8(38.0) |
| | Violation | | - | | | | | - | |
| Halfcheetah-cost | Cost Return | 1.7(0.7) | 16.6(25.2) | 144.0(14.6) | 119.5(37.3) | 1.8(0.5) | 168.8(10.7) | 37.8(24.8) | 6.5(1.4) |
| | Violation | | - | | | | | - | |
| Swimmer-cost | Cost Return | 44.6(4.8) | 53.7(12.4) | 52.5(4.2) | 47.5(1.3) | 44.8(3.0) | 104.7(11.0) | 52.3(11.3) | 46.5(2.4) |
| | Violation | | - | | | | | - | |
| Ant-cost | Cost Return | 101.0(42.1) | 155.2(29.9) | 255.0(31.2) | 166.9(13.6) | 125.6(12.5) | 259.8(37.1) | 114.6(26.1) | 186.8(11.0) |
| | Violation | | - | | | | | - | |
| Humanoid-cost | Cost Return | 354.6(97.1) | 441.9(18.3) | 531.8(22.9) | 431.3(14.9) | 368.3(76.6) | 490.4(32.5) | 452.4(13.9) | 317.7(31.1) |
| | Violation | | - | | | | | - | |
| Minitaur-cost | Cost Return | 493.0(67.9) | 692.2(93.0) | 950.0(72.3) | 612.2(47.8) | 666.6(306.7) | 608.3(65.6) | 838.3(237.0) | 382.9(62.6) |
| | Violation | | - | | | | | - | |
| Spacereach-cost | Cost Return | 1.6(0.2) | 8.9(8.8) | 19.4(2.5) | 35.2(1.6) | 1.8(0.4) | 125.7(20.8) | 31.0(19.1) | 112.1(53.0) |
| | Violation | | - | | | | | - | |
| Spacerandom-cost | Cost Return | 2.3(0.3) | 38.4(28.6) | 53.2(32.7) | 33.9(3.5) | 2.8(0.9) | 112.8(19.4) | 35.8(2.9) | 85.9(42.3) |
| | Violation | | - | | | | | - | |
| Spacedualarm-cost | Cost Return | 26.1(3.5) | 36.1(8.3) | 201.9(48.8) | 66.3(10.6) | 63.6(62.1) | 140.6(17.4) | 37.8(7.5) | 280.1(99.3) |
| | Violation | | - | | | | | - | |
[Figure 3: training curves of Cost Return and Violation versus timestep (0 to 1e6) on Humanoid-cost and Minitaur-cost for ALAC(original), ALAC(∆L¹_πφ), ALAC(∆L²_πφ), ALAC(Tanh) and ALAC(k = 0.1).]

Figure 3. Ablation studies of the ASC condition. ALAC(original) shows comparable or the best performance compared with the other certifications on each task.
5.1. Comparing with Baselines

In this part, we evaluate the optimality and stability of our method and the baselines.
The results demonstrate that ALAC outperforms the baselines. To evaluate the methods fairly, we run the experiments over 5 rollouts and 5 seeds for all algorithms. We use the accumulated cost in a testing episode as the metric of optimality and the stability constraint violations as the stability metric. Although the metric of stability violations depends on each algorithm's design, the best value should be close to 0. Table 1 shows the performance on all tasks, and the training curves for the different algorithms are in Appendix D.3. The results confirm that ALAC outperforms state-of-the-art baselines on all tasks except for Cartpole-cost, Humanoid-cost and Minitaur-cost, where our method does not outperform SAC-cost and TNLF, respectively. Furthermore, although LAC∗, which uses tighter constraints, achieves performance comparable to our method, unlike LAC, its stability violations remain at a high level on some tasks. Admittedly, TNLF achieves lower cost than ALAC on Minitaur-cost, but TNLF converges to suboptimal policies on many tasks. According to Figure 11, TNLF satisfies the stability constraints quickly during training; the reason, however, is that its trained Lyapunov function collapses toward 0 early, so it does not provide dense guidance for the policy, leading it to a suboptimal solution. To recap, ALAC strikes an efficient balance between the optimality and stability of the system.

[Figure 4: t-SNE embeddings (top row) and phase trajectories with Lyapunov-value surfaces (bottom row) for Cartpole-cost, HalfCheetah-cost, Minitaur-cost and Spacerandom-cost; the phase plots show the joint angle ψ, its velocity ψ̇ and the Lyapunov value L_θ over the episode steps, with goal positions marked for the space-robot task.]

Figure 4. Visualization of states for the ALAC method by t-SNE and phase trajectory techniques. The top row of the figure depicts the t-SNE dimension reduction technique (Cartpole-cost is visualized in 2 dimensions, the others in 3 dimensions). The bottom row shows the phase trajectories and Lyapunov-value surfaces of the environments. ψ and ψ̇ denote the angular position and velocity, respectively.

Table 2. Average evaluation score and standard deviation on our environments for ALAC with and without the feedback under different biases of goals. (w/ errors means using errors between the desired and achieved goals as extra states for the agent.)

| Task | Point-circle-cost | | | Halfcheetah-cost | | | Spacereach-cost | | |
|---|---|---|---|---|---|---|---|---|---|
| Bias of goals | -20% | 0% | 20% | -20% | 0% | 20% | -20% | 0% | 20% |
| ALAC w/ errors | 85.2(4.5) | 110.1(3.9) | 148.5(12.2) | 3.9(0.8) | 2.5(0.6) | 8.3(4.7) | 6.4(1.7) | 2.4(1.4) | 8.7(1.7) |
| ALAC w/o errors | 178.8(7.8) | 118.9(11.4) | 247.8(11.9) | 10.1(2.1) | 3.3(1.2) | 13.4(2.1) | 11.9(0.4) | 1.6(0.2) | 11.5(0.3) |
| SAC-cost w/ errors | 84.2(4.2) | 109.3(2.2) | 140.1(3.0) | 60.1(27.5) | 81.6(50.2) | 129.5(85.6) | 21.9(12.3) | 22.1(16.9) | 20.6(18.1) |
| SAC-cost w/o errors | 180.9(6.3) | 115.3(4.0) | 240.3(3.7) | 15.9(15.7) | 16.7(25.5) | 33.5(34.0) | 16.1(6.2) | 8.8(8.8) | 15.0(7.2) |
5.2. Ablation Studies

To better demonstrate the effectiveness of the ASC condition, we conduct ablation studies on different certifications in ALAC. We compare the performance of the original ALAC with versions that use ∆L¹_πφ and ∆L²_πφ, and with a version where k is a constant throughout training. The details of ∆L¹_πφ and ∆L²_πφ are given in Appendix D.4. In contrast to ∆L_πφ, ∆L¹_πφ and ∆L²_πφ represent the upper bound and the lower bound of the constraint, respectively. Figure 3 and Figure 10 (see Appendix D.4) depict the accumulated cost and constraint violations on all tasks, where the algorithms are modified directly from ALAC. ALAC(∆L²_πφ) achieves lower performance than ALAC, while ALAC(∆L¹_πφ) performs the tasks comparably with ALAC. Nevertheless, the stricter constraint (ALAC(∆L¹_πφ)) negatively affects its performance on constraint violations, as shown in Figure 3; this is because no reasonable policy exists that meets such strict constraints. Moreover, the results of ALAC(k = 0.1) compared with ALAC demonstrate that the heuristic updating of k is effective during training. The slight gap between ALAC(Tanh), whose Lyapunov function is activated by the Tanh function, and ALAC shows that our method is not sensitive to the form of the Lyapunov function.
5.3. Evaluation Results

In this section, we describe the impacts of the stability condition more concretely using various visualization methods. Furthermore, we verify that ALAC achieves excellent robustness, generalization, and efficiency with the aid of the ASC condition.

5.3.1. ANALYSIS OF VISUALIZATION

First, we use the t-SNE method to visualize the states in 3 dimensions in order to better illustrate the stability of the system learned by ALAC (Cartpole-cost in 2 dimensions). As we can see, the top row of Figure 4 shows that the states in the final stage of an episode converge to a point or a circle; such patterns are characteristic of a stable system. Furthermore, experts can judge a system's stability from its phase space, so we also introduce various phase-space trajectories of the system to analyze stability. The second row of Figure 4 shows the phase trajectories with variance according to the state pairs of joint angular position and velocity. Concretely, ψ and ψ̇ represent an angular position and velocity, respectively. It can be found that the angular velocity starts at 0 and returns to 0, and the angular position moves from its initial value to an equilibrium point (for more details see Figure 6). Based on the above phenomena, the systems trained with the ALAC method exhibit focal stability or stable limit cycles. More importantly, we observe that the Lyapunov value decreases along an episode and consistently falls to its lowest point at the end of the episode, as shown in Figure 4 (bottom row). Furthermore, this helps the policy achieve promising results on robustness and generalization, as demonstrated in the further analysis below. More implementation details for t-SNE and phase trajectories are given in Appendix D.5.
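For reference, the kind of state-trajectory embedding shown in the top row of Figure 4 can be reproduced with an off-the-shelf t-SNE, roughly as below (a sketch with assumed array shapes and file name, not the authors' plotting code):

```python
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt

# states: (num_steps, state_dim) array recorded from one evaluation episode.
states = np.load("episode_states.npy")        # assumed file name

embedding = TSNE(n_components=2, perplexity=30, init="pca",
                 random_state=0).fit_transform(states)

plt.scatter(embedding[:, 0], embedding[:, 1],
            c=np.arange(len(states)), cmap="viridis", s=5)
plt.colorbar(label="timestep")                # late steps should cluster tightly
plt.show()
```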
5.3.2. VERIFICATION OF PROPERTIES

Robustness. Generally speaking, stability has a potential relationship with robustness (Chang & Gao, 2021). Thus, we add external disturbances with different magnitudes in each environment and observe the difference in performance. To be concrete, we introduce periodic external disturbances with different magnitudes in each task, and we omit the algorithms that do not converge to a reasonable solution in a given task. Figure 8 (see Appendix D.6.1) shows that in all scenarios ALAC enjoys superior performance over the other methods.

Generalization. Previous studies focus on the robustness of a system to demonstrate the effectiveness of Lyapunov constraints. In this paper, we design experiments to verify whether the policy can generalize to follow previously unseen reference signals. We introduce the error between the desired and achieved goals as additional information in the state. Because the Lyapunov function is significantly related to this error, ALAC w/ errors gains a remarkable performance improvement on generalization, as Table 2 illustrates. We choose the SAC-cost algorithm as a comparison since SAC-cost is very similar to our method without the ASC condition. For more experimental results, see Appendix D.6.2. In particular, the gap between the two methods widens with increasing biases. Furthermore, we observe that the errors have a negative impact on the performance of SAC-cost on complex tasks like Halfcheetah-cost and Spacereach-cost. The reason may be that SAC-cost does not efficiently capture the error information without the guidance of a Lyapunov function.

Efficiency. Our method also offers a positive effect under limited network sizes. The results in Table 3 suggest that the ALAC method consistently achieves comparable performance under different actor structures. Compared with the SAC-cost method, the ALAC method brings another benefit: performing the task efficiently with a limited number of controller parameters. This is because the adaptive stability certification provides efficient guidance for policy optimization.

Table 3. Comparison of the performance of the ALAC and SAC-cost methods under different actor structures.

| Task | Method | [64,64] | [32,32] | [16,16] |
|---|---|---|---|---|
| Halfcheetah-cost | ALAC | 1.7(0.7) | 2.9(1.7) | 5.0(1.9) |
| | SAC-cost | 16.6(25.2) | 94.1(38.4) | 86.6(57.8) |
| Minitaur-cost | ALAC | 492.9(67.9) | 403.5(85.7) | 571.7(116.1) |
| | SAC-cost | 692.1(92.9) | 664.9(115.3) | 934.1(149.3) |
| Spacerandom-cost | ALAC | 3.6(1.0) | 8.3(6.0) | 12.1(4.9) |
| | SAC-cost | 28.7(9.5) | 30.6(13.9) | 38.4(28.6) |
6. Discussion and Future Work

We propose the Adaptive Stability Certification (ASC), which meets the sampling-based stability of mean cost. Meanwhile, it guides the current policy to approach the optimal point heuristically in the spirit of classical feedback control. Based on the Actor-Critic framework and Lagrangian-based optimization, we present a practical algorithm, namely the Adaptive Lyapunov-based Actor-Critic algorithm (ALAC). Empirical results show that our method outperforms the baselines on diverse robotic tasks with respect to two metrics, stability constraint violations and mean costs. Furthermore, the controller trained by our method achieves higher generalization ability compared with a method without the stability guarantee. Through this heuristic formulation, we provide an interesting way to combine the policy's optimality with the system's stability in model-free RL. In this work, we focus on the system's stability; an exciting direction for future work would be to combine ASC with safe RL algorithms.
+ References
+ Achiam, J., Held, D., Tamar, A., and Abbeel, P. Constrained policy optimization. In International conference on machine learning, pp. 22–31. PMLR, 2017.
+ Andrychowicz, O. M., Baker, B., Chociej, M., Jozefowicz, R., McGrew, B., Pachocki, J., Petron, A., Plappert, M., Powell, G., Ray, A., et al. Learning dexterous in-hand manipulation. The International Journal of Robotics Research, 39(1):3–20, 2020.
+ Berkenkamp, F., Turchetta, M., Schoellig, A., and Krause, A. Safe model-based reinforcement learning with stability guarantees. Advances in neural information processing systems, 30, 2017.
+ Brockman, G., Cheung, V., Pettersson, L., Schneider, J., Schulman, J., Tang, J., and Zaremba, W. Openai gym. arXiv preprint arXiv:1606.01540, 2016.
+ Chang, Y.-C. and Gao, S. Stabilizing neural control using self-learned almost lyapunov critics. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pp. 1803–1809. IEEE, 2021.
+ Chang, Y.-C., Roohi, N., and Gao, S. Neural lyapunov control. Advances in neural information processing systems, 32, 2019.
+ Chen, S., Fazlyab, M., Morari, M., Pappas, G. J., and Preciado, V. M. Learning region of attraction for nonlinear systems. In 2021 60th IEEE Conference on Decision and Control (CDC), pp. 6477–6484. IEEE, 2021.
+ Chow, Y., Nachum, O., Duenez-Guzman, E., and Ghavamzadeh, M. A lyapunov-based approach to safe reinforcement learning. Advances in neural information processing systems, 31, 2018.
+ Chow, Y., Nachum, O., Faust, A., Duenez-Guzman, E., and Ghavamzadeh, M. Lyapunov-based safe policy optimization for continuous control. arXiv preprint arXiv:1901.10031, 2019.
+ Coumans, E. and Bai, Y. Pybullet, a python module for physics simulation for games, robotics and machine learning. 2016.
+ Dai, H., Landry, B., Pavone, M., and Tedrake, R. Counter-example guided synthesis of neural network lyapunov functions for piecewise linear systems. In 2020 59th IEEE Conference on Decision and Control (CDC), pp. 1274–1281. IEEE, 2020.
+ Dai, H., Landry, B., Yang, L., Pavone, M., and Tedrake, R. Lyapunov-stable neural-network control. arXiv preprint arXiv:2109.14152, 2021.
+ Dawson, C., Qin, Z., Gao, S., and Fan, C. Safe nonlinear control using robust neural lyapunov-barrier functions. In Conference on Robot Learning, pp. 1724–1735. PMLR, 2022.
+ Donti, P. L., Roderick, M., Fazlyab, M., and Kolter, J. Z. Enforcing robust control guarantees within neural network policies. In International Conference on Learning Representations, 2020.
+ Gaby, N., Zhang, F., and Ye, X. Lyapunov-net: A deep neural network architecture for lyapunov function approximation. arXiv preprint arXiv:2109.13359, 2021.
+ Haarnoja, T., Zhou, A., Abbeel, P., and Levine, S. Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor. In International conference on machine learning, pp. 1861–1870. PMLR, 2018.
+ Han, M., Zhang, L., Wang, J., and Pan, W. Actor-critic reinforcement learning for control with stability guarantee. IEEE Robotics and Automation Letters, 5(4):6217–6224, 2020a.
+ Han, M., Zhou, Z., Zhang, L., Wang, J., and Pan, W. Reinforcement learning for control with probabilistic stability guarantee. 2020b.
+ Han, M., Tian, Y., Zhang, L., Wang, J., and Pan, W. Reinforcement learning control of constrained dynamic systems with uniformly ultimate boundedness stability guarantee. Automatica, 129:109689, 2021.
+ Huang, T., Gao, S., Long, X., and Xie, L. A neural lyapunov approach to transient stability assessment in interconnected microgrids. In HICSS, pp. 1–10, 2021.
+ Hwangbo, J., Lee, J., Dosovitskiy, A., Bellicoso, D., Tsounis, V., Koltun, V., and Hutter, M. Learning agile and dynamic motor skills for legged robots. Science Robotics, 4(26):eaau5872, 2019.
+ Jin, W., Wang, Z., Yang, Z., and Mou, S. Neural certificates for safe control policies. arXiv preprint arXiv:2006.08465, 2020.
+ Kashima, K., Yoshiuchi, R., and Kawano, Y. Learning stabilizable deep dynamics models. arXiv preprint arXiv:2203.09710, 2022.
+ La Salle, J. and Lefschetz, S. Stability by Liapunov's Direct Method with Applications by Joseph L Salle and Solomon Lefschetz. Elsevier, 2012.
+ Lale, S., Shi, Y., Qu, G., Azizzadenesheli, K., Wierman, A., and Anandkumar, A. Kcrl: Krasovskii-constrained reinforcement learning with guaranteed stability in nonlinear dynamical systems. arXiv preprint arXiv:2206.01704, 2022.
+ Lawrence, N., Loewen, P., Forbes, M., Backstrom, J., and Gopaluni, B. Almost surely stable deep dynamics. Advances in Neural Information Processing Systems, 33:18942–18953, 2020.
+ Lechner, M., Žikelić, Ð., Chatterjee, K., and Henzinger, T. A. Stability verification in stochastic control systems via neural network supermartingales. arXiv preprint arXiv:2112.09495, 2021.
+ Li, X., Xiao, J., Cheng, Y., and Liu, H. An actor-critic learning framework based on lyapunov stability for automatic assembly. Applied Intelligence, pp. 1–12, 2022.
+ Mayne, D. Q., Rawlings, J. B., Rao, C. V., and Scokaert, P. O. Constrained model predictive control: Stability and optimality. Automatica, 36(6):789–814, 2000.
+ Mittal, M., Gallieri, M., Quaglino, A., Salehian, S. S. M., and Koutník, J. Neural lyapunov model predictive control. 2020.
+ Murray, R. M., Li, Z., and Sastry, S. S. A mathematical introduction to robotic manipulation. CRC press, 2017.
+ Richards, S. M., Berkenkamp, F., and Krause, A. The lyapunov neural network: Adaptive stability certification for safe learning of dynamical systems. In Conference on Robot Learning, pp. 466–476. PMLR, 2018.
+ Schlaginhaufen, A., Wenk, P., Krause, A., and Dorfler, F. Learning stable deep dynamics models for partially observed or delayed dynamical systems. Advances in Neural Information Processing Systems, 34:11870–11882, 2021.
+ Schulman, J., Levine, S., Abbeel, P., Jordan, M., and Moritz, P. Trust region policy optimization. In International conference on machine learning, pp. 1889–1897. PMLR, 2015.
+ Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
+ Sikchi, H., Zhou, W., and Held, D. Lyapunov barrier policy optimization. arXiv preprint arXiv:2103.09230, 2021.
+ Stooke, A., Achiam, J., and Abbeel, P. Responsive safety in reinforcement learning by pid lagrangian methods. In International Conference on Machine Learning, pp. 9133–9143. PMLR, 2020.
+ Wang, S., Cao, Y., Zheng, X., and Zhang, T. Collision-free trajectory planning for a 6-dof free-floating space robot via hierarchical decoupling optimization. IEEE Robotics and Automation Letters, 7(2):4953–4960, 2022.
+ Xiong, Z., Eappen, J., Qureshi, A. H., and Jagannathan, S. Model-free neural lyapunov control for safe robot navigation. arXiv preprint arXiv:2203.01190, 2022.
+ Zhang, L., Zhang, R., Wu, T., Weng, R., Han, M., and Zhao, Y. Safe reinforcement learning with stability guarantee for motion planning of autonomous vehicles. IEEE Transactions on Neural Networks and Learning Systems, 32(12):5435–5444, 2021.
+ Zhao, T., Wang, J., Lu, X., and Du, Y. Neural lyapunov control for power system transient stability: A deep learning-based approach. IEEE Transactions on Power Systems, 37(2):955–966, 2021.
+ Zhou, R., Quartz, T., De Sterck, H., and Liu, J. Neural lyapunov control of unknown nonlinear systems with stability guarantees. arXiv preprint arXiv:2206.01913, 2022.
+ Zou, S., Xu, T., and Liang, Y. Finite-sample analysis for sarsa with linear function approximation. Advances in neural information processing systems, 32, 2019.
1408
+
1409
+ Adaptive Stability Certification
1410
+ A. Preliminary Remarks
1411
+ A.1. Lyapunov function
1412
+ Definition A.1 (Equilibrium Point). A state se is an equilibrium point if ∃ action ae ∈ A such that f(se, ae) = se. (Murray
1413
+ et al., 2017)
1414
+ Definition A.2 (Stabilizable in the sense of Lyapunov). A system is stabilizable if ∀ϵ > 0, ∃δ > 0 such that for all s0 ∈ S
+ with ∥s0 − se∥ ≤ δ, there exists a sequence {a_t}_{t=0}^{∞} such that the resulting {s_t}_{t=0}^{∞} satisfies ∥s_t − se∥ ≤ ϵ for all t ≥ 0. (Murray et al., 2017)
1418
+ Definition A.3 (Lyapunov Function). A continuous and radially unbounded function L : S → R is a Lyapunov function if
1419
+ the following conditions hold:
1420
+ 1. ∀s ∈ S, ∃a ∈ A, s.t. L(s) ≥ L(f(s, a)),
1421
+ 2. ∀s ̸= 0, L > 0; L(0) = 0.
1422
+ If a Lyapunov function exists, a discrete-time system can achieve stability in the sense of Lyapunov without considering the
1423
+ physical energy.
1424
+ A.2. Lyapunov Candidate Bound
1425
+ In this part, we show that the Lyapunov candidate Lπ(s) satisfies the property in Theorem 4.3, which can be formulated as
+ cπ(s) + λ E_{s′∼Pπ} L(s′) ≤ L(s) ≤ β cπ(s)    (23)
+ where we omit the lower bound α cπ(s), which is naturally satisfied by Lπ(s).
+ Firstly, according to the definition of Lπ(s), we have
+ Lπ(s) = Eπ[ Σ_{t=0}^{∞} γ^t cπ(s_t) | s_0 = s ]
+       = Eπ[ cπ(s_0) + Σ_{t=1}^{∞} γ^t cπ(s_t) | s_0 = s ]
+       = cπ(s) + Eπ[ Σ_{t=1}^{∞} γ^t cπ(s_t) ]
+       = cπ(s) + γ E_{π, s′∼Pπ}[ Σ_{t=0}^{∞} γ^t cπ(s_t) | s_0 = s′ ]
+       = cπ(s) + γ E_{s′∼Pπ} L(s′)    (24)
+ Considering the left-hand side of Equation (6), we find that if λ ≤ γ holds, the lower bound of the Lyapunov function is
+ satisfied, because the Lyapunov candidate Lπ(s) is positive at every state. Furthermore, the right-hand side of Equation (6)
+ states that an upper bound of the Lyapunov function exists. This condition is also guaranteed for our Lyapunov candidate,
+ as shown below:
+ Lπ(s) = Eπ[ Σ_{t=0}^{∞} γ^t cπ(s_t) | s_0 = s ] ≤ Σ_{t=0}^{∞} γ^t Eπ[ cπ(s_t) | s_0 = s ] ≤ c̄π / (1 − γ)    (25)
+ Note that c̄π denotes the maximum cost. The second inequality holds due to Jensen's inequality. Provided the maximum
+ cost exists, there is β ∈ R+ such that c̄π / (1 − γ) ≤ β cπ(s) holds.
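+ The recursion (24) and the bound (25) can be checked numerically. Below is a minimal sketch (not the authors' code) on a toy three-state Markov chain with an assumed transition matrix and cost vector; the closed-form discounted cost plays the role of Lπ.
+ import numpy as np
+ 
+ gamma = 0.9
+ P = np.array([[0.8, 0.2, 0.0],      # assumed closed-loop transition matrix P_pi
+               [0.1, 0.7, 0.2],
+               [0.0, 0.3, 0.7]])
+ c = np.array([0.0, 0.5, 1.0])       # assumed per-state cost c_pi(s)
+ 
+ # L_pi(s) = E[ sum_t gamma^t c(s_t) | s_0 = s ], solved in closed form
+ L = np.linalg.solve(np.eye(3) - gamma * P, c)
+ 
+ # Equation (24): L(s) = c(s) + gamma * E_{s'~P_pi} L(s')
+ assert np.allclose(L, c + gamma * P @ L)
+ 
+ # Equation (25): L(s) <= max cost / (1 - gamma)
+ assert np.all(L <= c.max() / (1 - gamma) + 1e-9)
+ print(L, c.max() / (1 - gamma))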
1474
+
1475
+ Adaptive Stability Certification
1476
+ B. Details of Theoretical Analysis
1477
+ B.1. Proof of Theorem 4.3
1478
+ Theorem B.1 (Sampling-based Lyapunov Stability). An MDP system is stable with regard to the mean cost if there exists a
+ function L : S → R that meets the following conditions:
+ α cπ(s) ≤ L(s) ≤ β cπ(s)    (26)
+ L(s) ≥ cπ(s) + λ E_{s′∼Pπ} L(s′)    (27)
+ E_{s∼Uπ}[ E_{s′∼Pπ} L(s′) − L(s) ] ≤ −k E_{s∼Uπ}[ L(s) − λ E_{s′∼Pπ} L(s′) ]    (28)
+ where α, β, λ and k are positive constants, and Pπ(s′|s) = ∫_A π(a|s) P(s′|s, a) da holds.
+ Proof. Firstly, we simplify the left-hand side of Equation (28), following (Han et al., 2020a). Introducing the definition of
+ Uπ(s) leads to
+ E_{s∼Uπ}[ E_{s′∼Pπ} Lπ(s′) − Lπ(s) ] = ∫_S lim_{T→∞} (1/T) Σ_{t=0}^{T} T (s | ρ, π, t) ( ∫_S Pπ(s′|s) Lπ(s′) ds′ − Lπ(s) ) ds    (29)
+ Due to the boundedness of Lπ, we can apply Lebesgue's dominated convergence theorem: when |Fn(s)| ≤ B(s) for all s ∈ S
+ and all n, we have
+ lim_{n→∞} ∫_S Fn(s) ds = ∫_S lim_{n→∞} Fn(s) ds    (30)
+ Hence, we get
+ E_{s∼Uπ}[ E_{s′∼Pπ} Lπ(s′) − Lπ(s) ]
+ = ∫_S lim_{T→∞} (1/T) Σ_{t=0}^{T} T (s | ρ, π, t) ( ∫_S Pπ(s′|s) Lπ(s′) ds′ − Lπ(s) ) ds
+ = lim_{T→∞} ∫_S (1/T) Σ_{t=0}^{T} T (s | ρ, π, t) ( ∫_S Pπ(s′|s) Lπ(s′) ds′ − Lπ(s) ) ds
+ = lim_{T→∞} (1/T) ( Σ_{t=1}^{T+1} E_{T (s|ρ,π,t)} Lπ(s) − Σ_{t=0}^{T} E_{T (s|ρ,π,t)} Lπ(s) )
+ = lim_{T→∞} (1/T) ( E_{T (s|ρ,π,T+1)} Lπ(s) − E_{T (s|ρ,π,t=0)} Lπ(s) )    (31)
+ Note that T (s | ρ, π, t = 0) is equal to ρ. Since the expectation of Lπ(s) is finite, the left-hand side of Equation (28) is zero.
+ Now we turn to the right-hand side of Equation (28). According to Equation (31), we have
+ −k E_{s∼Uπ}[ L(s) − λ E_{s′∼Pπ} L(s′) ] ≥ 0,  i.e.,  E_{s∼Uπ}[ L(s) − λ E_{s′∼Pπ} L(s′) ] ≤ 0    (32)
+ Since L(s) ≥ cπ(s) + λ E_{s′∼Pπ} L(s′) holds, we get
+ E_{s∼Uπ} cπ(s) ≤ 0    (33)
+ Based on the Abelian theorem, there exists
+ Uπ(s) = lim_{T→∞} (1/T) Σ_{t=0}^{T} T (s | ρ, π, t) = lim_{t→∞} T (s | ρ, π, t) = ωπ(s)    (34)
+ Thus, we get
+ E_{s∼ωπ}[ cπ(s) ] ≤ 0    (35)
+ The last inequality holds because of Equation (34). Based on the definition of ωπ(s), we have
+ lim_{t→∞} E_{T (s|ρ,π,t)} cπ(s) ≤ 0    (36)
+ Suppose that there exists a starting state s0 ∈ {s0 | cπ(s0) ≤ b} and a positive constant d such that
+ lim_{t→∞} E_{T (s|ρ,π,t)} cπ(s) = d or lim_{t→∞} E_{T (s|ρ,π,t)} cπ(s) = ∞. Since ρ(s0) > 0 for all starting states in
+ {s0 | cπ(s0) ≤ b} (Assumption 4.1), this implies lim_{t→∞} E_{s∼T (·|ρ,π,t)} cπ(s) > 0, which contradicts Equation (36).
+ Thus, for all s0 ∈ {s0 | cπ(s0) ≤ b}, lim_{t→∞} E_{T (s|ρ,π,t)} cπ(s) = 0, and the system meets the mean cost stability of
+ Definition 3.1.
+ Furthermore, we find that when L(s) = cπ(s) + λ E_{s′∼Pπ} L(s′) holds, our theorem corresponds to Theorem 1 in
+ (Han et al., 2020a); that is, we extend the previous method to a more general case. Specifically, the introduction of λ
+ enlarges the solution space of the policy, which facilitates finding the optimal point while maintaining the system's stability.
1611
+ B.2. Proof of Lemma 4.4
1612
+ Lemma B.2 (Finite Tracking Time). In a continuous-time system, a trajectory W(t) tracks the reference R(t). W(t) can
+ track the reference within a finite time T, such that R(t) = W(t) for t ≥ T, if the following condition holds:
+ ∇t W(t) ≤ −k (W(t) − R(t)), ∀t ∈ [0, T]    (37)
+ Note that the gradient of R(t) is bounded, i.e., ∇t R(t) ≤ µ.
+ Proof. First, we build the mean square error V (t) between them:
+ V = (1/2) (W(t) − R(t))^2    (38)
+ Then, we can derive the derivative of V (t) as follows:
+ ∇t V = (W − R)(∇t W − ∇t R) ≤ (W − R)(−k(W − R) − ∇t R) ≤ −k |W − R|^2 − (W − R) ∇t R    (39)
+ Introducing the assumption of the bounded gradient of R(t), we have
+ ∇t V ≤ −2k · (|W − R|^2 / 2) − √2 µ · (|W − R| / √2) ≤ −2k V − √2 µ √V    (40)
+ Observe that the above formulation is a form of the Bernoulli differential equation. In this case, we can reduce the Bernoulli
+ equation to a linear differential equation by substituting z = √V. Then, the general solution for z is
+ z = √V ≤ −(√2/2)(µ/k) + C e^{−kt}    (41)
+ Applying the initial condition V (t = t0) = v_{t0}, we have
+ C = √v_{t0} + (√2/2)(µ/k)    (42)
+ Finally, the convergence time T can be represented as
+ T = (1/k) ln( ( (√2/2)(µ/k) + √v_{t0} ) / ( (√2/2)(µ/k) ) ) + t0    (43)
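+ The bound (43) can be illustrated numerically. The following sketch (assumed toy constants, not the paper's code) integrates the error envelope of Eq. (40) with forward Euler and checks that it reaches zero no later than the finite tracking time T.
+ import math
+ 
+ k, mu, v_t0, t0 = 2.0, 0.5, 1.0, 0.0   # assumed constants for the sketch
+ T = (1.0 / k) * math.log(((math.sqrt(2) / 2) * mu / k + math.sqrt(v_t0))
+                          / ((math.sqrt(2) / 2) * mu / k)) + t0
+ 
+ # integrate dV/dt = -2*k*V - sqrt(2)*mu*sqrt(V), the right-hand side of Eq. (40)
+ dt, t, V = 1e-4, t0, v_t0
+ while V > 0.0 and t < 10.0:
+     V += dt * (-2.0 * k * V - math.sqrt(2.0) * mu * math.sqrt(V))
+     t += dt
+ 
+ print(f"V reached zero at t = {t:.3f}, finite-time bound T = {T:.3f}")
+ assert t <= T + 1e-2   # small numerical slack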
1678
+ B.3. Constrained Lyapunov Critic Network
1679
+ In this work, the output of the neural network of the Lyapunov critic is described by
+ f(s, a) = h_O(h_{O−1}(· · · h_2(h_1(⟨s, a⟩))))    (44)
+ where each h_o(z) has the same form:
+ h_o(z) = ψ_o(W_o z + b_o)    (45)
+ Here, O represents the number of layers, ψ_o is the non-linear activation function used in the o-th layer, and {W_o, b_o}
+ are the weight and bias of the o-th layer.
+ First of all, to meet the requirement Lθ(s_e, a) = 0, we introduce a linear transformation G_s, one possible form of which is
+ G_s(f) = (1 / (Σ_{i=1}^{I} δs_i + ϵ)) · [ δs_1  δs_2  · · ·  δs_I ] · [ f_1 f_2 · · · f_v ; f_1 f_2 · · · f_v ; · · · ; f_1 f_2 · · · f_v ]    (46)
+ where I denotes the number of elements of the state, v is the number of units of the output layer, and the I × v matrix
+ stacks the row (f_1, . . . , f_v) I times. ϵ is a constant close to 0 that avoids singularity. Note that δs = s − s_e, the difference
+ between the current state and an equilibrium point. When every element of δs is zero, the matrix product is zero, so
+ G_{s=s_e}(f(s = s_e, a)) = 0 holds. Furthermore, this transformation has the additional benefit of not interfering with the
+ training of the networks.
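+ A minimal numpy sketch of this output transformation is given below (variable names are ours, not the authors'). It uses the equivalent scalar form of the matrix product in Eq. (46), so the critic output is exactly zero at the equilibrium state and essentially unchanged away from it.
+ import numpy as np
+ 
+ def g_s(f_out, s, s_e, eps=1e-6):
+     delta_s = s - s_e                              # delta_s = s - s_e
+     scale = delta_s.sum() / (delta_s.sum() + eps)  # scalar form of Eq. (46)
+     return scale * f_out
+ 
+ s_e = np.zeros(4)                       # assumed equilibrium state
+ f_out = np.array([0.3, 1.2, 0.7])       # assumed raw network output f_1..f_v
+ print(g_s(f_out, s_e, s_e))             # -> all zeros at the equilibrium
+ print(g_s(f_out, s_e + 0.5, s_e))       # -> close to f_out away from it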
1723
+ B.4. Proof of Theorem 4.5
1724
+ Theorem B.3. Suppose that the length of sampling trajectories is T; then the bound can be expressed as
+ | E_{s∼Uπ} ∆Lπ(s) − E_{s∼Uπ^T} ∆Lπ(s) | ≤ ( 2(k + 1) c̄π / (1 − γ) ) T^{q−1}    (47)
+ where q is a constant in (0, 1).
+ Proof. First, we obtain the following equation by introducing the definitions of Uπ and Uπ^T:
+ E_{s∼Uπ} ∆Lπ(s) − E_{s∼Uπ^T} ∆Lπ(s) = ∫_S ( Uπ(s) − (1/T) Σ_{t=1}^{T} T (s | ρ, π, t) ) ∆Lπ(s) ds
+                                     = (1/T) Σ_{t=1}^{T} ∫_S ( Uπ(s) − T (s | ρ, π, t) ) ∆Lπ(s) ds    (48)
+ Then, bounding the integral with Hölder's inequality, we obtain
+ | E_{s∼Uπ} ∆Lπ(s) − E_{s∼Uπ^T} ∆Lπ(s) | ≤ (1/T) Σ_{t=1}^{T} ∥Uπ(s) − T (s | ρ, π, t)∥_1 ∥∆Lπ(s)∥_∞    (49)
+ Thus, the next step is to bound ∥Uπ(s) − T (s | ρ, π, t)∥_1 and ∥∆Lπ(s)∥_∞.
+ For the first part, we introduce the assumption first mentioned in (Zou et al., 2019):
+ Σ_{t=1}^{T} ∥Uπ(s) − T (s | ρ, π, t)∥_1 ≤ 2 T^q, ∀T ∈ Z+, ∃q ∈ (0, 1)    (50)
+ This assumption is easily satisfied, because the L1 distance between two distributions is bounded by 2 and
+ T (s | ρ, π, t) converges to Uπ(s) as time goes on.
+ For the second part, we can bound ∆Lπ(s) according to Equation (25):
+ ∆Lπ(s) = E_{s′∼Pπ} Lπ(s′) − Lπ(s) + k ( Lπ(s) − λ E_{s′∼Pπ} Lπ(s′) ) ≤ c̄π / (1 − γ) − 0 + k ( c̄π / (1 − γ) − 0 )    (51)
+ Then, we have
+ ∥∆Lπ(s)∥_∞ ≤ (k + 1) c̄π / (1 − γ)    (52)
+ Combining Equations (49), (50) and (52), we finally get
+ | E_{s∼Uπ} ∆Lπ(s) − E_{s∼Uπ^T} ∆Lπ(s) | ≤ ( 2(k + 1) c̄π / (1 − γ) ) T^{q−1}    (53)
1791
+ B.5. Proof of Theorem 4.6
1792
+ Theorem B.4. Suppose that the length of sampling trajectories is T and the number of trajectories is M; then the following
+ upper bound exists:
+ P( | (1/(MT)) Σ_{m=1}^{M} Σ_{t=1}^{T} ∆Lπ(s_t^m) − E_{s∼Uπ^T} ∆Lπ(s) | ≥ α ) ≤ 2 exp( − M α^2 (1 − γ)^2 / ( ((1 − kλ)^2 + (k − 1)^2) c̄π^2 ) )    (54)
+ where s_t^m represents the state of the m-th trajectory at timestep t.
+ Proof. First, expanding ∆Lπ(s) by Equation (20), we rewrite the left-hand side of Equation (54) as
+ δ = P( | (1/(MT)) Σ_{m=1}^{M} Σ_{t=1}^{T} ∆Lπ(s_t^m) − E_{s∼Uπ^T} ∆Lπ(s) | ≥ α )
+   = P( | (1/(MT)) Σ_{m=1}^{M} Σ_{t=1}^{T} ( Lπ(s_{t+1}) − Lπ(s_t) + k ( Lπ(s_t) − λ Lπ(s_{t+1}) ) ) − E_{s∼Uπ^T} ∆Lπ(s) | ≥ α )
+   = P( | (1/(MT)) Σ_{m=1}^{M} Σ_{t=1}^{T} ( (1 − kλ) Lπ(s_{t+1}) + (k − 1) Lπ(s_t) ) − E_{s∼Uπ^T} ∆Lπ(s) | ≥ α )    (55)
+ Here E_{s∼Uπ^T} ∆Lπ(s) is the expected value of (1/(MT)) Σ_{m=1}^{M} Σ_{t=1}^{T} ∆Lπ(s_t^m). In addition, the bounds of
+ (1 − kλ) Lπ(s_{t+1}) and (k − 1) Lπ(s_t) follow easily from Equation (25). Thus, we obtain Theorem 4.6 by applying
+ Hoeffding's inequality:
+ δ ≤ 2 exp( − 2 M^2 α^2 / ( M ((1 − kλ)^2 + (k − 1)^2) c̄π^2 / (1 − γ)^2 ) ) ≤ 2 exp( − M α^2 (1 − γ)^2 / ( ((1 − kλ)^2 + (k − 1)^2) c̄π^2 ) )    (56)
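+ The following is a simplified, self-contained illustration of Hoeffding's inequality as it is used in this proof, with toy numbers rather than the paper's constants: M bounded per-trajectory averages in [−b, b] concentrate around their mean at rate 2·exp(−Mα²/(2b²)).
+ import numpy as np
+ 
+ rng = np.random.default_rng(0)
+ M, b, alpha, trials = 200, 1.0, 0.15, 5000
+ bound = 2 * np.exp(-M * alpha ** 2 / (2 * b ** 2))
+ 
+ hits = 0
+ for _ in range(trials):
+     z = rng.uniform(-b, b, size=M)          # bounded samples with mean 0
+     hits += abs(z.mean()) >= alpha
+ print(f"empirical deviation prob = {hits / trials:.4f} <= bound = {bound:.4f}")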
1866
+ C. Details of Algorithms
1867
+ As mentioned in the main text, we introduce a minimum entropy as a constraint in policy optimization and apply the
+ primal-dual method to update the policy and the Lagrange multiplier λe. Specifically, the constraint can be expressed as
+ log πφ(a|s) ≤ −Ze    (57)
+ where Ze is the minimum value of the policy entropy; usually, Ze corresponds to the dimension of the action space of the
+ environment.
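+ A hedged sketch of the dual update for λe under Eq. (57) is shown below (names and the toy batch are ours): λe is increased when the average log-probability exceeds −Ze (i.e., the entropy is too low) and decreased otherwise, while staying non-negative.
+ import numpy as np
+ 
+ Z_e = 6                      # e.g. the action dimension of the environment
+ lambda_e, lr = 1.0, 3e-4     # initial multiplier and its learning rate (Table 13)
+ 
+ def dual_update_lambda_e(log_probs, lambda_e):
+     # gradient ascent on the dual; the constraint is E[log pi(a|s)] + Z_e <= 0
+     violation = float(np.mean(log_probs)) + Z_e
+     return max(0.0, lambda_e + lr * violation)
+ 
+ log_probs = np.random.default_rng(0).normal(-5.0, 0.5, size=256)  # assumed batch
+ lambda_e = dual_update_lambda_e(log_probs, lambda_e)
+ print(lambda_e)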
1873
+ Algorithm 1: Adaptive Lyapunov-based Actor-Critic Algorithm (ALAC)
1874
+ Orthogonally initialize the parameters of the actor and critic networks φ, θ
1875
+ Initialize replay buffer D and λl, λe
1876
+ Initialize the parameters of target network with φ′ ← φ and θ′ ← θ
1877
+ for episode m = 1, M do
1878
+ Sample an initial state s0
1879
+ for step t = 0, T − 1 do
1880
+ Sample an action from πφ(at|st)
1881
+ Execute the action at and observe a new state st+1
1882
+ Store < st, at, ct, st+1 > into D
1883
+ end for
1884
+ for iteration n = 1, N do
1885
+ Sample a minibatch B from the replay buffer D
1886
+ Update θ according to Eq.(11) using minibatch B
1887
+ Update φ, λl, λe according to Eq.(17),(18),(57) using minibatch B
1888
+ Update the parameters of target networks, θ′, φ′, according to Eq. (12)
1889
+ end for
1890
+ end for
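+ For concreteness, the control flow of Algorithm 1 can be sketched in plain Python as follows. This is a structural sketch, not the authors' implementation: the environment, networks and the update rules of Eqs. (11), (12), (17), (18) and (57) are stubbed out; only the outer episode loop, the replay buffer and the inner gradient iterations mirror the pseudo-code above.
+ import random
+ from collections import deque
+ 
+ class StubEnv:                                        # placeholder environment
+     def reset(self): return 0.0
+     def step(self, a): return random.random(), abs(a), False   # s', cost, done
+ 
+ def sample_action(s): return random.uniform(-1, 1)    # stands in for pi_phi
+ def update_critic(batch): pass                        # Eq. (11), stubbed
+ def update_actor_and_multipliers(batch): pass         # Eqs. (17), (18), (57), stubbed
+ def update_targets(): pass                            # Eq. (12), stubbed
+ 
+ env, D = StubEnv(), deque(maxlen=10**6)
+ M, T, N, batch_size = 5, 50, 10, 32
+ 
+ for episode in range(M):
+     s = env.reset()
+     for t in range(T):
+         a = sample_action(s)
+         s_next, cost, done = env.step(a)
+         D.append((s, a, cost, s_next))                # store transition
+         s = s_next
+     for n in range(N):
+         if len(D) < batch_size:
+             break
+         batch = random.sample(list(D), batch_size)
+         update_critic(batch)
+         update_actor_and_multipliers(batch)
+         update_targets()
+ print(f"collected {len(D)} transitions")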
1891
+ D. Details of Experiments
1892
+ We test our method and baselines in ten robotic control environments: Cartpole-cost, Point-circle-cost, Halfcheetah-cost,
+ Swimmer-cost, Ant-cost, Humanoid-cost, Minitaur-cost, Spacereach-cost, Spacerandom-cost and Spacedualarm-cost.
+ [Figure 5: panels showing the ten environments: Cartpole-cost, Point-circle-cost, Swimmer-cost, Halfcheetah-cost, Ant-cost, Humanoid-cost, Minitaur-cost, Spacereach-cost, Spacerandom-cost, Spacedualarm-cost.]
+ Figure 5. Overview of our environments.
+ Most of the tasks are goal-oriented (tracking a target position or velocity), which corresponds to most control tasks.
+ Furthermore, the latter four environments involve models of practical robots, such as a quadruped robot and a robotic arm,
+ and are therefore relatively more difficult. It is worth noting that the task in Spacedualarm-cost is trajectory planning for a
+ free-floating dual-arm space robot; the coupling between the base and the robotic arms is challenging for both traditional
+ control and RL-based methods (Wang et al., 2022).
1912
+ D.1. Environmental Design
1913
+ Cartpole-cost
1914
+ This task aims to maintain the pole vertically at a target position. The environment is inherited from (Han
+ et al., 2020b). The state and action space are the same as the default settings in OpenAI Gym (Brockman et al., 2016), so we
+ omit the description. The cost function is c = (x / x_threshold)^2 + 20 · (θ / θ_threshold)^2, where x_threshold = 10 and θ_threshold = 20°. The
+ other settings can be found in Table 4.
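+ Written out in code, the cost above is simply the following (a direct transcription with the stated thresholds; the state layout is assumed to follow Gym's CartPole):
+ import math
+ 
+ X_THRESHOLD = 10.0
+ THETA_THRESHOLD = math.radians(20.0)
+ 
+ def cartpole_cost(x, theta):
+     """c = (x / x_threshold)^2 + 20 * (theta / theta_threshold)^2"""
+     return (x / X_THRESHOLD) ** 2 + 20.0 * (theta / THETA_THRESHOLD) ** 2
+ 
+ print(cartpole_cost(0.0, 0.0), cartpole_cost(1.0, math.radians(5.0)))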
1928
+ Point-circle-cost
1929
+ This task aims to allow a sphere to track a circular trajectory. The environment is inherited from (Achiam
1930
+ et al., 2017). The sphere is initialized at the origin. The cost function is represented as c = d, where d denotes the
1931
+ distance between the current position and the reference. The other settings can be found in Table 4.
1932
+ Table 4. Hyper-parameters of non-linear dynamic environments
+ Hyper-parameters        Cartpole-cost    Point-circle-cost
+ State shape             4                7
+ Action shape            2                2
+ Length of an episode    250 steps        65 steps
+ Maximum steps           3e5 steps        3e5 steps
+ Actor network           (64, 64)         (64, 64)
+ Critic network          (64, 64, 16)     (64, 64, 16)
1954
+ Halfcheetah-cost
1955
+ The goal of this task is to make a HalfCheetah (a 2-legged simulated robot) track the desired velocity.
1956
+ The environment is inherited from (Han et al., 2020b). The state and action space are the same as the default settings in
1957
+
1958
+ Adaptive Stability Certification
1962
+ Table 5. Hyper-parameters of mujoco environments
+ Hyper-parameters        Swimmer-cost   Halfcheetah-cost   Ant-cost       Humanoid-cost
+ State shape             8              17                 27             376
+ Action shape            2              6                  8              8
+ Length of an episode    250 steps      200 steps          200 steps      500 steps
+ Maximum steps           3e5 steps      1e6 steps          1e6 steps      1e6 steps
+ Actor network           (64, 64)       (64, 64)           (64, 64)       (256, 256)
+ Critic network          (64, 64, 16)   (256, 256, 16)     (64, 64, 16)   (256, 256, 128)
1998
+ OpenAI Gym(Brockman et al., 2016), so we omit the description. The cost function is c = (v − 1)2, where 1 represents the
1999
+ desired velocity. The other settings can be found in Table 5.
2000
+ Swimmer-cost
2001
+ This task aims to make a multi-joint snake robot track the desired velocity. The environment is inherited
2002
+ from (Han et al., 2020b). The state and action space are the same as the default settings in OpenAI Gym(Brockman et al.,
2003
+ 2016), so we omit the description. The cost function is c = (v − 1)2, where 1 represents the desired velocity. The other
2004
+ settings can be found in Table 5.
2005
+ Ant-cost
2006
+ This task aims to make an Ant (a quadrupedal simulated robot) track the desired velocity. The environment is
2007
+ inherited from (Brockman et al., 2016). The state and action space are the same as the default settings in OpenAI Gym
2008
+ (Brockman et al., 2016), so we omit the description. The cost function is c = (v − 1)2, where 1 represents the desired
2009
+ velocity. The other settings can be found in Table 5.
2010
+ Humanoid-cost
2011
+ This task aims to make a humanoid robot track the desired velocity. The environment is inherited from
2012
+ (Brockman et al., 2016). The state and action space are the same as the default settings in OpenAI Gym (Brockman et al.,
2013
+ 2016), so we omit the description. The cost function is c = (v − 1)2, where 1 represents the desired velocity. The other
2014
+ settings can be found in Table 5.
2015
+ Minitaur-cost
2016
+ This task aims to control the Ghost Robotics Minitaur quadruped to run forward at the desired velocity.
2017
+ The environment is inherited from (Coumans & Bai, 2016). The state and action space are the same as the default settings
2018
+ in PyBullet environment(Coumans & Bai, 2016), so we omit the description. The cost function is c = (v − 1)2, where 1
2019
+ represents the desired velocity. The other settings can be found in Table 6.
2020
+ Spacereach-cost
2021
+ This task aims to make a free-floating single-arm space robot’s end-effector reach a fixed goal position.
2022
+ Since the base satellite is uncontrolled, any collision will destabilize the system. Therefore, it is critical
2023
+ to plan a collision-free path while maintaining the stability of the base. The agent can obtain the state, including the angular
2024
+ positions and velocities of joints, the position of the end-effector, and the position of the reference point. Then, the agent
2025
+ outputs the desired velocities of joints. In low-level planning, a PD controller converts the desired velocities into torques,
2026
+ and then controls the manipulator. The cost function is defined as c = d, where d is the distance between the goal and
2027
+ end-effector. The other settings can be found in Table 6.
2028
+ Spacerandom-cost
2029
+ This task aims to make a free-floating single-arm space robot’s end-effector reach a random goal
2030
+ position. The agent can obtain the state, including the angular positions and velocities of joints, the position of the
2031
+ end-effector, and the position of the reference point. Then, the agent outputs the desired velocities of joints. In low-level
2032
+ planning, a PD controller converts the desired velocities into torques to control the manipulator. The cost function is defined
2033
+ as c = d, where d is the distance between goal and end-effector. The other settings can be found in Table 6.
2034
+ Spacedualarm-cost
2035
+ This task aims to make a free-floating dual-arm space robot’s end-effectors reach random goal
2036
+ positions. The complexity of the task increases dramatically due to two arms’ coupling effects on the base. The agent can
2037
+ obtain the state, including the angular positions and velocities of joints, the positions of end-effectors, and the position
2038
+ of target points of two manipulators. Then, the agent outputs the desired velocities of joints. In low-level planning, a PD
2039
+ controller converts the desired velocities into torques to control the manipulators. The cost function is defined as follows:
2040
+ c = d0 + d1, where di is the distance between goal and end-effector of Arm-i. The other settings can be found in Table 6.
2041
+
2042
+ Adaptive Stability Certification
2043
+ Table 6. Hyper-parameters of robotic environments
+ Hyper-parameters        Minitaur-cost    Spacereach-cost   Spacerandom-cost   Spacedualarm-cost
+ State shape             27               18                18                 54
+ Action shape            8                6                 6                  12
+ Length of an episode    500 steps        200 steps         200 steps          200 steps
+ Maximum steps           1e6 steps        3e5 steps         5e5 steps          5e5 steps
+ Actor network           (256, 256)       (256, 256)        (256, 256)         (512, 512)
+ Critic network          (256, 256, 16)   (256, 256, 128)   (256, 256, 128)    (512, 512, 256)
2079
+ D.2. Implementation Details
2080
+ D.2.1. BASELINES
2081
+ SAC-cost
+ Soft Actor-Critic (SAC) is an off-policy maximum-entropy actor-critic algorithm (Haarnoja et al., 2018). Its main
+ contribution is to add a maximum-entropy objective to the standard actor-critic objective. The soft Q and V functions are
+ trained to minimize the soft Bellman residual, and the policy is learned by directly minimizing the expected KL-divergence.
+ The only difference between SAC and SAC-cost is that the latter minimizes a cost function instead of maximizing a reward
+ function. The hyper-parameters of SAC-cost are listed in Table 7.
+ Table 7. Hyper-parameters of SAC-cost
+ Hyper-parameters            SAC-cost
+ Learning rate of actor      1e-4
+ Learning rate of critic     3e-4
+ Optimizer                   Adam
+ ReplayBuffer size           10^6
+ Discount (γ)                0.995
+ Polyak (1 − τ)              0.995
+ Entropy coefficient         1
+ Batch size                  256
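+ The "minimize a cost instead of maximizing a reward" convention above can be obtained with a thin wrapper around a Gym-style environment that returns costs (a hedged sketch; the wrapped API is assumed, not taken from the paper's code):
+ class CostAsNegativeReward:
+     """Expose reward = -cost so an off-the-shelf SAC implementation minimizes the cost."""
+ 
+     def __init__(self, env):
+         self.env = env
+ 
+     def reset(self, **kwargs):
+         return self.env.reset(**kwargs)
+ 
+     def step(self, action):
+         obs, cost, done, info = self.env.step(action)   # underlying env returns a cost
+         info = dict(info, cost=cost)                     # keep the raw cost for logging
+         return obs, -cost, done, info                    # SAC sees -cost as the reward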
2106
+ SPPO
+ Safe proximal policy optimization (SPPO) is a Lyapunov-based safe policy optimization algorithm. A neural Lyapunov
+ network is constructed to prevent unsafe behaviors, and the safe projection step is inspired by the TRPO algorithm
+ (Schulman et al., 2015). In this paper, we modify it to apply the Lyapunov constraints to the MDP tasks, similar to the
+ procedure in (Han et al., 2020a). The hyper-parameters of SPPO are listed in Table 8.
+ Table 8. Hyper-parameters of SPPO
+ Hyper-parameters              SPPO
+ Learning rate of actor        1e-4
+ Learning rate of Lyapunov     3e-4
+ Optimizer                     Adam
+ Discount (γ)                  0.995
+ GAE parameter (λ)             0.95
+ Clipping range                0.2
+ KL constraint (δ)             0.2
+ Fisher estimation fraction    0.1
+ Conjugate gradient steps      10
+ Conjugate gradient damping    0.1
+ Backtracking steps            10
+ Timesteps per iteration       2000
2138
+
2139
+ Adaptive Stability Certification
2140
+ LAC
+ The Lyapunov-based Actor-Critic (LAC) algorithm is an actor-critic RL algorithm that jointly learns a neural controller
+ and a Lyapunov function (Han et al., 2020a). In particular, it proposes a data-driven stability condition on the expected
+ value over the state space, and it has been shown to achieve high generalization and robustness. The hyper-parameters of
+ LAC are listed in Table 9; α3 is 0.1 in LAC and is changed to 1 in LAC∗.
+ Table 9. Hyper-parameters of LAC
+ Hyper-parameters                         LAC
+ Learning rate of actor                   1e-4
+ Learning rate of Lyapunov                3e-4
+ Learning rate of Lagrange multiplier     3e-4
+ Optimizer                                Adam
+ ReplayBuffer size                        10^6
+ Discount (γ)                             0.995
+ Polyak (1 − τ)                           0.995
+ Parameter of Lyapunov constraint (α3)    0.1
+ Batch size                               256
2166
+ POLYC
+ The Policy Optimization with Self-Learned Almost Lyapunov Critics (POLYC) algorithm is built on the standard PPO
+ algorithm (Schulman et al., 2017). Introducing a Lyapunov function without access to the cost allows the agent to
+ self-learn the Lyapunov critic by minimizing the Lyapunov risk. The hyper-parameters of POLYC are listed in Table 10.
+ Table 10. Hyper-parameters of POLYC
+ Hyper-parameters                     POLYC
+ Learning rate of actor               1e-4
+ Learning rate of critic              3e-4
+ Learning rate of Lyapunov            3e-4
+ Optimizer                            Adam
+ Discount (γ)                         0.995
+ GAE parameter (λ)                    0.95
+ Weight of Lyapunov constraint (β)    0.1
+ Clipping range                       0.2
+ Timesteps per iteration              2000
2192
+ LBPO
+ The Lyapunov Barrier Policy Optimization (LBPO) algorithm (Sikchi et al., 2021) is built on the SPPO algorithm (Chow
+ et al., 2019). Its core improvement is a Lyapunov-based barrier function that restricts the policy update to a safe set at each
+ training iteration; compared with SPPO, it avoids backtracking to ensure safety. Our implementation follows the same
+ procedure as for SPPO. The hyper-parameters of LBPO are listed in Table 11.
+ TNLF
+ The Twin Neural Lyapunov Function (TNLF) algorithm was proposed for safe robot navigation in (Xiong et al., 2022).
+ Unlike other approaches, TNLF defines a Lyapunov V function and a Lyapunov Q function, both trained by minimizing
+ the Lyapunov risk, which is similar to that of (Chang & Gao, 2021). Since the Lyapunov function strictly decreases over
+ time, a robot starting from any state in a Region of Attraction (RoA) always stays in the RoA. As our environments only
+ provide a cost function, the objective besides the Lyapunov risk is to minimize the cumulative cost. The hyper-parameters
+ of TNLF are listed in Table 12.
+ D.2.2. OUR METHOD
+ ALAC
+ Compared with the baselines, our method offers the significant advantage of using fewer hyper-parameters. The main
+ hyper-parameters are listed in Table 13. We note that these parameters only control the learning of the networks, without
2210
+
2211
+ Adaptive Stability Certification
2212
+ Table 11. Hyper-parameters of LBPO
+ Hyper-parameters                     LBPO
+ Learning rate of actor               1e-4
+ Learning rate of critic              1e-4
+ Learning rate of Lyapunov            3e-4
+ Optimizer                            Adam
+ Discount (γ)                         0.99
+ GAE parameter (λ)                    0.97
+ Clipping range                       0.2
+ KL constraint                        0.012
+ Fisher estimation fraction           0.1
+ Conjugate gradient steps             10
+ Conjugate gradient damping           0.1
+ Backtracking steps                   10
+ Weight of Lyapunov constraint (β)    0.01
+ Timesteps per iteration              2000
2243
+ Table 12. Hyper-parameters of TNLF
+ Hyper-parameters                       TNLF
+ Learning rate of actor                 1e-4
+ Learning rate of critic                3e-4
+ Learning rate of Lyapunov V function   3e-4
+ Learning rate of Lyapunov function     3e-4
+ Optimizer                              Adam
+ ReplayBuffer size                      10^6
+ Discount (γ)                           0.995
+ Polyak (1 − τ)                         0.995
+ Weight of Lyapunov constraint (α)      0.1
+ Variance of noise distribution         1
+ Batch size                             256
2268
+ including the parameters of the constraints; these are updated automatically through the Lagrange multipliers λl and λe.
+ The initial value of the Lagrange multipliers is set to 1, a common choice in previous constrained methods.
2270
+ D.3. More Results on Comparison
2271
+ Figure 11 shows the learning curves of the accumulated cost and constraint violations of ALAC and other baselines in ten
2272
+ environments.
2273
+ D.4. More Results on Ablation Study
2274
+ We provide the specific formulations of ∆L¹_πφ and ∆L²_πφ. Compared with ∆L_πφ in Equation (14), ∆L¹_πφ and ∆L²_πφ are,
+ respectively, an upper and a lower bound of ∆L_πφ. In other words, ∆L¹_πφ represents the strongest constraint, while ∆L²_πφ
+ represents the loosest constraint. The comparison between them demonstrates that the ASC condition (∆L_πφ) has a
+ positive effect on both optimality and stability.
+ ∆L¹_πφ(s, a) = Lθ(s′, πφ(·|s′)) − Lθ(s, a) + k[ Lθ(s, a) − 0 ]
+ ∆L²_πφ(s, a) = Lθ(s′, πφ(·|s′)) − Lθ(s, a) + k[ Lθ(s, a) − Lθ(s′, πφ(·|s′)) ]    (58)
2289
+ The ablation experiments on other tasks are shown in Figure 10.
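+ The three certification terms compared in this ablation can be written out directly (a short sketch with assumed toy values; L_now and L_next stand for Lθ(s, a) and Lθ(s′, πφ(·|s′))). The printed comparisons illustrate the ordering ∆L¹ ≥ ∆L ≥ ∆L² when the Lyapunov values are non-negative and λ ≤ 1.
+ import numpy as np
+ 
+ def delta_L(L_now, L_next, k, lam):      # ASC condition, as in Eq. (14)
+     return L_next - L_now + k * (L_now - lam * L_next)
+ 
+ def delta_L1(L_now, L_next, k):          # strongest variant: reference term is 0
+     return L_next - L_now + k * (L_now - 0.0)
+ 
+ def delta_L2(L_now, L_next, k):          # loosest variant: reference term is L_next
+     return L_next - L_now + k * (L_now - L_next)
+ 
+ L_now = np.array([1.0, 0.8, 0.5])        # assumed batch of Lyapunov values
+ L_next = np.array([0.9, 0.7, 0.45])
+ k, lam = 1.0, 0.9
+ print(delta_L1(L_now, L_next, k) >= delta_L(L_now, L_next, k, lam))
+ print(delta_L(L_now, L_next, k, lam) >= delta_L2(L_now, L_next, k))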
2290
+
2291
+ Adaptive Stability Certification
2292
+ Table 13. Hyper-parameters of ALAC
+ Hyper-parameters                                     ALAC
+ Learning rate of actor                               1e-4
+ Learning rate of Lyapunov                            3e-4
+ Learning rate of Lagrange multipliers (λl and λe)    3e-4
+ Optimizer                                            Adam
+ ReplayBuffer size                                    10^6
+ Discount (γ)                                         0.995
+ Polyak (1 − τ)                                       0.995
+ Batch size                                           256
2311
+ n_components=2 or 3,
+ early_exaggeration=12,
+ learning_rate=200.0,
+ n_iter=1000,
+ n_iter_without_progress=300,
+ min_grad_norm=1e-7,
+ perplexity=30,
+ metric="euclidean",
+ n_jobs=None,
+ random_state=42,
+ verbose=True,
+ init='pca'
2335
+ Table 14. Other hyper-parameters of t-SNE method.
2336
+ D.5. Details of Visualization
2337
+ Because the state spaces are high-dimensional, it is difficult to show directly how the states of our stability-guided policy
+ optimization evolve during the convergent process across different environments. To reveal and visualize this behaviour:
+ • We use the t-SNE dimensionality reduction technique to visualize the state space.
+ • We plot the phase trajectory with variance from the state pairs of joint angular position and velocity.
+ • We plot the Lyapunov-value surface and its shadow along the phase trajectory and the values in the convergence process.
+ T-SNE Visualization
+ The top row of Figure 4 shows the t-SNE state plots produced with the SciKit-Learn tools (i.e., the sklearn.manifold.TSNE
+ function) with varying parameters (e.g., early_exaggeration, min_grad_norm). Cartpole-cost is visualized with
+ n_components=2, while the other environments use n_components=3. The hyper-parameters for t-SNE
+ are shown in Table 14.
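+ As a usage sketch, the Table 14 settings map onto a SciKit-Learn call as follows; `states` below is a placeholder array standing in for the recorded state trajectories, not the paper's data.
+ import numpy as np
+ from sklearn.manifold import TSNE
+ 
+ states = np.random.default_rng(42).normal(size=(500, 18))   # placeholder data
+ 
+ embedding = TSNE(
+     n_components=3,              # 2 for Cartpole-cost, 3 for the other tasks
+     early_exaggeration=12,
+     learning_rate=200.0,
+     n_iter=1000,
+     n_iter_without_progress=300,
+     min_grad_norm=1e-7,
+     perplexity=30,
+     metric="euclidean",
+     n_jobs=None,
+     random_state=42,
+     verbose=True,
+     init="pca",
+ ).fit_transform(states)
+ print(embedding.shape)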
2348
+ Phase Trajectories of Systems
+ We select the angular position and velocity of one joint of the state space in each environment and plot the phase trajectory
+ with variance in Figure 6. The convergent process is visible as the angular velocity starts and ends at 0 while the joint angle
+ moves from its initial value to the convergence position.
2352
+ Lyapunov Functions of Systems
+ We visualize the change of the Lyapunov value in three dimensions along the phase trajectory. The second row of Figure 4
+ shows the Lyapunov-value surface. The value curves along the phase trajectory are down-sampled, smoothed by a Gaussian
+ filter and mapped to the whole plane; the values and the corresponding phase-trajectory shadows are added to the plot at
+ the same time.
2357
+
2358
+ Adaptive Stability Certification
2359
+ [Figure 6: phase-trajectory plots (angular position vs. angular velocity) for Cartpole-cost, Halfcheetah-cost, Minitaur-cost and Spacerandom-cost.]
2416
+ Spacerandom-cost phase trajectory
2417
+ Figure 6. Phase trajectories of the systems trained by ALAC. (we report the results of 20 trials and select a joint to graph the
2418
+ phase trajectory in each task.)
2419
+ D.6. More Results on Evaluation
2420
+ D.6.1. ROBUSTNESS
2421
+ We verify that ALAC achieves excellent robustness on most tasks. It is worth noting that we introduce periodic external
2422
+ disturbances with different magnitudes in each task. Furthermore, we omit the algorithms which do not converge to a
2423
+ reasonable solution in each task.
2424
+ D.6.2. GENERALIZATION
2425
+ We verify that ALAC achieves excellent generalization when the error feedback is provided. In particular, the gap between
+ the two methods widens as the bias increases. Furthermore, we observe that the errors have a negative impact on the
+ performance of SAC-cost, likely because SAC-cost cannot exploit the error information without the guidance of a Lyapunov
+ function. Note that the number of environment steps in Halfcheetah-cost is 5e5 in this section.
2429
+ D.6.3. EFFICIENCY
2430
+ We verify that ALAC achieves comparable performance under different network structures of the actor on three tasks. By
2431
+ contrast, the network structure significantly impacts the performance of SAC-cost.
2432
+
2433
+ Adaptive Stability Certification
2434
+ [Figure 7: cost return vs. disturbance magnitude (0.0–0.5) for ALAC(ours), LAC, LAC*, POLYC, SAC-cost, SPPO and TNLF on Cartpole-cost, Pointcircle-cost, Swimmer-cost, HalfCheetah-cost, Ant-cost, Humanoid-cost, Minitaur-cost, Spacereach-cost, Spacerandom-cost and Spacedualarm-cost.]
2669
+ TNLF
2670
+ Figure 7. Performance of ALAC method and other baselines under persistent disturbances with different magnitudes. (The
2671
+ X-axis indicates the magnitude of the applied disturbance. We evaluate the trained policies for 20 trials in each setting.)
2672
+ [Figure 8: cost return vs. goal bias (−20% to +20%) for ALAC w/ error, ALAC w/o error, SAC-cost w/ error and SAC-cost w/o error on Pointcircle-cost, HalfCheetah-cost and Spacereach-cost.]
2728
+ SAC-cost w/ error
2729
+ SAC-cost w/o error
2730
+ Figure 8. Evaluation of ALAC and SAC-cost methods in the presence of different biases of goals. (The X-axis indicates the
2731
+ magnitude of the applied shifting. We evaluate the trained policies for 20 trials in each setting.)
2732
+ [Figure 9: training curves (cost return vs. timestep) for ALAC and SAC-cost with actor networks of size 64x64, 32x32 and 16x16.]
2793
+ SAC-cost(32x32)
2794
+ SAC-cost(16x16)
2795
+ Figure 9. Learning curves of ALAC and SAC-cost methods with different network structures of the actor on Halfcheetah-cost,
2796
+ Minitaur-cost, and Spacerandom-cost tasks.
2797
+
2798
+ Adaptive Stability Certification
2799
+ [Figure 10: cost return and constraint violation vs. timestep for ALAC(original), ALAC(∆L1), ALAC(∆L2), ALAC(Tanh) and ALAC(kl = 0.1) on Cartpole-cost, Pointcircle-cost, HalfCheetah-cost, Swimmer-cost, Ant-cost, Spacereach-cost, Spacerandom-cost and Spacedualarm-cost.]
3201
+ ALAC(Tanh)
3202
+ ALAC(kl = 0.1)
3203
+ Figure 10. Ablation studies of the ASC condition. ALAC(original) shows comparable or the best performance compared
3204
+ with other certifications on each task.
3205
+
3206
+ Adaptive Stability Certification
3207
+ [Figure 11 panel data: cost return and constraint violation versus timestep for Cartpole-cost, Pointcircle-cost, HalfCheetah-cost, Swimmer-cost, Ant-cost, Humanoid-cost, Minitaur-cost, Spacereach-cost, Spacerandom-cost, and Spacedualarm-cost; legend: ALAC(ours), LAC, LAC*, LBPO, POLYC, SAC-cost, SPPO, TNLF.]
+ Figure 11. Performance comparison on ten tasks. The ALAC method finds a good trade-off between minimizing the accumulated cost and constraint violations in contrast to their rivals.
3696
+
4tAyT4oBgHgl3EQfpPhP/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
79E1T4oBgHgl3EQfTwOd/content/tmp_files/2301.03082v1.pdf.txt ADDED
@@ -0,0 +1,295 @@
1
+ Noname manuscript No.
2
+ (will be inserted by the editor)
3
+ Przemysław Kościk
4
+ On the Exponential Decay of Strongly Interacting
5
+ Cold Atoms from a Double-Well Potential
6
+ the date of receipt and acceptance should be inserted later
7
+ Abstract In this article, we study the exponential decay of a gas of bosons with strong repulsive
8
+ delta interactions from a double-well potential. We consider an exactly solvable model comprising an
9
+ infinite wall and two Dirac delta barriers. We explore its features both within the exact method and
10
+ with the resonance expansion approach. The study reveals the effect of the splitting barrier on the
11
+ decay rate in dependence on the number of particles. Among other things, we find that the effect of the
12
+ splitting barrier on the decay rate is most pronounced in systems with odd particle numbers. During
13
+ exponential decay, the spatial correlations in an internal region are well captured by the “radiating
14
+ state”.
15
+ 1 Introduction
16
+ Over the past few years there has been a growing interest in understanding the decay properties
17
+ of unstable quantum states [1,2,3,4,5,6,7,8]. In particular, recent progress in fabricating systems of
18
+ interacting particles has inspired the theoretical community to study the decay properties of unstable
19
+ many-particle states [9,10,11,12,13,14]. A simple model to study the decay process of many-particle
20
+ states is the system of bosons with infinitely strong delta-contact interactions, i.e., the so-called Tonks-
21
+ Girardeau (TG) gas [15]. Considerable effort has been made already to understand the decay properties
22
+ of such systems. Among other works, the relevant for exponential and long-time decays were presented
23
+ in [12] and [13], respectively. A recent paper [14] went even further and explained the decay mechanism
24
+ of TG gases at intermediate stages of the time evolution (between exponential and long-time regimes).
25
+ In the present paper, we provide a deeper insight into the exponential decay of TG gases from the
26
+ double-well trap. As a model, we consider the potential in the form [14]
27
+ V (x) =
28
+
29
+ ∞,
30
+ if x ≤ −L
31
+ αδ(x) + ηδ(x − L)
32
+ otherwise.
33
+ (1)
34
+ Note that the model is a modification of the celebrated Winter model [3], to which a Delta barrier at
35
+ x = 0 was added, see Fig.1. The remainder of this article is structured as follows. Section 2 discusses
36
+ the theoretical tools for studying the time evolution of the decaying TG gas and focuses on the results.
37
+ Section 3 presents some concluding remarks.
38
+ 2 Results
39
+ The scenario we consider is typical of controllable studies of the tunnelling phenomena in modern
40
+ experiments. In the case studied, the system is initially prepared (t < 0) in the ground state of the
41
+ University of Applied Sciences, Department of Computer Sciences, ul. Mickiewicza 8, PL-33100 Tarnów, Poland
42
+ arXiv:2301.03082v1 [cond-mat.quant-gas] 8 Jan 2023
43
+
44
+ 2
45
+ Fig. 1 Illustrative diagram of the double-well structure in Eq. (1)
46
+ TG gas in a hard-wall split trap (η = ∞). At t = 0, the strength of the right barrier is changed to a
47
+ finite value of η. As a result, the initial state is no longer stationary and begins to evolve in time.
48
+ According to Bose-Fermi mapping [15], the time-dependent TG wave function is given by
49
+ Ψ(x1, x2, ..., xN, t) = Π_{k<l} sgn(xk − xl) (1/√N!) det_{i,j}^{N}[φi(xj, t)],    (2)
57
+ where the one-particle state φk(x, t) is governed by the Schrödinger equation,
+ Iℏ ∂φk(x, t)/∂t = [−(ℏ²/2m) ∂²/∂x² + V(x)] φk(x, t),    (3)
65
+ with the initial condition as the bound-state eigenfunction of the hard-wall split trap, ϕk(x), that
66
+ is, φk(x, t)|t=0 = ϕk(x). From here we set L = ℏ = m = 1 so that the spatial coordinates, time
67
+ coordinates, and energies are measured in units of L, mL²/ℏ, and ℏ²/(mL²), respectively. The
68
+ system under consideration has a nice feature where both eigenfunctions of the hard-wall split trap,
69
+ ϕk(x) and the continuum wave functions (η < ∞) ψp(x) (normalised to a Dirac delta distribution) can be
70
+ obtained in closed analytical forms. For further detail, we refer readers to the papers [14,16], in which
71
+ the relevant formulas are reported. Thanks to those, the solutions to Eq. (3) can be condensed in a
72
+ Fourier series as follows:
73
+ φk(x, t) = ∫_0^∞ ck(p) ψp(x) e^{−I t p²/2} dp,    (4)
+ where ck(p),
+ ck(p) = ∫_{−1}^{1} ϕk(x) ψp(x) dx,    (5)
85
+ is given in closed analytical form [14]. Nonetheless, numerical computations are required to evaluate
86
+ the integrals in Eq. (4). We conduct our analysis in terms of the non-escape probability,
87
+ P (N)(t) =
88
+
89
+ ∆N |Ψ(x1, x2, ..., xN, t)|2dx1...dxN,
90
+ (6)
91
+
92
+ 4
93
+ 3.5
94
+ V(x)
95
+ 3
96
+ 2.5
97
+ 8
98
+ n (x-L)
99
+ 2
100
+ 1.5
101
+ α (x)
102
+ 1
103
+ 0.5
104
+ 0
105
+ x=-L
106
+ x=0
107
+ x=L
108
+ x3
109
+ ∆N = [−1, 1]N (internal region), which informs us of the probability that N bosons remain in the
110
+ internal region at time t. For the TG wavefunction in Eq. (2), the N-particle non-escape probabil-
111
+ ity can be reduced to the matrix form [12,13] P(N)(t) = det_{k,l}^{N}[Pkl(t)] with the entries
+ Pkl(t) = ∫_{−1}^{1} ψ∗k(x, t) ψl(x, t) dx. The diagonal elements Pkk(t) are nothing but the non-escape probabilities
116
+ of the one-particle states. We denote Pk(t) = Pkk(t). Within the resonance expansion method [4],
117
+ the one-particle state that experiences an exponential decay follows, in the region ∆, the approx-
+ imation ψk(x, t) ≈ Mk(x) e^{−Γk t/2 − I εk t}, with Γk = −Im{pk²}, εk = (Im{pk}² − Re{pk}²)/2, and
+ Mk(x) = 2πI res_{pk}{ck(p)ψp(x)}, where pk are the roots of the denominator of the integrand in Eq. (4)
+ in the fourth quadrant of the complex p-plane (the proper poles), and res_{pk}{f} stands for the residue
+ of f at the pole pk. Then, the corresponding non-escape probability is Pk(t) ≈ mk e^{−Γk t}, where
+ mk = ∫_{−1}^{1} |Mk(x)|² dx. If mk ≈ 1 (Pk(0) ≈ 1), then the exponential decay starts at about t = 0 and the state
+ |ψk(t)⟩ can well be approximated by the so-called “radiating state”: |ψk(t)⟩ ≈ e^{−Γk t/2 − I εk t} |ψk(0)⟩.
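As a quick arithmetic illustration of the pole formulas above (an illustrative sketch, not part of the paper; the pole value below is hypothetical), the decay rate Γk and the shift εk follow directly from a proper pole pk:

```python
import numpy as np

# Hypothetical proper pole in the fourth quadrant of the complex p-plane;
# the actual poles must be found from the denominator of the integrand in
# Eq. (4) for the chosen barrier strengths alpha and eta.
p_k = 3.10 - 0.04j

Gamma_k = -np.imag(p_k**2)                        # Gamma_k = -Im{p_k^2}
eps_k = (np.imag(p_k)**2 - np.real(p_k)**2) / 2   # eps_k = (Im{p_k}^2 - Re{p_k}^2)/2

print(Gamma_k)  # 0.248  (= 2 * 3.10 * 0.04)
print(eps_k)    # -4.8042
```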
126
+ When taking this approach, the time-dependent TG wavefunction in the region ∆N takes the form:
127
+ Ψ(x1, x2, ..., xN, t) ≈ e^{−Γ(N)t/2 − Iε(N)t} Π_{k<l} sgn(xk − xl) (1/√N!) det_{i,j}^{N}[φi(xj, 0)],    (7)
+ with Γ(N) = Σ_{k=1}^{N} Γk and ε(N) = Σ_{k=1}^{N} εk. Consequently, its validity is expected to hold when mk ≈ 1
+ for k = 1, ..., N (M(N) = Π_{k=1}^{N} mk ≈ 1). The corresponding N-particle non-escape probability is:
+ P(N)(t) ≈ e^{−Γ(N)t}.    (8)
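To make the determinant formula for P(N)(t) and the rate additivity in Eq. (8) concrete, here is a minimal numerical sketch (not the author's code; the grid, the helper names, and the toy orbitals are assumptions for illustration, since obtaining the true φk(x, t) requires evaluating the momentum integral in Eq. (4)):

```python
import numpy as np

def overlap_matrix(phi_t, x):
    """N x N matrix Pkl(t) = integral over [-1, 1] of phi_k*(x, t) phi_l(x, t) dx.
    phi_t has shape (N, len(x)) and holds the time-evolved orbitals on the grid x."""
    dx = x[1] - x[0]
    return (phi_t.conj()[:, None, :] * phi_t[None, :, :]).sum(axis=-1) * dx

def nonescape_probability(phi_t, x):
    """P(N)(t) = det[Pkl(t)] for the Tonks-Girardeau state built from phi_t."""
    return np.linalg.det(overlap_matrix(phi_t, x)).real

# Toy check: two orthogonal box modes on [-1, 1] whose norms inside the internal
# region have decayed to 0.81 and 0.64 at some time t (hypothetical numbers).
x = np.linspace(-1.0, 1.0, 4001)
u1 = np.sin(np.pi * (x + 1) / 2)   # normalised on [-1, 1]
u2 = np.sin(np.pi * (x + 1))       # normalised on [-1, 1], orthogonal to u1
phi_t = np.stack([0.9 * u1, 0.8 * u2])

print(nonescape_probability(phi_t, x))   # ~0.5184 = 0.81 * 0.64
```

When the decayed orbitals remain (approximately) orthogonal on [−1, 1], the determinant reduces to the product of the one-particle survival probabilities, consistent with Eq. (8), where Γ(N) = Σk Γk.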
141
+ Now, we focus on examining when the above approximations are satisfied and what follows from their
142
+ applicability. Our results are summarised in Fig. 2. Fig. 2 (a) shows the behaviour of M (N) as a function
143
+ of N. We can observe a local minimum with a value slightly smaller than one. As a result, there is a
144
+ value where N = Nc (greater than where the minimum occurs) such that M (Nc) ≈ 1. Starting from
145
+ N = Nc, the deviation of M (N) from 1 rapidly increases as N increases. This suggests that the point
146
+ N = Nc can be viewed as a transition point to the regime in which the non-escape probability begins to
147
+ diverge significantly from its approximation in Eq. (8). To clarify this, Fig. 2 (b) offers a comparison of
148
+ the results obtained from Eq. (8) with the results of the exact numerical calculations, where to support
149
+ the presentation, only the case η = 10, α = 0 is shown. As the results indicate, the period in which the
150
+ decay is consistent with the “radiating state” shrinks with increasing N. When N exceeds the critical
151
+ value Nc = 10 (see Fig. 2 (a)), the decay of the N-particle state switches to the non-exponential regime
152
+ at a very small t value. When the splitting barrier is present, an effect of the parity of the number of
153
+ particles appears. This is demonstrated in Fig. 2 (c) which displays the behaviour of a relative change
154
+ defined as γ(N) = (Γ(N) − Γ(N)_{α=0}) / Γ(N)_{α=0}, where Γ(N)_{α=0} represents the decay rate for a system without
158
+ the splitting barrier. We conclude that the change in the decay process caused by the addition of the
159
+ splitting barrier is most pronounced for systems with odd numbers of particles and in the small N
160
+ regime, i.e. where γ(N) exhibits its most rapid variation. When there is instead an even number of
161
+ particles, the decay rate becomes almost insensitive to changes in α. It is worth mentioning that the
162
+ coherence of the initial state depends on α in the opposite way [16]. That is to say, it strongly depends
163
+ on α only when N is even. To determine whether Eq. (7) can capture the correlation in the internal
164
+ region, we tested its ability to reproduce a function n(x, t) defined as P(N)(t) = ∫_{−1}^{1} n(x, t) dx,
+ n(x, t) = ∫_{∆_{N−1}} |Ψ(x, x2, ..., xN, t)|² dx2 ... dxN.    (9)
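Spelling out the normalisation behind the interpretation given in the next sentence (a one-line consequence of Eq. (9) together with the definition P(N)(t) = ∫_{−1}^{1} n(x, t) dx):

```latex
\int_{-1}^{1} \frac{n(x,t)}{P^{(N)}(t)}\,\mathrm{d}x
  = \frac{1}{P^{(N)}(t)} \int_{-1}^{1} n(x,t)\,\mathrm{d}x
  = \frac{P^{(N)}(t)}{P^{(N)}(t)} = 1 ,
```

so n(x, t)/P(N)(t) is a properly normalised single-particle density on the internal region, conditioned on all N particles having remained inside.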
171
+ If n(x, t) is divided by the corresponding value of P (N)(t), then the resulting quantity can be interpreted
172
+ as the probability density of finding a particle, provided that all the particles remain in the region ∆.
173
+ The correctness of the “radiating state” in reproducing the spatial correlation is confirmed in Fig. 2 (d),
174
+ where the results for n(x, t) obtained using it and the exact wave function are compared. The results
175
+ imply that during exponential decay, spatial correlations in the internal region are indeed consistent
176
+ with those in the initial state. More details regarding the correlation in the initial state (i.e. in the TG
177
+ ground state in the hard-wall split trap) can be found in [16].
178
+
179
+ 4
180
+ Fig. 2 Results for different quantities as discussed in the text, when investigating transparent control pa-
181
+ rameter values. (a) Results for M (N) as a function of N. (b) N-particle non-escape probability, where the
182
+ continuous lines represent the results of Eq.(8) and the markers the results of exact calculations. (c) Behaviour
183
+ of the relative change γ(N). (d) Results for n(x, t), obtained for N = 3 particles at t = 10 from the numerically
184
+ exact wave function (markers) and its approximation in Eq. (7) (continuous lines)
185
+
186
+ [Figure 2 artwork: panels (a)–(d) as described in the caption, with curves for η = 10, 15, α = 0, 1, 3, 5, and N = 3, 5, 8, 15.]
271
+ In this article, we have studied the exponential decay of N bosons with strong delta interactions from
272
+ the double-well structure. Using the resonance expansion approach, we have analysed the effect of the
273
+ splitting barrier on the decay rate of the N-particle system in dependence on N. We have found that
274
+ the splitting barrier strongly affects the decay rates of systems with odd numbers of particles. In such
275
+ systems, the coherence of the initial state behaves differently and is insensitive to changes in the height
276
+ of the splitting barrier. Our results have shown that in the exponential regime the “radiating state”
277
+ effectively reproduces the spatial correlations in the internal region.
278
+ References
279
+ 1. G. Gamow, Z. Phys. 51, 204 (1928)
+ 2. E. U. Condon, R. W. Gurney, Nature (London) 112, 439 (1928)
+ 3. R. G. Winter, Phys. Rev. 123, 1503 (1961)
+ 4. G. Garcia-Calderón, J. L. Mateos, M. Moshinsky, Phys. Rev. Lett. 74, 337 (1995)
+ 5. A. Wyrzykowski, Acta Physica Polonica B 51, 11 (2020)
+ 6. F. Giacosa, P. Kościk, and T. Sowiński, Phys. Rev. A 102, 022204 (2020)
+ 7. D. R. Jiménez, N. G. Kelkar, Phys. Rev. A 104, 022214 (2021)
+ 8. G. Garcia-Calderón, R. Romo, Annals of Physics 424, 168348 (2021)
+ 9. G. Garcia-Calderón, L. G. Mendoza-Luna, Phys. Rev. A 84, 032106 (2011)
+ 10. J. Dobrzyniecki, T. Sowiński, Phys. Rev. A 98, 013634 (2018)
+ 11. I. S. Ishmukhamedov, Physica E: Low-dimensional Systems and Nanostructures 142, 11522 (2022)
+ 12. M. Pons, D. Sokolovski, A. del Campo, Phys. Rev. A 85, 022107 (2012)
+ 13. A. del Campo, Phys. Rev. A 84, 012113 (2011)
+ 14. P. Kościk, Phys. Rev. A 102, 033308 (2020)
+ 15. M. D. Girardeau, J. Math. Phys. 1, 516 (1960)
+ 16. X. Yin, Y. Hao, S. Chen, and Y. Zhang, Phys. Rev. A 78, 013604 (2008)
295
+
79E1T4oBgHgl3EQfTwOd/content/tmp_files/load_file.txt ADDED
9dE1T4oBgHgl3EQfoAQF/content/tmp_files/2301.03314v1.pdf.txt ADDED
@@ -0,0 +1,818 @@
2
+ SIGN INVOLUTIONS ON PARA-ABELIAN VARIETIES
3
+ JAKOB BERGQVIST, THUONG DANG, AND STEFAN SCHR¨OER
4
+ 9 January 2023
5
+ Abstract. We study the so-called sign involutions on twisted forms of abelian
6
+ varieties, and show that such a sign involution exists if and only if the class in the
7
+ Weil–Chˆatelet group is annihilated by two. If these equivalent conditions hold,
8
+ we prove that the Picard scheme of the quotient is ´etale and contains no points
9
+ of finite order. In dimension one, such quotients are Brauer–Severi curves, and
10
+ we analyze the ensuing embeddings of the genus-one curve into twisted forms of
11
+ Hirzebruch surfaces and weighted projective spaces.
12
+ Contents
13
+ Introduction 1
+ 1. The scheme of sign involutions 3
+ 2. The Picard scheme of the quotient 6
+ 3. Morphisms to Brauer–Severi curves 9
+ References 13
26
+ Introduction
27
+ Recall that an abelian variety A over a ground field k is a group scheme that
28
+ is proper, smooth, and with h0(OA) = 1. It then follows that the group law is
29
+ commutative, such that A comes with a canonical automorphism x �→ −x. This
30
+ sign involution plays a prominent role in the theory of abelian varieties, because it
31
+ gives rise to the notion of symmetric invertible sheaves. Furthermore, one can form
32
+ the quotient A/G for the corresponding group G = {±1} of order two. In dimension
33
+ g = 1 this gives the projective line, whereas for g = 2 we get Kummer surfaces. In
34
+ characteristic p ̸= 2 this is a K3 surface with rational double points. The case p = 2
35
+ requires extra attention, because then A/G may also be a rational surface with an
36
+ elliptic singularity ([35] and [16]).
37
+ In this paper we investigate the existence of sign involutions σ on twisted forms
38
+ X of abelian varieties A, over general ground fields k of arbitrary characteristic
39
+ p ≥ 0. These σ are involutions on X that become sign involutions with respect to
40
+ a suitable group law that arises on some base-change. The following point of view,
41
+ developed by Laurent and the third author [18], is most suitable: A para-abelian
42
+ variety is a proper scheme X such that X ⊗ k′ admits the structure of an abelian
43
+ variety, for some field extension k ⊂ k′. It then turns out that the subgroup
44
+ scheme A ⊂ AutX/k that acts trivially on the numerically trivial part Picτ
45
+ X/k is an
46
+ abelian variety, and that the canonical A-action on X is free and transitive. In
47
+ 2010 Mathematics Subject Classification. 14L30, 14K15, 14K30, 14J26.
48
50
+
51
53
+ turn, one may view the scheme X as a torsor with respect to the abelian variety A,
54
+ and obtains a class [X] in the Weil–Chˆatelet group H1(k, A). Our first main result
55
+ relates these cohomology classes with the kernel A[2] for the multiplication-by-two
56
+ map and the existence of sign involutions on X:
57
+ Theorem. (See Thm. 1.2) Let X be a para-abelian variety. Then the following are
58
+ equivalent:
59
+ (i) There is a sign involution σ : X → X.
60
+ (ii) We have 2 · [X] = 0 in the Weil–Chˆatelet group H1(k, A).
61
+ (iii) There is a torsor P with respect to H = A[2] such that X ≃ P ∧H A.
62
+ Here P ∧H A denotes the quotient of P × A by the diagonal H-action, usually
63
+ called contracted product or associated fiber bundle. The main idea for the above
64
+ result is to introduce the scheme of sign involutions Invsgn
65
+ X/k ⊂ AutX/k, analyze the
66
+ effect of the conjugacy action on this subscheme, and derive consequences using the
67
+ general machinery of twisted forms and non-abelian cohomology.
68
+ Now suppose X is a para-abelian variety admitting a sign involution σ : X → X.
69
+ We then can form the quotient B = X/G with respect to the cyclic group G = {e, σ}
70
+ of order two. In particular in characteristic two, not much seems to be known on
71
+ this proper normal scheme. Our second main result is concerned with Picτ
72
+ B/k, the
73
+ numerically trivial part of the Picard scheme:
74
+ Theorem. (See Thm. 2.1) In the above situation, the group scheme Picτ
75
+ B/k is trivial.
76
+ This relies on Grothendieck’s two spectral sequences abutting to equivariant coho-
77
+ mology groups [11]. The result is not difficult in the tame case p ̸= 2, but requires a
78
+ careful analysis in the wild case p = 2. In dimension g = 1 the para-abelian varieties
79
+ X are usually called genus-one curves; we like to call them para-elliptic curves. The
80
+ above shows that the quotient by any sign involution is a Brauer–Severi curve, that
81
+ is, a twisted form of P1.
82
+ Our third main result deals with the converse situation: Suppose there is a degree-
83
+ two morphism f : X → B from a para-elliptic curve X to some Brauer–Severi
84
+ curve B. Then the projectivization S = P(E ) of the rank-two sheaf E = f∗(OX)
85
+ is a twisted form of a Hirzebruch surface with invariant e = 2, and comes with
86
+ a contraction to a normal surface S′, having a unique singularity, which is often
87
+ factorial. The geometry of the situation is as follows:
88
+ Theorem. (See Section 3) Assumptions as above. Then f : X → B is the quotient
89
+ by some sign involution σ on the para-elliptic curve X, and the latter embeds into
90
+ both S and S′ as an anti-canonical curve. Moreover, S′ is the anti-canonical model
91
+ of S, and also a twisted form of the weighted projective space P(1, 1, 2).
92
+ We also show that if there are two different sign involutions σ1 ̸= σ2, the ensuing
93
+ diagonal map gives an embedding X ⊂ B1 × B2 into a product of Brauer–Severi
94
+ curves.
95
+ Such products were studied by Kollár [17] and Hogadi [15].
96
+ Again X
97
+ becomes an anti-canonical curve, and it turns out that B1 × B2 embeds into P3 if
98
+ and only if the factors are isomorphic.
99
+ The paper is structured as follows: In Section 1 we recall the theory of para-abelian
100
+ varieties X, introduce the scheme of sign involutions Invsgn
101
+ X/k ⊂ AutX/k, analyze
102
+
103
105
+ the conjugacy action, and establish the link between sign involutions, cohomology
106
+ classes, and structure reductions. Section 2 is devoted to the Picard scheme of the
107
+ quotient B = X/G of a para-abelian variety X of arbitrary dimension g ≥ 0 by a
108
+ sign involution. In Section 3 we consider the case g = 1, and unravel the geometry
109
+ attached to degree-two maps X → B from a para-elliptic curve X to a Brauer–Severi
110
+ curve B.
111
+ Acknowledgement. The research was conducted in the framework of the research
112
+ training group GRK 2240: Algebro-Geometric Methods in Algebra, Arithmetic and
113
+ Topology. The first two authors were financially supported by the Deutsche For-
114
+ schungsgemeinschaft with a PhD grant in GRK 2240/1, the first author also with a
115
+ PhD grant in GRK 2240/2.
116
+ 1. The scheme of sign involutions
117
+ Let k be a ground field of characteristic p ≥ 0, and X be a proper scheme. Then
118
+ the group scheme AutX/k is locally of finite type, and the connected component
119
+ Aut0
120
+ X/k of the neutral element e = idX is of finite type ([19], Theorem 3.7). By
121
+ the Yoneda Lemma, the map σ �→ σ2 defines a morphism of the scheme AutX/k to
122
+ itself, which usually disrespects the group law. The scheme of involutions InvX/k is
123
+ defined via a cartesian diagram
124
+        InvX/k −−−−−→ AutX/k
+          |                 |  σ ↦ σ^2
+          ↓                 ↓
+        Spec(k) −−−e−−→ AutX/k .
131
+ It contains the neutral element and is stable under the inverse map σ �→ σ−1, but
132
+ otherwise carries no further structure in general.
133
+ Now suppose that X can be endowed with the structure of an abelian variety.
134
+ Recall that for each rational point x0 ∈ X, there is a unique group law that turns X
135
+ into an abelian variety, with origin 0 = x0. Fix such a datum, and write A for the
136
+ abelian variety obtained by endowing X with the ensuing group law. Note that A
137
+ can also be regarded as the pair (X, x0). The automorphism group scheme becomes
138
+ a semidirect product
139
+ AutX/k = A ⋊ AutA/k,
140
+ where the normal subgroup on the left acts on X by translations x �→ a + x. The
141
+ cokernel AutA/k on the right is an ´etale group scheme with countably many points,
142
+ acting on A in the canonical way. Its rational points are the automorphisms σ :
143
+ X → X fixing the origin x0. It contains a canonical element, namely the standard
144
+ sign involution x �→ −x. This defines a morphism (−1) : Spec(k) → AutA/k. Its
145
+ fiber with respect to the canonical projection A ⋊ AutA/k → AutA/k is denoted by
146
+ A ⊗ κ(−1).
147
+ Lemma 1.1. The closed subscheme A ⊗ κ(−1) ⊂ AutX/k is invariant under the
148
+ conjugacy action of AutX/k, lies inside InvX/k, and does not depend on the choice
149
+ of the origin x0 ∈ X.
150
+
151
153
+ Proof. Let x, a, b ∈ A(R) and ϕ ∈ AutA/k(R) be R-valued points, for some k-algebra
154
+ R. Then x �→ a − x is some R-valued point of A ⊗ κ(−1). Conjugation by (b, id) is
155
+ (1)
156
+ x ↦ −b + x ↦ a − (−b + x) ↦ (a + 2b) − x,
+ whereas conjugation by (0, ϕ) takes the form
+ x ↦ ϕ−1(x) ↦ a − ϕ−1(x) ↦ ϕ(a) − x.
+ Both are R-valued points of A ⊗ κ(−1). Furthermore, the composition x ↦ a − x ↦
+ a − (a − x) is the identity. With the Yoneda Lemma, we see that A ⊗ κ(−1) is
161
+ invariant under conjugacy, and must be contained in InvX/k.
162
+ Now let a0 ∈ X be another origin. The ensuing new group law and negation are
163
+ given by
164
+ x ⊕ y = x + y − a0   and   ⊖x = −x + 2a0,
168
+ and thus a ⊖ x = (a + a0) − x. This shows that the closed subscheme A ⊗ κ(−1) ⊂
169
+ AutX/k does not depend on the choice of origin.
170
+
171
+ Recall that a proper scheme X is called a para-abelian variety if there is a field
172
+ extension k ⊂ k′ such that the base-change X′ = X ⊗ k′ admits the structure
173
+ of an abelian variety. This notation was introduced and studied by Laurent and
174
+ the third author [18]. According to loc. cit., Proposition 5.2, the closed subscheme
175
+ A ⊂ AutX/k that acts trivially on Picτ
176
+ X/k is an abelian variety, and the canonical
177
+ A-action on X is free and transitive. The resulting class
178
+ [X] ∈ H1(k, A)
179
+ in the Weil–Chˆatelet group is called the cohomology class of the para-abelian variety.
180
+ Note that since A is smooth, the ´etale and fppf topology yield the same cohomology
181
+ groups ([13], Theorem 11.7). Consequently, the class [X] has some finite order; this
182
+ number is usually called period per(X) ≥ 1.
183
+ Conversely, if H is any commutative group scheme, with a torsor P and a homo-
184
+ morphism H → A, we get a para-abelian variety X = P ∧H X0. The latter denotes
185
+ the quotient of P × X0 by the diagonal action h · (p, x) = (h · p, h + x), and X0 is
186
+ the underlying scheme of the abelian variety A. By construction, this X is a twisted
187
+ form of X0.
188
+ Recall that the index ind(X) ≥ 1 is the greatest common divisor of the degrees
189
+ [κ(a) : k] for the closed points a ∈ X. This is indeed the index for the image of the
190
+ degree map CH0(X) → Z on the Chow group of zero-cycles. Note that in dimension
191
+ one this can also be seen as the degree map on the Picard group. According to [21],
192
+ Proposition 5 the divisibility property per(X) | ind(X) holds, and both numbers
193
+ have the same prime factors.
194
+ As explained in [36], Section 3, the group scheme AutX/k is a twisted form of
195
+ AutX0/k with respect to the conjugacy action.
196
+ In turn, the conjugacy-invariant
197
+ closed subscheme A ⊗ κ(−1) ⊂ AutX0/k becomes a closed subscheme
198
+ Invsgn
199
+ X/k ⊂ AutX/k,
200
+ which we call the scheme of sign involutions.
201
+ Any automorphism σ : X → X
202
+ belonging to Invsgn
203
+ X/k is called a sign involution.
204
+
205
207
+ Theorem 1.2. For each para-abelian variety X of dimension g ≥ 0, the following
208
+ three conditions are equivalent:
209
+ (i) There is a sign involution σ : X → X.
210
+ (ii) We have 2 · [X] = 0 in the Weil–Chˆatelet group H1(k, A).
211
+ (iii) There is a torsor P with respect to H = A[2] such that X ≃ P ∧H A.
212
+ If these conditions hold, we have the divisibility property ind(X) | 4^g.
213
+ Proof. We start with some general observations: The first projection
214
+ AutX0/k = A ⋊ AutA/k −→ A
215
+ identifies the scheme of sign involutions Z0 = Invsgn
216
+ X0/k = A ⊗ κ(−1) with a copy of
217
+ X0 = A. According to (1), the kernel for the conjugacy homomorphism A → AutZ0/k
218
+ is A[2], so this factors over the multiplication-by-two map 2 : A → A. It is now convenient
220
+ to write X = T ∧A X0 for some A-torsor T. Note that since X0 is the trivial
221
+ A-torsor, one actually has T = X. What is important now is that the scheme of sign
222
+ involutions Z = Invsgn
223
+ X/k coincides with Z = T ∧A Z0, and the latter is the quotient
224
+ of T × Z0 by the A-action a · (t, z0) = (a + t, 2a + z0).
225
+ This quotient can be computed as successive quotients, first for the action of
226
+ H = A[2] and then for the induced action of A/A[2]. The group H acts trivially on
227
+ the second factor, hence H\(T × X0) = (H\T) × X0. In light of the short exact
228
+ sequence
229
+ (2)    0 −→ H −→ A −−2−→ A −→ 0,
233
+ we may regard ¯T = H\T as the A-torsor induced from T with respect to A
234
+ 2→ A.
235
+ In other words Z = ¯T ∧ ¯
236
+ A Z0, where we write ¯A = A/H = A to indicate the nature
237
+ of the action. By construction, the ¯A-action on Z0 is free and transitive, so the
238
+ projection ¯T ⊗ κ(−1) → Z is an isomorphism. We conclude that there is a rational
239
+ point σ ∈ Z if and only if the torsor ¯T is trivial.
240
+ From the short exact sequence (2) we get a long exact sequence
241
+ H0(k, A) −−2−→ H0(k, A) −→ H1(k, H) −→ H1(k, A) −−2−→ H1(k, A).
246
+ It follows that the element [X] = [T] in H1(k, A) is annihilated by two if and only
247
+ if there is an H-torsor P such that X ≃ P ∧H X0, giving the equivalence
248
+ of (ii) and (iii). Similarly, we see that [X] = [T] is annihilated by two if and only if
249
+ ¯T is trivial. Together with the previous paragraph this gives the equivalence of (i)
250
+ and (ii).
251
+ It remains to verify the divisibility property of the index. This is just a special
252
+ case of a general fact: Suppose X has period n ≥ 1. From the long exact sequence
253
+ for the multiplication-by-n map we see that the quotient of X by A[n] contains a
254
+ rational point, so its fiber Z ⊂ X is a torsor with respect to A[n]. According to
255
+ [24], page 147 the kernel A[n] is finite of length l = n^{2g}. Clearly, the torsor Z has
+ the same length, hence X contains a zero-cycle of degree n^{2g}. Now if (ii) holds, we
+ have n | 2, and thus ind(X) | 4^g.
258
+
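+ [Added note, not taken from the paper — a worked instance of the bound just proved, assuming only
+ the statements above. For g = 1 the kernel A[2] has length 2^{2g} = 4, so a para-elliptic curve X
+ carrying a sign involution has a zero-cycle of degree 4, whence
+     \mathrm{per}(X) \mid 2, \qquad \mathrm{ind}(X) \mid 4 .
+ Since per(X) and ind(X) have the same prime factors, either per(X) = ind(X) = 1, that is X has a
+ rational point, or per(X) = 2 and ind(X) ∈ {2, 4}.]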
259
+ Recall that for each m ≥ 1 there is an identification H1(k, µm) = k×/k×m. Sup-
260
+ pose now that k contains a primitive m-th root of unity, such that µm ≃ (Z/mZ)k.
261
+ Let us recall the following result of Lang and Tate ([21], Theorem 8): Assume that
262
+
263
265
+ the ground field k, the abelian variety A, and the integer m ≥ 0 satisfy the fol-
266
+ lowing conditions: The Z/mZ-module k×/k×m contains a free module of infinite
267
+ rank, the quotient A(k)/mA(k) is finite, and A(k) contains an element of order m.
268
+ Then the Weil–Chˆatelet group H1(k, A) contains infinitely many elements X whose
269
+ period and index equals m. Note that for global fields k, the first two conditions
270
+ are automatic, and the third can be obtained after a finite extension, provided the
271
+ abelian variety has dimension g ≥ 1 and the characteristic exponent p ≥ 1 of k is
272
+ prime to m.
273
+ 2. The Picard scheme of the quotient
274
+ Let X be a para-abelian variety having a sign involution σ : X → X. Write
275
+ G ⊂ Aut(X) for the corresponding subgroup of order two. The quotient B = X/G is
276
+ a projective scheme that is geometrically integral and geometrically normal, with
277
+ h0(OB) = 1. Following [9], Section 2, we write Sing(B/k) for the locus of non-
278
+ smoothness. In contrast to the locus of non-regularity Sing(B), it comes with a
279
+ scheme structure, defined via Fitting ideals for K¨ahler differentials.
280
+ Let Picτ
281
+ B/k be the open-and-closed subgroup scheme inside the Picard scheme
282
+ comprising numerically trivial invertible sheaves. Its Lie algebra is H1(B, OB), and
283
+ the group scheme of connected components is the torsion part of the N´eron–Severi
284
+ group scheme. It therefore encodes important information on B.
285
+ Theorem 2.1. The group scheme Picτ
286
+ B/k is trivial. Moreover, Sing(B/k) is finite,
287
+ and is contained in the image of the fixed scheme Xσ.
288
+ Proof. It suffices to treat the case that k is algebraically closed, and we choose the
289
+ origin so that X = A is an abelian variety with σ(x) = −x. Write q : A → B for
290
+ the quotient map, let U ⊂ A be the complement of the fixed scheme, and V = q(U)
291
+ be its image. The induced map q : U → V is a G-torsor, in particular smooth.
292
+ According to [12], Theorem 17.11.1 the smoothness of U ensures the smoothness of
293
+ V . Thus Sing(B/k) is contained in the image of Aσ = A[2], and is therefore finite.
294
+ The structure sheaf OA has a G-linearization, and thus comes with equivariant
295
+ cohomology groups Hi(A, G, OA), and likewise we get Hi(A, G, O×
296
+ A). According to
297
+ [11], Section 5.2 there are two spectral sequences
298
+ (3)    E_2^{rs} = H^r(G, H^s(A, O_A^×))    and    E_2^{rs} = H^r(B, H^s(G, O_A^×)),
+ both with equivariant cohomology H^{r+s}(A, G, O_A^×) as abutment.
308
+ This gives two
309
+ exact sequences forming a diagram
310
+ (4)    0 −→ Pic(B) −→ H^1(A, G, O_A^×) −→ H^0(B, F) −→ H^2(B, O_B^×)
+        0 −→ H^1(G, k^×) −→ H^1(A, G, O_A^×) −→ Pic(A)^G −→ H^2(G, k^×),
322
+ where the abelian sheaf F = H1(G, O×
323
+ A) is supported by the singular locus of B.
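+ [Added note, not taken from the paper: the two rows of (4) are the usual exact sequences of
+ low-degree terms. For a first-quadrant spectral sequence E_2^{rs} ⇒ H^{r+s} one has
+     0 \to E_2^{1,0} \to H^1 \to E_2^{0,1} \to E_2^{2,0} \to H^2 ,
+ applied here once with E_2^{rs} = H^r(B, H^s(G, O_A^×)), which gives the row starting with
+ Pic(B), and once with E_2^{rs} = H^r(G, H^s(A, O_A^×)), which gives the row starting with
+ H^1(G, k^×).]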
324
+ Recall that the cohomology groups for the cyclic group G = {e, σ} are given by
325
+ H^{2j+1}(G, M) = Ker(σ + id)/Im(σ − id)    and    H^{2j+2}(G, M) = Ker(σ − id)/Im(σ + id),
330
+
331
333
+ for any G-module M. It follows that H2(G, k×) vanishes, because G acts trivially
334
+ on k×, and k× = k×2, whereas H1(G, k×) = µ2(k) = {±1}.
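+ [Added note, not taken from the paper — the computation spelled out. For the trivial G-action on
+ the multiplicative group M = k^×, the map σ − id sends x to σ(x)x^{-1} = 1 and σ + id sends x to
+ x·σ(x) = x^2, so the formulas above give
+     H^{2j+1}(G, k^×) = \ker(x \mapsto x^2) = µ_2(k), \qquad H^{2j+2}(G, k^×) = k^×/(k^×)^2 ,
+ and the latter vanishes because k is algebraically closed in this proof.]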
335
+ According to (4)
336
+ the kernel for Picτ(B) → Picτ(A) is the intersection of Pic(B) ∩ H1(G, k×) inside
337
+ the equivariant cohomology group, whereas the image is contained in Pic(A)[2] =
338
+ Pic(A)G. This already shows that the group scheme Picτ
339
+ B/k must be finite. It also
340
+ solves the case of dimension g = 1: Now B is a normal curve with finite Picard
341
+ scheme. The latter is smooth, according to [23], Section 27 because H2(B, OB) = 0.
342
+ Consequently B = P1, and thus Picτ
343
+ B/k = 0.
344
+ From now on, we assume that we are in dimension g ≥ 2. At each a ∈ A[2], the
345
+ induced G-action on the local ring OA,a is ramified only at the origin, and it follows
346
+ from [22], Proposition 3.2 that the local ring at the image b ∈ B is singular, and that
347
+ the finite degree-two extension OB,b ⊂ OA,a is not flat. Consequently, the quotient
348
+ map q : A → B induces a bijection between A[2] and Sing(B). Furthermore, the
349
+ short exact sequence 0 → OB → q∗(OA) → F → 0 defines a coherent sheaf F that
350
+ is invertible on the open set V = Reg(B), but not at the points b ∈ Sing(B).
351
+ We claim that the canonical map Pic(B) → Pic(A)[2] is injective. Equivalently,
352
+ the intersection Pic(B) ∩ H1(G, k×) inside H1(A, G, O×
353
+ A) is trivial.
354
+ The group
355
+ H1(G, k×) = µ2(k) vanishes in characteristic two, so only the case p ̸= 2 requires
356
+ attention. Then the trace map gives a splitting q∗(OA) = OB ⊕ F, thus F satis-
357
+ fies Serre’s Condition (S2). The canonical identification FV ⊗ F ∨
358
+ V = OV yields an
359
+ element in Γ(V, q∗(OA) ⊗ F ∨) = Γ(U, q∗(F ∨)) without zeros, and it follows that
360
+ the invertible sheaf F|V becomes trivial on U. Using the diagram (4) for the quo-
361
+ tient V = U/G instead of B = A/G, we conclude that F|V generates the kernel
362
+ of Pic(V ) → Pic(U). Seeking a contradiction, we now assume that there is a non-
363
+ trivial invertible sheaf L on B that becomes trivial on Y , we therefore must have
364
+ L |V = F|V . Using that both L and F satisfy Serre's Condition (S2) together
365
+ with [14], Theorem 1.12 we infer that L = F, contradicting that F is not invert-
366
+ ible. This establishes our claim. We therefore may regard the canonical map as an
367
+ inclusion Picτ(B) ⊂ Pic(A)[2].
368
+ We next check that for p ̸= 2 the finite group scheme Picτ
369
+ B/k is reduced. Equiv-
370
+ alently, its Lie algebra H1(B, OB) vanishes. To see this, consider the spectral se-
371
+ quences (3) with the additive sheaf OA instead the multiplicative sheaf O×
372
+ A. For
373
+ i ≥ 1, the vector spaces Hi(G, k) are annihilated by the group order |G| = 2. For
374
+ p ̸= 2 they consequently vanish, and we obtain inclusions
375
+ H1(B, OB) ⊂ H1(A, G, OA) ⊂ H1(A, OA)G.
376
+ Moreover, the term on the right also vanishes because G acts via the sign involution
377
+ on the cohomology group, according to ([27], proof of Proposition 2.3). This establishes
378
+ the claim.
379
+ To proceed we use the fact that for any finite commutative group scheme N the
380
+ isomorphism classes of N-torsors B′ → B correspond to homomorphisms of group
381
+ schemes N ∗ → PicB/k, where N ∗ = Hom(N, Gm) denotes the Cartier dual (see [26],
382
+ Proposition 6.2.1, and also the discussion in [33], Section 4).
383
+ The constant group scheme N = (Z/2Z)k has Cartier dual N ∗ = µ2. Suppose we
384
+ have an inclusion µ2 ⊂ Picτ
385
+ B/k such that the composite map µ2 → Picτ
386
+ A/k remains
387
+ a monomorphism. The corresponding N-torsor B′ → B thus induces a non-trivial
388
+
389
391
+ N-torsor A′ → A. According to the Serre–Lang Theorem ([24], page 167), there is
392
+ a unique structure of an abelian variety for A′ so that A′ → A is a homomorphism.
393
+ This gives an embedding N ⊂ A′ defined by a 2-division point a′ ∈ A′.
394
+ The
395
+ composite A′ → B is the quotient for the action of N ⋊ {±1}. Since this semidirect
396
+ product is actually a direct product, the projection A′ → B′ must be the quotient
397
+ by G = {±1}. Now choose a closed point x′ ∈ A′ with 2x′ = a′. It follows that
398
+ the orbit G · x′ = {±x′}, viewed as a rational point on B′, is fixed by the N-
399
+ action, contradiction. This settles the case p ̸= 2: Then µ2 = (Z/2Z)k, and we see
400
+ that Picτ(B) ⊂ Pic(A)[2] is trivial. We already saw in the previous paragraph that
401
+ Picτ
402
+ B/k is reduced, and infer that it must be trivial.
403
+ It remains to treat the case p = 2, where the arguments in some sense run parallel
404
+ to the preceding paragraph. At each a ∈ A[2], the local ring at the image b ∈ B
405
+ is singular, with depth(OB,b) = 2, according to [22], Proposition 3.2. Note that this is
406
+ in stark contrast to the situation p ̸= 2, when such rings of invariants are Cohen–
407
+ Macaulay. Again we consider the short exact sequence 0 → OB → q∗(OA) → F → 0
408
+ of coherent sheaves on B. According to [8], Section 1 we have F|V = OV . The
409
+ short exact sequence of local cohomology
410
+ H^0_b(B, q∗(OA)) −→ H^0_b(B, F) −→ H^1_b(B, OB)
414
+ reveals that F is torsion-free.
415
+ So the adjunction map F → i∗(F|V ) = OB is
416
+ injective, hence F is a sheaf of ideals.
417
+ Using that F is not invertible we infer
418
+ H0(B, F) = 0. The exact sequence
419
+ H0(B, F) −→ H1(B, OB) −→ H1(A, OA)
420
+ ensures that the map on the right is injective. On the other hand, its kernel is the
421
+ Lie algebra for the kernel of Picτ
422
+ B/k → PicA/k[2]. It follows that this map is actually
423
+ a closed embedding Picτ
424
+ B/k ⊂ PicA/k[2].
425
+ Now we use that the Lie algebra of any group scheme in characteristic p > 0
426
+ carries as additional structure the p-map x �→ x[p] and becomes a restricted Lie
427
+ algebra (see [36], Section 1 for more details). Suppose H1(B, OB) ̸= 0. Then there
428
+ is a p-closed vector x ̸= 0, in other words x[p] is a multiple of x. The case x[p] ̸= 0
429
+ yields an inclusion of µp ⊂ B where the composite map µp → A is injective. We saw
430
+ above that this is impossible. In turn we must have x[p] = 0. This gives an inclusion
431
+ of N ∗ = αp into B where the composite map αp → A remains injective. The Cartier
432
+ dual is N = αp. Thus we get a non-trivial αp-torsor B′ → B for αp whose base-
433
+ change A′ → A remains non-trivial. A similar situation with N ∗ = (Z/2Z)k and
434
+ N = µp arise if there is a point of order two on PicB/k. In both cases the discussion
435
+ in [27], beginning of Section 2 shows that A′ has the structure of an abelian variety
436
+ so that the projection A′ → A is a homomorphism, and we get an inclusion N ⊂ A′.
437
+ The composition A′ → B is the quotient by the group scheme N ⋊ {±1}. Again
438
+ this is actually a direct product. In the cartesian diagram
439
+        A′ −−−−→ B′
+         |          |
+         ↓          ↓
+        A −−−−→ B
443
+
444
446
+ the vertical maps are quotients by the action of the infinitesimal group scheme N,
447
+ and the horizontal maps are quotients by G = {±1}. Fix some a′ ∈ A′[2], with
448
+ image b′ ∈ Sing(B′), and consider the ring of invariants OB′,b′ ⊂ OA′,a′. According
449
+ to [22], Lemma 3.3 no element f ∈ mA′,a′ ∖ m2
450
+ a′ is G-invariant.
451
+ It follows that
452
+ the infinitesimal neighborhood Spec(OA′,a′/m2
453
+ a′) maps to Z′ = Spec(OB′,b′/mb′), and
454
+ therefore the same holds for the orbit N · {a′}. In light of the above commutative
455
+ diagram, the N-action on B′ is not free, contradiction.
456
+
457
+ Para-abelian varieties X of dimension g = 1 are usually called genus-one curves.
458
+ Throughout, we shall prefer the term para-elliptic curves. These are twisted forms
459
+ of elliptic curves. The moduli stack of such curves was studied by the second author
460
+ [6]. Recall that the Brauer–Severi varieties Y are twisted forms of projective space
461
+ Pn, for some n ≥ 0. For more details we refer to [3]. In case n = 1 we also say that
462
+ Y is a Brauer–Severi curve.
463
+ Corollary 2.2. Assumption as in the proposition, and suppose additionally g = 1.
464
+ Then the corresponding quotient B = X/G is a Brauer–Severi curve.
465
+ Proof. The scheme B is geometrically normal and of dimension one, hence smooth.
466
+ According to the theorem, the Picard scheme is discrete. It follows that the tangent
467
+ space H1(B, OB) vanishes. If there is a rational point a ∈ X, the resulting invertible
468
+ sheaf L = OB(a) is very ample, with h0(L ) = 2, and we obtain an isomorphism
469
+ B → P1.
470
+
471
+ In dimension g = 2 and characteristic p ̸= 2, the quotient B = A/{±1} is called
472
+ a Kummer surface, and is a K3 surface with rational double points. For p = 2,
473
+ the quotient B is either a K3 surface with rational double points, or a rational
474
+ surface with an elliptic singularity. This was discovered by Shioda [35], see also [16],
475
+ [31], [32] and [20]. The formation of such quotients is studied by the first author
476
+ [5]. Little seems to be known on the quotient in higher dimensions, in particular in
477
+ characteristic two, compare Schilson’s investigation [29], [30].
478
+ 3. Morphisms to Brauer–Severi curves
479
+ Let X be a para-elliptic curve over a ground field k. If there is a sign involution
480
+ σ : X → X, the quotient B by the corresponding group of order two is a Brauer–
481
+ Severi curve, according to Corollary 2.2. In this section we conversely assume that
482
+ our para-elliptic curve X admits a morphism f : X → B of degree two to some
483
+ Brauer–Severi curve B, and derive several geometric consequences.
484
+ First note that the corresponding function field extension k(B) ⊂ k(X) has degree
485
+ two. It must be separable, because X and B are smooth of different genus. So this
486
+ is a Galois extension, and the Galois group G is cyclic of order two. Let σ ∈ G be
487
+ the generator.
488
+ Proposition 3.1. The automorphism σ : X → X is a sign involution.
489
+ Proof. It suffices to treat the case that k is algebraically closed. The action is not
490
+ free, because χ(OX) = 0 ̸= 2 = |G|·χ(OB). Choose a fixed point x0 ∈ X, and regard
491
+ E = (X, x0) as an elliptic curve. If Aut(E) is cyclic, there is a unique element of
492
+ order two, and we infer that σ equals the sign involution. Suppose now that Aut(E)
493
+ is non-cyclic. According to [7], Proposition 5.9 this group is either the semi-direct
494
+
495
497
+ product Z/3Z ⋊ µ4(k) in characteristic p = 3, or Q ⋊ µ3(k) in characteristic p = 2,
498
+ where Q = {±1, ±i, ±j, ±k} denotes the quaternion group. In these groups, the
499
+ respective elements (0, −1) and (−1, 1) are the only ones of order two, and we again
500
+ conclude that σ coincides with the sign involution.
501
+
502
+ Proposition 3.2. The cokernel for the inclusion OB ⊂ f∗(OX) is isomorphic to
503
+ ωB, and the resulting extension 0 → OB → f∗(OX) → ωB → 0 of coherent sheaves
504
+ splits.
505
+ Proof. The sheaf f∗(OX) has rank two and is torsion-free, hence is locally free. The
506
+ inclusion of OB is locally a direct summand, so the cokernel L is invertible. We
507
+ have 0 = χ(OX) = χ(OB) + χ(L ) = 2 + deg(L ) and conclude deg(L ) = −2. Since
508
+ deg : Pic(B) → Z is injective, this gives L ≃ ωB. The extension yields a class in
509
+ Ext1(ωB, OB) = H1(B, ω_B^{⊗−1}), which vanishes by Serre Duality. So the extension
512
+ splits.
513
+
514
+ Choose a splitting and set E = f∗(OX) = OB ⊕ ωB. The smooth surface
515
+ S = P(E ) = Proj(Sym• E )
516
+ is a twisted form of the Hirzebruch surface S0 = P(E0), where E0 = OP1 ⊕ OP1(−2).
517
+ Let us call S the twisted Hirzebruch surface attached to the Brauer–Severi curve B.
518
+ Since f : X → B is affine, the invertible sheaf OX is relatively very ample, and we
519
+ get a closed embedding X ⊂ S. By abuse of notation we also write f : S → B for
520
+ the extension of our original morphism on X.
521
+ Recall that each invertible quotient E → N defines a section s : B → S, whose
522
+ image D has self-intersection D2 = deg(N ) − deg(N ′), where N ′ ⊂ E is the
523
+ kernel. For more details we refer to [10], Section 6. In particular, pr1 : E → OB
524
+ yields a curve D ⊂ S with D2 = 2, whereas pr2 : E → ωB gives some E ⊂ S
525
+ with E2 = −2, and the two sections are disjoint. The Adjunction Formula gives
526
+ (ωS ·D) = −4 and (ωS ·E) = 0. Hence ωS = f ∗(ω⊗2
527
+ B )⊗OS(−2E), because both sides
528
+ have the same intersection numbers with D and E. In particular c2
529
+ 1 = (ωS · ωS) =
530
+ −8 · deg(ωB) + 4 · E2 = 8. Setting
531
+ ω_S^{⊗1/2} = f ∗(ωB) ⊗ OS(−E),
534
+ we get an invertible sheaf whose square is isomorphic to the dualizing sheaf. In other
535
+ words, the surface S comes with a canonical theta characteristic, or spin structure,
536
+ compare [4] and [25].
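+ [Added note, not taken from the paper — a check of c_1^2 = 8 in the Néron–Severi group of S.
+ Writing F for a fiber of f : S → B, so that F^2 = 0, F · E = 1 and E^2 = −2, the formula above
+ gives ω_S ≡ 2\deg(ω_B)·F − 2E = −4F − 2E, hence
+     (ω_S · ω_S) = 16 F^2 + 16 (F · E) + 4 E^2 = 0 + 16 − 8 = 8 .]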
537
+ Proposition 3.3. The dual sheaf L = ω⊗−1/2
538
+ S
539
+ is globally generated with h0(L ) = 4.
540
+ The image of the resulting r : S → P3 is an integral normal surface S′ ⊂ P3 of degree
541
+ two, and the induced morphism r : S → S′ is the contraction of E. Moreover, the
542
+ image a = r(E) is a rational point, the local ring OS′,a is singular, and the restriction
543
+ r|X is a closed embedding.
544
+ Proof. Our sheaf has intersection numbers (L · L ) = 2 and (L · E) = 0. Serre
545
+ Duality gives h2(L ) = h0(ω⊗3/2
546
+ S
547
+ ) = 0, and Riemann–Roch yields
548
+ h0(L ) ≥ χ(L ) = (c_1^2/4 + c_1^2/2)/2 + χ(OS) = (2 + 4)/2 + 1 = 4.
553
+
554
556
+ The base locus Bs(L ) is contained in E, because ω⊗−1
557
+ B
558
+ is globally generated. The
559
+ short exact sequence 0 → f ∗(ω⊗−1
560
+ B
561
+ ) → L → L |E → 0 yields an exact sequence
562
+ 0 −→ H0(S, f ∗(ω⊗−1
563
+ B
564
+ )) −→ H0(S, L ) −→ H0(E, OE),
565
+ consequently h0(L ) ≤ h0(ω_B^{⊗−1}) + h0(OE) = 3 + 1 = 4. This ensures h0(L ) = 4, and that L
566
+ is globally generated.
567
+ In turn, our spin structure yields a morphism r : S → P3 with r∗(OP3(1)) =
568
+ ω⊗1/2
569
+ S
570
+ . It therefore contracts E. Moreover, the image S′ ⊂ P3 is integral and two-
571
+ dimensional, of some degree n ≥ 1. This image is not a plane, because the morphism
572
+ is defined by the complete linear system H0(S, L ). From 2 = (L ·L ) = deg(S/S′)·n
573
+ we infer that S → S′ is birational and n = 2.
574
+ The Adjunction Formula gives
575
+ ωS′ = OS′(2), consequently r∗(ωS′) = ωS. It follows that the birational morphism
576
+ r : S → S′ is in Stein factorization. Since Pic(S) has rank two, the exceptional
577
+ divisor is irreducible, whence must coincide with E.
578
+ The image a = r(E) is a rational point, because h0(OE) = 1. The local ring OS′,a
579
+ must be singular, because otherwise S = Bla(S′), such that E = r−1(a) must be a
580
+ projective line with E2 = −1, contradiction.
581
+ It remains to verify that the curves X, E ⊂ S are disjoint. Since deg(X/B) = 2
582
+ we have ωS = OS(−X)⊗f ∗(N ) for some invertible sheaf N on B. The Adjunction
583
+ Formula gives
584
+ 0 = (ωS · X) + X2 = −X2 + 2 deg(N ) + X2.
585
+ Consequently N
586
+ is trivial, and ωS = OS(−X).
587
+ This gives X2 = c2
588
+ 1 = 8, and
589
+ furthermore (X · E) = −(ωS · E) = 0. Thus the integral curves X and E must be
590
+ disjoint, hence r|X is a closed embedding.
591
+
592
+ Note that the local ring OS′,a is factorial provided that B ̸≃ P1. The above also
593
+ shows that the image S′ = r(S) can also be viewed as the anti-canonical model
594
+ P(S, −KS) of the scheme S, which is defined as the homogeneous spectrum of the
595
+ anti-canonical ring R(S, −KS) = ⊕_{t≥0} H0(S, ω_S^{⊗(−t)}).
598
+ Recall that the weighted projective space P(d0, . . . , dn) is the homogeneous spec-
599
+ trum of k[U0, . . . , Un], where the generators have degrees di = deg(Ui). The case
600
+ d0 = . . . = dn = 1 gives back the standard projective space Pn. Let us say that a
601
+ closed subscheme of a Gorenstein surface is an anti-canonical curve if its sheaf of
602
+ ideals is isomorphic to the dualizing sheaf.
603
+ Proposition 3.4. The anti-canonical model S′ = P(S, −KS) is a twisted form of
604
+ the weighted projective space P(1, 1, 2). Moreover, X ⊂ S and the resulting inclusion
605
+ X ⊂ S′ are anti-canonical curves.
606
+ Proof. It suffices to treat the case that k is algebraically closed. We claim that S′ is
607
+ defined inside P3 = Proj k[T0, . . . , T3] by the equation T 2
608
+ 0 − T1T2 = 0, for a suitable
609
+ choice of homogeneous coordinates. The main challenge is the case p = 2: According
610
+ to [1], Satz 2 our quadric X ⊂ P3 must be defined by an equation of the form
611
+ Σ_{i=1}^{r} (α_i X_i^2 + X_i Y_i + γ_i Y_i^2) + Σ_{j=1}^{s} δ_j Z_j^2 = 0,
622
+ with 1 ≤ 2r + s ≤ 4, and non-zero coefficients δj. Since k is algebraically closed, we
623
+ can make a change of variables and achieve δj = 1, and furthermore αi = γi = 0.
624
+
625
627
+ One now immediately sees that only for r = s = 1 the quadric S′ ⊂ P3 is normal
628
+ and singular, and setting T0 = Z1 and T1 = X1 and T2 = Y1 gives the claim. For
629
+ p ̸= 2 our quadric can be defined by an equation of the form Σ_{j=0}^{3} δ_j Z_j^2 = 0, and
632
+ one argues similarly.
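+ [Added note, not taken from the paper — one way to make "one argues similarly" explicit for
+ p ≠ 2: a normal but singular quadric has rank three, so after rescaling the coordinates the
+ equation becomes Z_0^2 + Z_1^2 + Z_2^2 = 0; substituting T_0 = Z_0, T_1 = Z_1 + iZ_2 and
+ T_2 = −(Z_1 − iZ_2), where i^2 = −1 exists in the algebraically closed field k, turns this into
+ T_0^2 − T_1T_2 = 0.]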
633
+ Consider the graded ring A = k[U0, U1, U2] with weights (1, 1, 2). The Veronese
634
+ subring A^{(2)} is generated by the homogeneous elements U0U1, U0^2, U1^2, U2, which satisfy
+ the relation (U0U1)^2 = U0^2 · U1^2. This gives a surjection
640
+ k[T0, T1, T2, T3]/(T0^2 − T1T2) −→ A^{(2)},
+ defined by the assignments T0 ↦ U0U1, T1 ↦ U0^2, T2 ↦ U1^2, and T3 ↦ U2.
645
+ Both rings are integral of dimension three. Using Krull’s Principal Ideal Theorem,
646
+ we infer that the above surjection is bijective. The homogeneous spectrum of A(2)
647
+ coincides with P(1, 1, 2) = Proj(A), and by the above also with S′.
648
+ We already saw in the previous proof that ωS = OS(−X), hence X ⊂ S is an
649
+ anti-canonical curve. From the Theorem of Formal functions one infers f∗(ωS) is
650
+ invertible, and this ensures that the direct image coincides with ωS′. Using X ∩E =
651
+ ∅ we infer ωS′ = OS′(−X).
652
+
653
+ Now suppose that we have two morphisms f1 : X → B1 and f2 : X → B2 to Brauer–Severi curves,
+ with deg(X/Bi) = 2. According to Proposition 3.1, they come from sign involutions
659
+ σ1 and σ2, respectively.
660
+ Proposition 3.5. If σ1 ̸= σ2, the diagonal morphism i : X → B1 × B2 is a closed
661
+ embedding, and its image is an anti-canonical curve.
662
+ Proof. Let A ⊂ AutX/k be the subgroup scheme that fixes Picτ
663
+ X/k. As discussed in
664
+ Section 1, this is an elliptic curve, and the action on the para-elliptic curve X is free
665
+ and transitive. Moreover, the dual abelian variety is identified with Pic0
666
+ X/k. But
667
+ note that the principal polarization stemming from the origin also gives A = Pic0
668
+ X/k.
669
+ We saw in the proof of Proposition 1.1 that the two rational points σ1, σ2 ∈ Invsgn
670
+ X/k
671
+ differ by the action of some non-zero a ∈ A(k). In other words, σ2(x) = a + σ1(x).
672
+ It follows that there is no rational point x ∈ X with σ1(x) = σ2(x). In particular,
673
+ the fixed schemes Xσ1 and Xσ2 are disjoint.
674
+ To proceed, we assume that k is algebraically closed. Let x ∈ X be a closed
675
+ point and write y = i(x) = (b1, b2). The inverse image i−1(y) is the intersection
676
+ of the fibers f −1
677
+ 1 (b1) ∩ f −1
678
+ 2 (b2). This is just the spectrum of κ(x), by the previous
679
+ paragraph. According to [12], Corollary 18.12.6 the finite morphism i : X → B1×B2
680
+ is a closed embedding.
681
+ By construction, we have deg(X/B1) = deg(X/B2) = 2. Set V = B1 × B2. Its
682
+ Picard scheme PicV/k can be seen as the Galois module Pic(V ⊗ksep) = Z×Z, compare
683
+ the discussion in [34], Section 1. Obviously, the elements (2, 0) and (0, 2) are fixed
684
+ by Gal(ksep/k), hence the whole Galois action is trivial, and thus PicV/k = (Z × Z)k
685
+ is a constant group scheme. The dualizing sheaf ωV = pr∗
686
+ 1(ωB1) ⊗ pr∗
687
+ 2(ωB2) has class
688
+ (2, 2), and we infer ωV = OS(−X).
689
+
690
+ Note that ωV is anti-ample, so the smooth surface V = B1 ×B2 coincides with its
691
+ anti-canonical model P(V, −KV ). Products of Brauer–Severi curves were studied by
692
+ Koll´ar [17] and Hogadi [15]. Let us close this paper with the following observation:
693
+
694
696
+ Proposition 3.6. The surface V = B1 × B2 admits an embedding into P3 if and
697
+ only if B1 ≃ B2.
698
+ Proof. The Picard scheme is given by PicV/k = (Z × Z)k. The classes (−2, 0) and
699
+ (0, −2) come from the preimages of the invertible sheaves on B1 and B2, and thus
700
+ belong to the subgroup Pic(V ) ⊂ PicV/k(k).
701
+ Suppose we have V ⊂ P3, and write d ≥ 1 for its degree. From ωV = OV (d − 4)
702
+ we get 8 = (ωV · ωV ) = d(d − 4)2, and thus d = 2. In particular, V admits the spin
703
+ structure ω⊗1/2
704
+ V
705
+ = OV (−1). The dual sheaf L = OV (1) has h0(L ) = 4, which easily
706
+ follows from the short exact sequence 0 → OP3(−1) → OP3(1) → L → 0. Choose
707
+ some non-zero global section s ̸= 0 from L , and let D ⊂ V be the resulting effective
708
+ Cartier divisor. Suppose D is reducible. Since deg(D) = 2 we see that there are
709
+ two components. Since L has class (1, 1) in PicV/k(k), it follows that D = D1 +D2,
710
+ where the summands are preimages of rational points on B1 and B2, respectively.
711
+ Thus both Brauer–Severi curves are copies of P1. Suppose now that D is irreducible.
712
+ Then deg(D/Bi) = 1, so the morphisms D → Bi are birational. By Zariski's Main
713
+ Theorem, it must be an isomorphism, and therefore B1 ≃ B2.
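+ [Added note, not taken from the paper — a quick arithmetic check of the degree computation
+ above: the function d ↦ d(d − 4)^2 takes the values 9, 8, 3, 0, 5, 24 for d = 1, …, 6 and is
+ increasing from there on, so 8 = d(d − 4)^2 indeed forces d = 2.]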
714
+ Conversely, suppose there is an isomorphism h : B1 → B2. Its graph defines
715
+ an effective Cartier divisor D ⊂ B1 × B2 with class (1, 1) ∈ PicV/k(k). Set L =
716
+ OV (D). Passing to the algebraic closure of k, we get L = pr∗
717
+ 1(OP1(1))⊗pr∗
718
+ 2(OP1(1)),
719
+ and compute h0(L ) = 4. Moreover, L is very ample, and thus defines a closed
720
+ embedding V ⊂ P3.
721
+
722
+ Given a sign involution σ : X → X and a non-zero rational point a ∈ A(k), we
723
+ get another sign involution x ↦ a + σ(x). We see that the situation f1 : X → B1, f2 : X → B2
728
+ with σ1 ̸= σ2 appears if and only if the set Invsgn
729
+ X/k(k) is non-empty and the group
730
+ A(k) is non-trivial.
731
+ References
732
+ [1] C. Arf: Untersuchungen ¨uber quadratische Formen in K¨orpern der Charakteristik 2. I. J.
733
+ Reine Angew. Math. 183 (1941), 148–167.
734
+ [2] M. Artin: Algebraization of formal moduli I. In: D. Spencer, S. Iyanaga (eds.), Global
735
+ Analysis, pp. 21–71. Univ. Tokyo Press, Tokyo, 1969.
736
+ [3] M. Artin: Brauer–Severi varieties. In: F. van Oystaeyen, A. Verschoren (eds.), Brauer
737
+ groups in ring theory and algebraic geometry, pp. 194—210, Springer, Berlin-New York,
738
+ 1982.
739
+ [4] M. Atiyah: Riemann surfaces and spin structures. Ann. Sci. ´Ecole Norm. Sup. 4 (1971),
740
+ 47–62.
741
+ [5] J. Bergqvist: The Kummer constructions in families. Dissertation, D¨usseldorf, in prepara-
742
+ tion.
743
+ [6] T. Dang: Cohomology of certain Artin stacks. Dissertation, D¨usseldorf (2022), https://
744
+ nbn-resolving.org/urn/resolver.pl?urn=urn:nbn:de:hbz:061-20220822-084645-3.
745
+ [7] P. Deligne: Courbes elliptiques: formulaire d’apr`es J. Tate. In: B. Birch, W. Kuyk (eds.),
746
+ Modular functions of one variable IV, pp. 53–73. Springer, Berlin, 1975.
747
+ [8] T. Ekedahl: Canonical models of surfaces of general type in positive characteristic. Inst.
748
+ Hautes ´Etudes Sci. Publ. Math. 67 (1988), 97–144.
749
+ [9] A. Fanelli, S. Schr¨oer: Del Pezzo surfaces and Mori fiber spaces in positive characteristic.
750
+ Trans. Amer. Math. Soc. 373 (2020), 1775–1843.
751
+
752
754
+ [10] A. Fanelli, S. Schr¨oer: The maximal unipotent finite quotient, unusual torsion in Fano
755
+ threefolds, and exceptional Enriques surfaces. ´Epijournal Geom. Alg´ebrique 4 (2020), Art.
756
+ 11.
757
+ [11] A. Grothendieck: Sur quelques points d’alg`ebre homologique. Tohoku Math. J. 9 (1957),
758
+ 119–221.
759
+ [12] A. Grothendieck: ´El´ements de g´eom´etrie alg´ebrique IV: ´Etude locale des sch´emas et des
760
+ morphismes de sch´emas. Publ. Math., Inst. Hautes ´Etud. Sci. 32 (1967).
761
+ [13] A. Grothendieck: Le groupe de Brauer III. In: J. Giraud (ed.) et al.: Dix expos´es sur la
762
+ cohomologie des sch´emas, pp. 88–189. North-Holland, Amsterdam, 1968.
763
+ [14] R. Hartshorne: Generalised divisors on Gorenstein schemes. K-Theory 8 (1994), 287–339.
764
+ [15] A. Hogadi: Products of Brauer–Severi surfaces. Proc. Amer. Math. Soc. 137 (2009), 45–50.
765
+ [16] T. Katsura: On Kummer surfaces in characteristic 2. In: M. Nagata (ed.), Proceedings of
766
+ the international symposium on algebraic geometry, pp. 525–542. Kinokuniya Book Store,
767
+ Tokyo, 1978.
768
+ [17] J. Koll´ar: Conics in the Grothendieck ring. Adv. Math. 198 (2005), 27–35.
769
+ [18] B. Laurent, S. Schr¨oer: Para-abelian varieties and Albanese maps. Preprint, arXiv:2101.
770
+ 10829.
771
+ [19] H. Matsumura, F. Oort: Representability of group functors, and automorphisms of alge-
772
+ braic schemes. Invent. Math. 4 (1967–68), 1–25.
773
+ [20] S. Kondo, S. Schr¨oer: Kummer surfaces associated with group schemes. Manuscripta Math.
774
+ 166 (2021), 323–342.
775
+ [21] S. Lang, J. Tate: Principal homogeneous spaces over abelian varieties. Amer. J. Math. 80
776
+ (1958), 659–684.
777
+ [22] D. Lorenzini, S. Schr¨oer: Moderately ramified actions in positive characteristic. Math. Z.
778
+ 295 (2020), 1095–1142.
779
+ [23] D. Mumford:
780
+ Lectures on curves on an algebraic surface. Princeton University Press,
781
+ Princeton, 1966.
782
+ [24] D. Mumford: Abelian varieties. Tata Institute of Fundamental Research Studies in Math-
783
+ ematics 5. Oxford University Press, London, 1970.
784
+ [25] D. Mumford: Theta characteristics of an algebraic curve. Ann. Sci. ´Ecole Norm. Sup. 4
785
+ (1971), 181–192.
786
+ [26] M. Raynaud: Sp´ecialisation du foncteur de Picard. Publ. Math., Inst. Hautes ´Etud. Sci. 38
787
+ (1970), 27–76.
788
+ [27] D. R¨ossler, S. Schr¨oer: Moret-Bailly families and non-liftable schemes. Algebr. Geom. 9
789
+ (2022), 93–121.
790
+ [28] T. Saito: The discriminant and the determinant of a hypersurface of even dimension. Math.
791
+ Res. Lett. 19 (2012), 855–871.
792
+ [29] B. Schilson: Singularit¨aten von Kummer-Variet¨aten in beliebiger Charakteristik. Disserta-
793
+ tion, D¨usseldorf (2018), https://nbn-resolving.org/urn/resolver.pl?urn=urn:nbn:
794
+ de:hbz:061-20181108-114448-1.
795
+ [30] B. Schilson: Wild singularities of Kummer varieties. J. Singul. 20 (2020), 274–288.
796
+ [31] S. Schr¨oer: Kummer surfaces for the selfproduct of the cuspidal rational curve. J. Algebraic
797
+ Geom. 16 (2007), 305–346.
798
+ [32] S. Schr¨oer: The Hilbert scheme of points for supersingular abelian surfaces. Arkiv Mat. 47
799
+ (2009), 143–181.
800
+ [33] S. Schr¨oer: Enriques surfaces with normal K3-like coverings. J. Math. Soc. Japan. 73 (2021),
801
+ 433–496.
802
+ [34] S. Schr¨oer: There is no Enriques surface over the integers. Ann. of Math. 197 (2023), 1–63.
803
+ [35] T. Shioda: Kummer surfaces in characteristic 2. Proc. Japan Acad. 50 (1974), 718–722.
804
+ [36] N. Tziolas, S. Schr¨oer: The structure of Frobenius kernels for automorphism group schemes.
805
+ arXiv:2105.07860, to appear in Algebra Number Theory.
806
+
807
809
+ Mathematisches Institut, Heinrich-Heine-Universit¨at, 40204 D¨usseldorf, Ger-
810
+ many
811
+ Email address: Jakob.Bergqvist@hhu.de
812
+ Mathematisches Institut, Heinrich-Heine-Universit¨at, 40204 D¨usseldorf, Ger-
813
+ many
814
+ Email address: dangt@uni-duesseldorf.de
815
+ Mathematisches Institut, Heinrich-Heine-Universit¨at, 40204 D¨usseldorf, Ger-
816
+ many
817
+ Email address: schroeer@math.uni-duesseldorf.de
818
+
9dE1T4oBgHgl3EQfoAQF/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
AtFLT4oBgHgl3EQfFC_E/content/tmp_files/2301.11986v1.pdf.txt ADDED
@@ -0,0 +1,2817 @@
3
+ FRA: A novel Face Representation Augmentation algorithm for face recognition
4
+ Soroush Hashemifar1, Abdolreza Marefat2, Javad Hassannataj Joloudari3,*, Hamid Hassanpour4
5
+ 1School of Computer Engineering, Iran University of Science and Technology, Tehran, Iran
6
+ 2Department of Artificial Intelligence, Technical and Engineering Faculty, South Tehran Branch, Islamic
7
+ Azad University, Tehran, Iran
8
+ 3Department of Computer Engineering, Faculty of Engineering, University of Birjand, Birjand
9
+ 9717434765, Iran
10
+ 4Faculty of Computer Engineering & Information Technology, Shahrood University of Technology, P.O.
11
+ Box 316, Shahrood, Iran
12
+ Corresponding author*: javad.hassannataj@birjand.ac.ir
13
+ Abstract
14
+ A low amount of training data for many state-of-the-art deep learning-based Face Recognition (FR)
15
+ systems causes a marked deterioration in their performance. Although a considerable amount of research
16
+ has addressed this issue by inventing new data augmentation techniques, using either input space
17
+ transformations or Generative Adversarial Networks (GAN) for feature space augmentations, these
18
+ techniques have yet to satisfy expectations. In this paper, we propose a novel method, named the Face
19
+ Representation Augmentation (FRA) algorithm, for augmenting face datasets. To the best of our
20
+ knowledge, FRA is the first method that shifts its focus towards manipulating the face embeddings
21
+ generated by any face representation learning algorithm in order to generate new embeddings
22
+ representing the same identity and facial emotion but with an altered posture. Extensive experiments
23
+ conducted in this study confirm the efficacy of our methodology and its power to provide noiseless,
24
+ completely new facial representations to improve the training procedure of any FR algorithm. Therefore,
25
+ FRA is able to help the recent state-of-the-art FR methods by providing more data for training FR
26
+ systems. The proposed method, using experiments conducted on the Karolinska Directed Emotional Faces
27
+ (KDEF) dataset, improves the identity classification accuracies by 9.52 %, 10.04 %, and 16.60 %, in
28
+ comparison with the base models of MagFace, ArcFace, and CosFace, respectively.
29
+ Keywords:
30
+ Face Recognition, Face Embeddings, Face Representation Learning, Autoencoder, Vision Transformers,
31
+ Latent Space Data Augmentation, Facial Pose Reconstruction
32
+ 1.Introduction
33
+ Face images are one of the most popular biometric modalities which have been continuously utilized in
34
+ Face Recognition (FR) systems [1]. It is used in a wide range of contexts with the aim of identity
35
+ authentication and its applications vary from daily life and finance to military and public security [2]. In
36
+ fact, in comparison with other biometrics, such as the fingerprint, iris, or retina which are ubiquitously
37
+ used for authorizing individuals, FR can provide us with the most convenient way to capture visual
38
+ information without the need for any extra activity from the subject. In recent years, FR has been one of
39
42
+ the most proactively studied areas in Computer Vision [3]. Particularly, with the advent of deep learning
43
+ and architectures like Convolutional Neural Networks (CNNs) [4], a large number of efficient facial
44
+ recognition methods with outstanding performance have been invented to address this challenge [5-12].
45
+ These successful algorithms depend heavily on the performance of neural networks which use a cascade
46
+ of layers comprised of neurons that are able to learn different levels of abstractions and representations
47
+ from the input data [2]. These representations are more powerful substitutions for hand-crafted features
48
+ from facial attributes such as Scale-Invariant Feature Transform (SIFT) and Speeded Up Robust Features
49
+ (SURF) [13, 14]. Their principal advantage is that they obviate the need for manually and exhaustively
50
+ searching for the best features representing one’s face. Moreover, the process of learning representations
51
+ via deep learning-based algorithms makes the generated features surprisingly discriminative in that the
52
+ inter-class diversity and intra-class compactness within the training data are all taken into account by the
53
+ network itself [15].
54
+ However, there are still problematic scenarios in which FR systems fail to realize the expectations. For
55
+ instance, in real-life situations, the imagery of a person’s face has a high chance of being in a variety of
56
+ facial expressions, occlusions, poor illumination, low resolution, etc. [16-18], and all these factors cause
57
+ substantial degradation of the overall performance of the current FR algorithms. Thus, different
58
+ approaches have been adopted to rectify the negative impact of such barriers in FR systems [19-21].
59
+ Some have opted for experimenting and devising new loss functions whose capability to better feedback
60
+ to their neural network in the backpropagation step, enables the extracted deep features to be more
61
+ discriminative and clearly separable [2, 6, 9, 22-26]. In addition to these works, different architectures
62
+ have been implemented to extract feature maps which are more useful in terms of facial representations.
63
+ Moreover, developing larger and more variant datasets has been one of the main stimuli which have been
64
+ pushing the boundaries in recent FR systems [27]. Nevertheless, although some of these benchmark
65
+ datasets can be found in large volumes, we often lack such a training set of images when it comes to real
66
+ use cases. A typical case would be a situation in which the goal is to train a deep learning-based method
67
+ on a private, in-house set of identities that have been chosen by a multimedia organization for video
68
+ indexing purposes. The data-gathering phase can be very time and labor-consuming and sometimes even
69
+ impossible, and it acts as an impediment in the way of achieving a tailored amount of training datasets.
70
+ These have motivated researchers to pave the way by introducing different data augmentation techniques.
71
+ Data augmentation refers to a set of techniques that are used to increase the number of training datasets
72
+ without the loss of previously annotated data. The benefit of such methods is that it equips the trained
73
+ model with more generalizability and acts as a regularizer in the case of overfitting which is one of the
74
+ most frequent complications when dealing with a small amount of training data [28, 29]. Overall, there
75
+ are two mainstream categories of methods for augmenting data. The first set of methods has the aim of
76
+ manipulating the data in the input space in that they simply take the input image and apply different
77
+ geometric transformations such as translations, cropping, vertical and horizontal flipping, rotation, etc
78
+ [30]. Even though these methods are proven to be extremely useful in some other challenges like image
79
+ classification, object detection, and image captioning in computer vision, in the case of FR they cannot be
80
+ as helpful as expected. The main reason is that, in order for any FR system to capture a reliable visual
81
+ representation of a face crop image, the content should be aligned in terms of facial landmarks. This
82
+ means that any geometric alteration of these landmarks, which inevitably happens when one uses these classical
+ methods, can perturb the overall performance of the FR pipeline. These challenges have motivated the
84
+ researchers to shift their studies’ direction toward more modern and domain-specific solutions [31-33],
85
+ leading to the second set of methods, which are known to be Generative Adversarial Networks (GANs)
86
89
+ [34]. These methods are the well-known type of generative models which are used with the objective of
90
+ transforming the input data in feature space with the aim of generating new augmented image data. This
91
+ group of models is capable of adjusting the facial attributes existent in a face image such as hair style,
92
+ expression, posture, skin color, etc. to a target style. However, in most cases, these generative models
93
+ cannot create realistic outputs, and they must deal with the high complexity of mapping the
+ feature space back to the input space, often without any considerable improvement on the downstream task,
95
+ which in our case, is classification on the identity of the samples.
96
+ In order to address these difficulties, in this paper we propose the Face Representation Augmentation
97
+ (FRA) algorithm. This algorithm augments the posture of a given face image in the latent space. This
98
+ means that, given a set of embeddings representing a specific person, the proposed approach alters the
99
+ embedding to sustain the identity-related features with a transformed pose feature. The FRA algorithm
100
+ can help the existing facial recognition systems especially when the number of training samples is
101
+ imbalanced or less than expected. Our main contributions in this paper are itemized in the following:
102
+ 1. A novel algorithm for facial posture augmentation inside the latent space to reduce the complexity of
103
+ the image augmentation problem.
104
+ 2. Generating noiseless, non-duplicated embeddings which are proved to be linearly separable.
105
+ 3. Extensive experiments were conducted on the Karolinska Directed Emotional Faces (KDEF) [35]
106
+ dataset, improving the identity classification accuracies by 9.52 %, 10.04 %, and 16.60 % in comparison with the
+ base models of MagFace, ArcFace, and CosFace, respectively.
108
+ The rest of the paper is organized as follows. In Section 2, we briefly review the related works on face-
109
+ specific data augmentation and representation learning. Then, in Section 3, we present the details of our
110
+ proposed methodology. In Section 4, we demonstrate the results of our experiments in comparison with
111
+ other related state-of-the-art approaches. Finally, the conclusion will be drawn in Section 5.
112
+ 2.Related works
113
+ In this section, we present an overview of face-specific data augmentation techniques. These are
114
+ categorized into two groups, classical and generative-based methods, in Section 2.1. Additionally, we review the
+ related literature of FR algorithms in Section 2.2.
116
+ 2.1.Face-Specific Data Augmentation
117
+ To begin with, five data augmentation techniques for face photos were reported by Lv et al. [29]. These
118
+ techniques were landmark perturbation, hairdo synthesis, glasses synthesis, postures synthesis, and
119
+ lighting synthesis. Vincent et al. [36] tried to synthesize more data by applying different types of noise
120
+ such as Gaussian and Salt-and-pepper with the objective of training Stacked Denoising Autoencoders on
121
+ more complicated samples. Wang et al. [37] addressed the issue of data augmentation in picture
122
+ classification using conventional transformation techniques and GANs. They also suggested a technique
123
+ for learning network-based augmentations that better enhance the classifier in the setting of generic
124
+ photos rather than face images.
125
+ Moreover, although the hair is not an intrinsic part of the human face, it interferes with facial recognition
126
+ since it obscures the face and changes its appearance. Using DiscoGAN, which was developed to find
127
+ cross-domain relationships using unpaired data, Kim et al. altered hair color. In addition to the color, Kim
128
+ et al. in [38], suggested changing the bang by transferring an unsupervised visual characteristic using a
129
+ reconfigurable GAN. An online compositing technique was used in the face synthesis system proposed by
130
+ Kemelmacher-Shlizerman et al. [39]. The system might produce a series of fresh photographs with the
131
134
+ input person's identification and the questioned look using one or more photos of their face and a text
135
+ query like curly hair. Jiang et al., in [40], proposed Pose and expression resilient Spatial aware GAN
136
+ (PSGAN). It starts by using Makeup Distill Network to separate the reference image's makeup into two
137
+ spatially aware makeup matrices. After that, a module called Attentive Makeup Morphing is developed to
138
+ let users describe how a pixel's appearance in the source picture is altered based on the reference image.
139
+ In order to ease applications in the real-world setting, PSGAN is the first to concurrently accomplish
140
+ partial, shade tunable, and pose/expression robust makeup transfer. In order to separate the makeup from
141
+ the reference picture as two makeup matrices, an MDNet is also included. The flexible partial and shade
142
+ adjustable transfer is made possible by the spatially aware makeup matrices. To learn all cosmetics
143
+ attributes [41], including color, form, texture, and position, it comprises an enhanced color transfer branch
144
+ and a new pattern transfer branch. They present makeup in this work as a combination of color
145
+ transformation and pattern addition, and they create a thorough makeup transfer technique that works for
146
+ both delicate and dramatic looks. They suggest using warped faces in the Ultraviolet (UV) space while
147
+ training two network branches to eliminate the disagreement between input faces in terms of form, head
148
+ posture, and expression. They also create a new architecture with two branches for color and pattern
149
+ transfer. They present brand-new cosmetics transfer datasets with extreme fashions that were not taken
150
+ into account in the earlier datasets.
151
+ 2.2.Representation Learning for Face Recognition
152
+ Representation learning refers to a set of algorithms that are designed to solve a variety of challenges like
153
+ image retrieval [42-44], the person [45, 46] and vehicle [47, 48] re-identification, landmark detection, and
154
+ fine-grained object recognition [49, 50]. The task of face recognition in computer vision is heavily
155
+ dependent on learning representations that have fine intra-class and large inter-class distances [51].
156
+ Previous works [6, 22, 25, 52, 53] have mainly adopted different, more robust loss functions with the aim
157
+ of learning representations that satisfy the aforementioned requirements.
158
+ In [52], a deep convolutional neural network, named FaceNet, was proposed which learns facial
159
+ representations with the help of triplet loss. The main objective of this work is to achieve an embedding
160
+ f(x) from an image x into a d-dimensional Euclidean space Rd. The obtained embedding is generated in a
161
+ way that the squared distance among the embeddings from one class is small and that of the embeddings
162
+ from different classes is large. This algorithm achieves 99.63% and 95.12% accuracy in LFW [54] and
163
+ YouTube Faces Database [55] respectively. Liu et al. [53] have proposed a new look at the loss functions
164
+ which are based on the Euclidean margin between the produced embeddings. For CNNs to learn
165
+ discriminative facial characteristics with clear and innovative geometric interpretation, they suggest the
166
+ A-Softmax loss. The assumption that faces also lie on a manifold is fundamentally compatible with the
167
+ learnt features' discriminative spread on a hypersphere manifold. In order to approximate the learning
168
+ problem in which the minimal inter-class distance should be greater than the maximal intra-class distance,
+ they introduce a margin between such classes.
170
+ In [22], the authors have proposed ArcFace, a major modification of the Softmax loss to further improve
171
+ the robustness of the learned deep features. By utilizing the arc-cosine function to calculate the angle
172
+ between the current feature and the target weight and adding an additive angular margin to the target
173
+ angle, the target logit can be obtained. Then, these logits are rescaled by a fixed feature norm followed by
174
+ exactly the same steps in the Softmax loss function. Their approach has the following advantages over the
175
+ others. (1) Directly optimizing the geodesic distance margin (2) State-of-the-art performance in several
176
179
+ benchmark datasets: achieving 99.53% accuracy (3) Easiness in terms of implementation (4) Efficiency in
180
+ terms of computational complexity.
181
+ In [25], the authors reformulated the Softmax loss as a cosine loss with the aim of introducing a novel
182
+ loss function, named Large Margin Cosine Loss (LMCL). Their improvement is to further maximize the
183
+ decision margin in the angular space by introducing and training a deep model called CosFace. In this
184
+ deep model, LMCL guides the convolutional layers to learn features with huge cosine margins. Their
185
+ results demonstrate that they have achieved 97.96% accuracy in face verification on the MegaFace
186
+ benchmark, which has been a major improvement in comparison to previous works.
187
+ Meng et al. [6] proposed a new set of losses that enable the network to learn embeddings whose
188
+ magnitude represents the quality of the given face. By extending ArcFace [22] and introducing the
189
+ MagFace loss function, they demonstrate that the more likely the subject is to be recognized, the bigger
190
+ the magnitude of the generated embedding becomes. MagFace learns to generate these universal
191
+ embeddings by pulling the easier samples within a class of identities to the class center and pushing them
192
+ away from the origin. This makes the embeddings robust to ambiguity and the absence of high
193
+ discriminative features which prevalently exist in unconstrained face images in real scenarios. They have
194
+ achieved 99.83% verification accuracy in the LFW benchmark dataset. In Table 1, a comparison of these
195
+ works is depicted.
196
+ Table 1. Verification accuracy of MagFace, CosFace, ArcFace, and SphereFace. These models are
197
+ evaluated on CALFW, CPLFW, AgeDB, LFW, and CFP-FP datasets.
198
+ Method          | CALFW [56] | CPLFW [57] | AgeDB [58] | LFW [54] | CFP-FP [59]
+ MagFace [6]     | 96.15      | 92.87      | 98.17      | 99.83    | 98.46
+ CosFace [25]    | 96.18      | 92.18      | 98.17      | 99.78    | 98.26
+ ArcFace [22]    | 95.96      | 92.72      | 98.05      | 99.81    | 98.40
+ SphereFace [53] | 95.58      | 91.27      | 97.05      | 99.67    | 96.84
228
+ Moreover, although these approaches have significant performance, directly applying GAN approaches
229
+ appears to have a few disadvantages. Mode collapse, difficulty in training, convergence problems,
230
+ and poor image generation effect, along with the unreliable results of the generator for unconstrained
231
+ input images, cause the generated image examples to be incapable of being utilized for industrial data
232
+ augmentation tasks [60, 61].
233
+ 3. Proposed approach
235
+ 3.1.Overview
236
+ This section presents the proposed FRA algorithm. As can be inferred from Figure 1, our method includes
237
+ four steps. These are as follows: face detection and alignment, input preparation: facial landmark and
238
+ representation extraction, pose feature extraction, and representation augmentation. Steps 1 and 2
239
+ comprise our data preprocessing pipeline which is explained in Section 3.2. Steps 3 and 4 represent our
240
+ main contribution of this paper and are explained in Sections 3.3 and 3.4.
241
244
+ Figure 1. The overall procedure of FRA. FRA is composed of four steps to generate a new representation
245
+ vector with identity i, emotion e, and target posture p', by applying the target posture p' to a base image with
+ identity i, emotion e, and posture p.
247
+ 3.2. Dataset preprocessing and preparation
248
+ Our data preprocessing step includes three main phases. These three phases are depicted in Figure 1. As is
249
+ seen in the first phase, we feed the raw face images to the Multi-task Cascaded Convolutional Networks
250
+ (MTCNN) algorithm [53] which is a robust face and landmark detector. MTCNN provides us with 5
251
+ landmark points, including the center of both eyes, the tip of the nose, and the left and right corners of the
252
+ lips, and a bounding box that perfectly encloses the face area within the image without any padding. In
253
+ this phase, we also align the face images by feeding the acquired facial landmarks along with the face
254
+ image itself to the warpAffine function of OpenCV [62], a well-known library with ready-to-
255
+ use computer vision-related algorithms.
256
+ In the second phase, we feed the aligned face images to MLXTEND1 so as to determine more facial
257
+ landmarks. As is shown in Figure 1, MLXTEND outputs 68 facial key points which we use to construct
258
+ binarized images with pixel value 0 (completely black) for the background and 1 (completely white) for
259
+ facial landmarks. On the other hand, we need to have fixed-size embeddings for each sample within the
260
+ dataset. These embeddings are in fact the training data for the combiner module which will be explained
261
+ in Section 3.4. In our case, we use two of the most reliable and robust face representation learning
262
+ algorithms, namely MagFace [6] and FaceNet [52], for obtaining embeddings for each image. MagFace’s
263
+ learning procedure is for a universal embedding that is quality aware, meaning that the easier the sample
264
+ is for the recognition task, the closer its feature vector becomes to the center of the class. Furthermore,
265
+ FaceNet is an algorithm that directly learns a mapping from the samples to a compact Euclidean space
266
+ and the distances correlate to the similarity degree of a given pair of face images. In Phase 3, the
267
+ binarized images generated in Phase 2 are fed to the AE model in order to generate an embedding vector
268
+ 1http://rasbt.github.io/mlxtend/
269
+ [Figure 1 diagram: a) align and crop; b) inputs preparation (face/landmark detectors producing 68 facial landmarks and a 512D facial representation); c) pose features extraction (encoder producing 512D pose features); d) augmented representation (combiner producing a 512D augmented face representation for identity i, emotion e, and posture p').]
322
+ representing posture features. Finally, in Phase 4, pose and face representation vectors are fed into the
323
+ combiner module to generate an augmented face representation vector.
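+ The following minimal sketch illustrates Phases 1 and 2 of this pipeline. The concrete libraries (facenet_pytorch for
+ MTCNN, mlxtend for the 68 landmarks) and the reference landmark positions used for alignment are assumptions;
+ the text itself only names MTCNN, OpenCV's warpAffine, and MLXTEND.
+ import cv2
+ import numpy as np
+ from facenet_pytorch import MTCNN
+ from mlxtend.image import extract_face_landmarks
+ 
+ mtcnn = MTCNN(keep_all=False)
+ 
+ def align_face(img_bgr, size=112):
+     """Detect the face with MTCNN and roughly align it with warpAffine."""
+     rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)
+     boxes, probs, points = mtcnn.detect(rgb, landmarks=True)
+     if boxes is None:
+         return None
+     src = points[0][:3].astype(np.float32)            # both eyes + nose tip
+     dst = np.float32([[38, 52], [74, 52], [56, 72]])  # assumed reference positions
+     M = cv2.getAffineTransform(src, dst)
+     return cv2.warpAffine(img_bgr, M, (size, size))
+ 
+ def binarized_landmarks(aligned_bgr, size=112):
+     """Render the 68 MLXTEND key points as white pixels on a black canvas."""
+     rgb = cv2.cvtColor(aligned_bgr, cv2.COLOR_BGR2RGB)
+     points = extract_face_landmarks(rgb)              # (68, 2) array, or None on failure
+     canvas = np.zeros((size, size), dtype=np.uint8)
+     if points is None:
+         return canvas
+     for x, y in points:
+         if 0 <= x < size and 0 <= y < size:
+             canvas[y, x] = 255
+     return canvas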
324
+ 3.3.Facial Landmark Restoration using Autoencoders
325
+ Autoencoders (AE) are a particular type of neural networks whose main functionality is to encode the
326
+ input into a meaningfully compacted representation and decode this into the input space afterwards [63,
327
+ 64]. Following this paradigm, in this paper, we have been inspired by the work done by Meng et al. [65]
328
+ and decided to use an AE-based model for encoding our input space (binarized images of landmarks
329
+ explained in Section 3.2) into the latent space (embeddings), as shown in Figure 2. Given Si as a sample
330
+ of a facial landmarks image, the output of F(Si) is a reconstructed image S'i, where F(Si) = B(A(Si)), with A the encoder and B the decoder. After the AE
331
+ model’s convergence, we can discard B (decoder) and take only A which has learned to encode the input
332
+ into an optimized and meaningful latent space representation denoted by Vi. It is worth mentioning that Vi
333
+ plays a vital role in our proposed method which is the latent representation of the posture of the face.
334
+ Figure 2 illustrates the proposed AE-based model and its architecture.
335
+ Figure 2. A general architecture of an autoencoder-based model. FRA utilizes a typical convolutional
336
+ autoencoder with a bottleneck of size 512. This bottleneck vector is used in further steps.
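+ A minimal sketch of such an autoencoder for the 112x112 binarized landmark images is given below; the exact
+ layer widths are assumptions, since the text only fixes the input size and the 512-dimensional bottleneck.
+ import torch
+ import torch.nn as nn
+ 
+ class LandmarkAE(nn.Module):
+     def __init__(self, bottleneck=512):
+         super().__init__()
+         self.encoder = nn.Sequential(                                        # A
+             nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.ReLU(),             # 56x56
+             nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),            # 28x28
+             nn.Conv2d(32, 64, 3, stride=2, padding=1), nn.ReLU(),            # 14x14
+             nn.Flatten(),
+             nn.Linear(64 * 14 * 14, bottleneck),
+         )
+         self.decoder = nn.Sequential(                                        # B
+             nn.Linear(bottleneck, 64 * 14 * 14), nn.ReLU(),
+             nn.Unflatten(1, (64, 14, 14)),
+             nn.ConvTranspose2d(64, 32, 4, stride=2, padding=1), nn.ReLU(),   # 28x28
+             nn.ConvTranspose2d(32, 16, 4, stride=2, padding=1), nn.ReLU(),   # 56x56
+             nn.ConvTranspose2d(16, 1, 4, stride=2, padding=1), nn.Sigmoid(), # 112x112
+         )
+ 
+     def forward(self, s):
+         v = self.encoder(s)          # 512-d pose embedding V_i
+         return self.decoder(v), v    # reconstruction S'_i and bottleneck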
337
+ 3.4.Combining Feature Vectors and Feature Extraction using Vision Transformers
338
+ Vision Transformers (ViT) are deep learning models whose versatility in various fields such as natural
339
+ language processing, speech recognition and computer vision has made them a prominent choice for
340
+ researchers [66]. In comparison with the conventional CNNs, ViT models have achieved competitive
341
+ superior results in vision tasks like object detection [67], image recognition [68], image super-resolution
342
+ [69], and segmentation [70, 71].
343
+ At the core of ViT models, there is a mechanism of attention that has been probably one of the most
344
+ significant concepts in the domain of deep learning. Its inspiration is the biological attributes of human
345
+ beings in that, to recognize an object, we tend to focus on the most distinctive parts of that entity instead
346
+ of paying attention to all parts of it as a whole [72]. In terms of deep neural networks, this can be
347
+ interpreted as assigning importance scores for a given set of features where the higher scores are for more
348
+ relevant features and the lower ones for the features with less saliency [73]. As can be observed from
349
+ Figure 3, the model learns to have more focus on the parts which represent the target object in the image.
350
+ [Figure 2 diagram: 1@112x112 binarized landmark input, stacked Convolution + Max-Pool layers, Encoder (A), Decoder (B), 112x112x1 reconstruction.]
366
+ Figure 3. The paradigm of combining two representation vectors using ViT. The combiner takes two
367
+ representation vectors with a size of 512 and combines them into a 32x32 matrix to be processed by a
368
+ vision-transformer component.
369
+ Moreover, transformers [74] refer to a set of neural networks which use the mechanism of attention.
370
+ These models consist of multiple encoders and decoders whose architectures are identical to each other.
371
+ In these models, a multi-head self-attention (MSA) mechanism is used for encoding the input, followed
372
+ by decoders which include an extra attention layer in order to process the encoder’s output. Self-attention
373
+ is a function denoted in Equation (1).
374
+ Attention(Q, K, V) = softmax(QK^T / sqrt(d_k)) V,   s.t.   Q = xW^Q, K = xW^K, V = xW^V    (1)
+ where W^Q, W^K, and W^V are weight matrices used in linear transformations on the inputs x to produce Q, K, and
+ V. The attention score is then calculated as the dot product of the query and each key, scaled by the square root of
+ the dimension d_k of the key K. Let x = (x1, x2, x3, ..., xn) be the input sequence from which an output is computed
+ based on a collection of queries Q, keys K, and values V. In MSA, Q, K, and V are projected linearly and this is done for h
381
+ consecutive times with different learned weights. Then, by applying the self-attention mechanism on each
382
+ of the outputs in the previous step simultaneously, we obtain h outputs which are heads. Then, these
383
+ heads are concatenated to achieve the final output. The following demonstrates these computations in
384
+ mathematical terms.
385
+ MSA(Q, K, V) = Concat(head_1, ..., head_h) W^O,   s.t.   head_i = Attention(QW_i^Q, KW_i^K, VW_i^V)    (2)
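+ For reference, the scaled dot-product attention of Equation (1) can be written directly in code (a plain
+ implementation, not the authors' code):
+ import torch
+ 
+ def attention(q, k, v):
+     d_k = q.size(-1)
+     scores = q @ k.transpose(-2, -1) / d_k ** 0.5   # QK^T / sqrt(d_k)
+     return torch.softmax(scores, dim=-1) @ v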
388
+ MSAs, compared with CNNs, transform feature maps with huge data-specific kernels and this makes
389
+ them as expressive as the CNN-based architectures [75]. The key difference exists where convolutions
390
+ diversify feature maps whereas MSAs combine them. According to [76], the Fourier analysis of feature
391
+ maps demonstrates that convolutions boost high-frequency components whereas MSAs, on the other
392
+ hand, attenuate them.
393
+ Furthermore, finding elements that are more pertinent for the depiction of the altered posture is made
394
+ easier by the multi-head attention layer. In order to do this, the scaled dot product attention gives greater
395
+ weight to the characteristics of the input facial representation and encoded posture that is more pertinent
396
+ while providing less weight to the features that are less relevant [77]. The procedure chooses features
397
+ [Figure 3 diagram: 512D pose representation and 512D face representation -> concatenation and reshape -> normalization layer -> Vision Transformer (ViT) -> 512D augmented representation.]
410
+ from various input regions and aids in improving representation performance since there are several heads
411
+ in the attention layer.
412
+ In this paper, we have opted for using a ViT-based architecture for extracting features. As stated before,
413
+ this policy ensures that the model is trained to attend to the most salient feature values within the identity
414
+ and posture-related feature vectors simultaneously. Considering E of size 1x512 as the embedding obtained
+ from a pre-trained facial representation learning algorithm and Vi of size 1x512 as the bottleneck vector
+ generated by the autoencoder part of FRA, we concatenate and reshape the produced feature vector to
+ make it of shape 32x32.
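+ A hedged sketch of this combiner is shown below: the two 512-dimensional vectors are concatenated, reshaped to
+ 32x32, split into 8x8 patches, and passed through a small transformer encoder whose output is squashed to the
+ range 0-1 and mapped by a fully connected layer to the 512-dimensional augmented embedding. The number of
+ heads, layers, patch size, and embedding dimension follow Table 2; everything else is an assumption.
+ import torch
+ import torch.nn as nn
+ 
+ class Combiner(nn.Module):
+     def __init__(self, dim=256, heads=4, layers=4, patch=8, out_dim=512):
+         super().__init__()
+         self.patch = patch
+         n_tokens = (32 // patch) ** 2                       # 16 patch tokens
+         self.embed = nn.Linear(patch * patch, dim)          # patch embedding
+         self.pos = nn.Parameter(torch.zeros(1, n_tokens, dim))
+         layer = nn.TransformerEncoderLayer(d_model=dim, nhead=heads, batch_first=True)
+         self.encoder = nn.TransformerEncoder(layer, num_layers=layers)
+         self.fc = nn.Linear(n_tokens * dim, out_dim)
+ 
+     def forward(self, face_emb, pose_emb):
+         x = torch.cat([face_emb, pose_emb], dim=1)          # (B, 1024)
+         x = x.view(-1, 1, 32, 32)
+         # split the 32x32 map into non-overlapping 8x8 patches
+         p = x.unfold(2, self.patch, self.patch).unfold(3, self.patch, self.patch)
+         p = p.contiguous().view(x.size(0), -1, self.patch * self.patch)
+         tokens = self.embed(p) + self.pos
+         z = self.encoder(tokens).flatten(1)                 # (B, 16 * dim)
+         z = torch.sigmoid(z)                                # normalize to the 0-1 range
+         return self.fc(z)                                   # 512-d augmented embedding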
418
+ 3.5.Generating Pose-Aware Face Embeddings
419
+ After the ViT module in our proposed model, the output is normalized, making it into the range of 0-1,
420
+ after which it is fed to a fully connected layer. This layer produces a 512-dimensional output vector, which
+ is the augmented face embedding generated by our model.
422
+ 3.6.Multi-Task Loss Function
423
+ In the training procedure, we utilized a Multi-part Loss Function (MLF) as the learning objective. This
424
+ MLF comprises a Binary Cross-Entropy (BCE) loss function, which is used to train the autoencoder, so it
425
+ can reconstruct the pose style better. Since the activation function of the last layer of our autoencoder is
+ Sigmoid, it can lead to saturation of the loss (plateau) [78]. This saturation could prevent gradient-based
427
+ learning algorithms from convergence. In order to avoid this issue, it would be better to have a logarithm
428
+ function in the objective function to undo the exponential function within the Sigmoid. This is why BCE is
429
+ preferred, because it uses a logarithm function, unlike Mean Squared Error (MSE).
430
+ The second part of our loss function is a type of N-pair Loss [79]. N-pair loss generalizes triplet loss [52]
431
+ to include comparison with multiple negative samples. The objective of this function is to keep the
432
+ distance between the anchor and positive smaller than the distance between the anchor and negative
433
+ representations, as shown in Figure 4.
434
+ Figure 4. Effect of the proposed loss function during the learning process. The N-pair loss allows the
435
+ model to distinguish between pose-variant representation vectors with the same identity and emotion, as
436
+ well as possible.
437
+ The proposed multi-task loss function is defined as follows,
438
+ L = L_BCE + L_Npair,   L_BCE = -(1/N) sum_i [ p_i log(y_i) + (1 - p_i) log(1 - y_i) ]    (3)
+ in which y_i and p_i denote the reconstructed pose style and the original pose style, respectively, and L_Npair is
+ defined as
+ L_Npair = sum over n in {n_p, n_id, n_e} of max( 0, d(f(a), f(p)) - d(f(a), f(n)) + m )    (4)
445
+ where m is a margin applied to impose the separability between genuine and imposter pairs, and f denotes
446
+ the proposed architecture. d is the euclidean distance applied on normalized features and it is given by
447
+ Equation (5).
448
+ d(u, v) = ||u - v||_2 = sqrt( sum_j (u_j - v_j)^2 )    (5)
449
+ In Equation (6), a and p denote the anchor (generated) representation and the positive (real)
450
+ representation, respectively. Additionally, n_p, n_id, and n_e denote the negative representations w.r.t. pose, identity,
451
+ and emotion of the anchor face, respectively. Specifically, negative pose representations have the same
452
+ identity as the anchor, but with different poses. The same holds for negative emotion representation. But,
453
+ for negative identity representation, the representation of another person is chosen randomly, regardless
454
+ of what pose or emotion it has. The goal of the triplet loss is to achieve,
455
+ d(f(a), f(p)) + m < d(f(a), f(n)),   for each negative n in {n_p, n_id, n_e}    (6)
456
+ The optimal state for each single triplet term is achieved when d(f(a), f(p)) is equal to zero and d(f(a), f(n)) is greater than the
457
+ predefined margin.
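+ The multi-part objective can be sketched as follows: a BCE term over the reconstructed landmark image plus one
+ hinge-based triplet term per negative type (pose, identity, and emotion). Equal weighting of the two parts is an
+ assumption; the margin value follows Table 2.
+ import torch
+ import torch.nn.functional as F
+ 
+ def fra_loss(recon, target, anchor, positive, negatives, margin=10.0):
+     """recon/target: (B, 1, 112, 112) landmark images in [0, 1];
+     anchor/positive: (B, 512) embeddings;
+     negatives: iterable of (B, 512) tensors (pose, identity, emotion)."""
+     bce = F.binary_cross_entropy(recon, target)
+     a = F.normalize(anchor, dim=1)
+     p = F.normalize(positive, dim=1)
+     d_ap = (a - p).pow(2).sum(dim=1).sqrt()
+     npair = 0.0
+     for neg in negatives:
+         n = F.normalize(neg, dim=1)
+         d_an = (a - n).pow(2).sum(dim=1).sqrt()
+         npair = npair + F.relu(d_ap - d_an + margin).mean()
+     return bce + npair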
458
+ 4.Results and Discussion
459
+ In this section we first introduce the benchmark dataset that we have used for evaluating our proposed
460
+ method. Then, we elaborate the details of our implementation and introduce the metrics used in this
461
+ paper. Finally, we demonstrate our experimental results and discussion.
462
+ 4.1.Datasets
463
+ With the object of benchmarking our results, we have used the KDEF dataset. It is a publicly available
464
+ dataset of 4900 face images, covering 140 unique identities. The images demonstrate face images with
465
+ varying pose and emotion styles. Some samples of these datasets are shown in Figure 5.
466
469
+ Figure 5. A few samples of the KDEF dataset. The KDEF dataset provides face images of 140 different
470
+ people in various postures and emotions.
471
+ 4.2.Implementation details
472
+ We carried out our experiments on a machine with a Core i7-1165G7 @ 2.80GHz CPU with 64
473
+ Gigabytes of RAM and a GeForce RTX 2060 12 GB GPU. All models were implemented and trained
474
+ using the Pytorch framework. Table 2 shows the hyperparameter setting.
475
+ Table 2. Details of the training procedure and the utilized FRLs. The hyperparameter settings are shown.
476
+ FRL arch. | # epochs | Init. learning rate | Dropout rate | Triplet margin
+ MagFace   | 255      | 0.001               | 0.4          | 10.0
+ ArcFace   | 320      | 0.001               | 0.4          | 10.0
+ CosFace   | 157      | 0.001               | 0.05         | 10.0
+ ViT settings: Embedding dim 256, FC dim 256, # Heads 4, # Layers 4, Patch size 8
510
+ Furthermore, with the object of fairly evaluating the proposed FRA, we divided the KDEF dataset based
511
+ on identities with the following distributions:
512
+
513
+ We randomly selected all samples from 99 identities which nearly comprise 70.7 % of all
514
+ identities in KDEF as our training data.
515
+
516
518
+
519
+ We randomly selected all samples from 11 identities which nearly comprise 7.8 % of all identities
520
+ in KDEF as our validation data.
521
+
522
+ We randomly selected all samples from 30 identities which nearly comprise 21.5 % of all
523
+ identities in KDEF as our testing data.
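+ A small sketch of this identity-disjoint split (99/11/30 identities for train/validation/test) is given below; the random
+ seed is arbitrary.
+ import random
+ 
+ def split_identities(all_ids, n_train=99, n_val=11, seed=0):
+     ids = sorted(all_ids)
+     random.Random(seed).shuffle(ids)
+     return (set(ids[:n_train]),                    # train identities
+             set(ids[n_train:n_train + n_val]),     # validation identities
+             set(ids[n_train + n_val:]))            # remaining 30 test identities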
524
+ 4.3. Experimental Results
526
+ This section details our comprehensive experimental results. Table 3 shows the achieved
527
+ accuracy of the Support Vector Machine (SVM) [80] classifier on the embeddings generated in
528
+ three different experiments. These experiments are:
529
+ (1) Pre-augmentation accuracy: In this experiment, the training happens on the embeddings extracted
530
+ using three different FRLs, namely MagFace, ArcFace, and CosFace, and the testing accuracy is
531
+ achieved on the testing partition of these embeddings (Train/Test split ratio is set to 80/20). In this
532
+ experiment, we used no augmentation technique at all and this is done to find a baseline for the
533
+ quality of the original data in the chosen benchmark dataset.
534
+ (2) Generated embeddings’ accuracy: In this experiment, we first augmented the original
535
+ embeddings to obtain the transformed embeddings. Then, we trained the SVM on the original data
536
+ and tested its performance on the generated embeddings by the proposed algorithm. This is done to
537
+ demonstrate how much the proposed model is able to sustain the identity, posture, and emotion-
538
+ related features without any degradation.
539
+ (3) Post-augmentation accuracy: In this experiment, we have augmented the embeddings of the
540
+ training split using FRA, where the test split is the same as (1). Then, we trained the SVM classifier
541
+ on the training part and tested it on the testing one.
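+ The three evaluations can be sketched with a linear-kernel SVM as follows; the variable names (X_orig, X_gen, and
+ so on) and the fixed random state are illustrative only.
+ import numpy as np
+ from sklearn.svm import SVC
+ from sklearn.model_selection import train_test_split
+ 
+ def evaluate_fra(X_orig, y_orig, X_gen, y_gen):
+     X_tr, X_te, y_tr, y_te = train_test_split(X_orig, y_orig, test_size=0.2, random_state=0)
+ 
+     # (1) pre-augmentation: train and test on the original embeddings
+     pre = SVC(kernel="linear").fit(X_tr, y_tr).score(X_te, y_te)
+ 
+     # (2) train on the original embeddings, test on the FRA-generated ones
+     gen = SVC(kernel="linear").fit(X_orig, y_orig).score(X_gen, y_gen)
+ 
+     # (3) post-augmentation: add generated embeddings to the training split only
+     X_aug = np.concatenate([X_tr, X_gen])
+     y_aug = np.concatenate([y_tr, y_gen])
+     post = SVC(kernel="linear").fit(X_aug, y_aug).score(X_te, y_te)
+ 
+     return pre, gen, post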
542
+ Table 3. Evaluation results of FRA. Pre-augmentation and post-augmentation accuracies show the
+ effectiveness of FRA. Generated embeddings' accuracy denotes the sustainability of FRA.
+ FRL     | Target   | (1) Pre-augmentation Acc. (%) | (2) Generated embeddings' Acc. (%) | (3) Post-augmentation Acc. (%)
+ MagFace | Posture  | 82.38 | 98.12 | 96.66
+ MagFace | Identity | 86.19 | 93.61 | 95.71
+ MagFace | Emotion  | 44.76 | 92.43 | 99.04
+ ArcFace | Posture  | 89.12 | 99.3  | 97.9
+ ArcFace | Identity | 86.61 | 91.6  | 96.65
+ ArcFace | Emotion  | 53.55 | 92.98 | 100
+ CosFace | Posture  | 99.12 | 99.91 | 99.12
+ CosFace | Identity | 79.91 | 88.11 | 96.50
+ CosFace | Emotion  | 54.14 | 87.61 | 97.37
598
+ Based on Table 3, it is observed that our proposed algorithm improves the classification accuracy, not
599
+ only identity-wise but also in terms of emotion and posture. For instance, SVM outputs 86.19% accuracy
600
+ on the MagFace embeddings but after the augmentation, this score goes up to 95.71% in Post-
601
+ augmentation. For the same data, the generated embeddings remain highly representative: the SVM reaches
+ 93.61% identity classification accuracy on them. This enhancement
603
+ can also be validated for ArcFace and CosFace since our algorithm increases the accuracy in all three
604
+ experiments. In addition, FRA can improve the accuracy of SVM embeddings remarkably. In addition to
605
+ improving the classification accuracy with respect to the identities of the embeddings, FRA improves that
606
+ pose- and emotion-wise. Based on Table 3, the accuracy of the SVM classifier is increased from 86.19%
607
+ for MagFace embeddings to 95.71% after augmentation. This improvement for ArcFace and CosFace is
+ from 86.61% to 96.65% and from 79.91% to 96.50%, respectively.
609
+ Furthermore, our generated embeddings should ensure the fact that they are linearly separable. This
610
+ means that the embeddings can be classified using a linear classifier such as SVM with a linear kernel. In
611
+ our experiments, we used an SVM classifier with a linear kernel and, based on Table 3, we can deduce that
+ FRA is able to improve the accuracy in experiments (2) and (3) by a large margin, effectively enhancing the
613
+ performance of SVM with a linear kernel.
614
+ Moreover, in order to show the independence of FRA from any FR algorithms, we adopted three such
615
+ approaches, namely MagFace, ArcFace, and CosFace. Based on Table 3, after augmenting the
616
+ embeddings generated by each of these algorithms, the classification accuracy of the SVM classifier is
617
+ increased significantly, and this proves that FRA is not dependent on any specific FR algorithm for
+ its required input embeddings.
619
+ In addition, for our algorithms’ performance to be verified thoroughly, the reconstructed binary images
620
+ which are created by the AE part of the proposed approach are presented. These images are illustrated in
621
+ Figure 6, showing the original image, and the AE’s output when dealing with MagFace embeddings,
622
+ ArcFace embeddings, and CosFace embeddings. Also, the training and validation loss in the training
623
+ procedure of our proposed pipeline is shown in Figure 7.
624
+ [Figure 6 rows: original landmarks; AE-generated landmarks for MagFace FRL; AE-generated landmarks for ArcFace FRL; AE-generated landmarks for CosFace FRL.]
634
+ Figure 6. Some instances of reconstructed binarized facial landmark images. These reconstructed images
635
+ indicate how well the AE performs for each FRL model. The CosFace model reconstructs the
+ landmarks most precisely.
637
+ [Figure 7 panels: Total loss, BCE loss, and Npair loss for MagFace, ArcFace, and CosFace.]
649
+ Figure 7. Total loss, BCE loss and Npair loss curves achieved by various FRL methods.
650
+ The curves of Precision-Recall (PRC), Receiver Operating Characteristic (ROC), and confusion matrices
651
+ of all experiments for MagFace, ArcFace, and CosFace are shown in Figures 8-16.
652
+
653
+ Train loss
654
+ Validation loss
655
+ 20
656
+ 15
657
+ Loss Error
658
+ 10
659
+ 5
660
+ 0
661
+ 0
662
+ 50
663
+ 100
664
+ 150
665
+ 200
666
+ 250
667
+ Epoch0.22
668
+ Train BCE losS
669
+ ValidationBCEloss
670
+ 0.20
671
+ 0.18
672
+ 0.16
673
+ rror
674
+ E
675
+ 0.14
676
+ Loss
677
+ 0.12
678
+ 0.10
679
+ 0.08
680
+ 0.06
681
+ 0
682
+ 50
683
+ 100
684
+ 150
685
+ 200
686
+ 250
687
+ EpochTrain Npair loss
688
+ Validation Npair loss
689
+ 20
690
+ 15
691
+ Loss Error
692
+ 10
693
+ 5
694
+ 0
695
+ 0
696
+ 50
697
+ 100
698
+ 150
699
+ 200
700
+ 250
701
+ EpochTrain loss
702
+ Validation loss
703
+ 20
704
+ 15
705
+ Error
706
+ Loss
707
+ 10
708
+ 5
709
+ 0
710
+ 0
711
+ 50
712
+ 100
713
+ 150
714
+ 200
715
+ 250
716
+ 300
717
+ EpochTrain BCE losS
718
+ Validation BCE loss
719
+ 0.7
720
+ 0.6
721
+ 0.5
722
+ rror
723
+ E
724
+ 0.4
725
+ LosS
726
+ 0.3
727
+ 0.2
728
+ 0.1
729
+ 0
730
+ 50
731
+ 100
732
+ 150
733
+ 200
734
+ 250
735
+ 300
736
+ EpochTrain Npair loss
737
+ Validation Npair loss
738
+ 20
739
+ 15
740
+ Loss Error
741
+ 10
742
+ 5
743
+ 0
744
+ 0
745
+ 50
746
+ 100
747
+ 150
748
+ 200
749
+ 250
750
+ 300
751
+ EpochTrain loss
752
+ 14
753
+ Validation loss
754
+ 12
755
+ 10
756
+ Error
757
+ 8
758
+ Loss
759
+ 6
760
+ 4
761
+ 2
762
+ 0
763
+ 0
764
+ 20
765
+ 40
766
+ 60
767
+ 80
768
+ 100
769
+ 120
770
+ 140
771
+ 160
772
+ EpochTrain BCE losS
773
+ Validation BCE loss
774
+ 0.18
775
+ 0.16
776
+ 0.14
777
+ Loss Error
778
+ 0.12
779
+ 0.10
780
+ 0.08
781
+ 0.06
782
+ 0.04
783
+ 0
784
+ 20
785
+ 40
786
+ 60
787
+ 80
788
+ 100
789
+ 120
790
+ 140
791
+ 160
792
+ EpochTrain Npair loss
793
+ 14
794
+ Validation Npair loss
795
+ 12
796
+ 10
797
+ Error
798
+ 8
799
+ Loss
800
+ 6
801
+ 4
802
+ 2
803
+ 0
804
+ 0
805
+ 20
806
+ 40
807
+ 60
808
+ 80
809
+ 100
810
+ 120
811
+ 140
812
+ 160
813
+ Epoch16
814
+ 16
815
+ a)
816
+ Pre-augmentation PRC (pose)
817
+ b)
818
+ Pre-augmentation PRC (id)
819
+ c)
820
+ Pre-augmentation PRC
821
+ (emotion)
822
+ d)
823
+ Post-augmentation PRC
824
+ (pose)
825
+ e)
826
+ Post-augmentation PRC (id)
827
+ f)
828
+ Post-augmentation PRC
829
+ (emotion)
830
+ Figure 8. PRC curves for MagFace FRL. As the curves indicate, post-augmentation PRC curves (d, e, and f) have
831
+ significant improvements in comparison to pre-augmentation PRC curves (a, b, and c) for pose, identity, and
832
+ emotion, respectively.
833
+
834
+ precision vs.recall curve (pose)
835
+ 1.0
836
+ 0.8
837
+ 0.6
838
+ precision
839
+ 0.4
840
+ 0.2
841
+ class FL
842
+ class HL
843
+ 0.0
844
+ class HR
845
+ class S
846
+ 0.0
847
+ 0.2
848
+ 0.4
849
+ 0.6
850
+ 0.8
851
+ 1.0
852
+ recallprecision vs. recall curve (id)
853
+ 1.0
854
+ 0.9
855
+ class AF05
856
+ class BF04
857
+ 0.8
858
+ clasS AM31
859
+ classBM13
860
+ class BF11
861
+ class AF01
862
+ class AM15
863
+ 0.7
864
+ class AF03
865
+ class BM16
866
+ precision
867
+ class AF10
868
+ class AM22
869
+ 0.6
870
+ class AM13
871
+ class BMo5
872
+ class BM23
873
+ class AM34
874
+ class BF12
875
+ 0.5
876
+ class AF07
877
+ class AM05
878
+ class AF27
879
+ class BF14
880
+ class BM29
881
+ 0.4
882
+ class AMo1
883
+ class AF16
884
+ class AF32
885
+ class AF26
886
+ 0.3
887
+ class BM07
888
+ class AF29
889
+ class BF16
890
+ 0.0
891
+ 0.2
892
+ 0.4
893
+ 0.6
894
+ 0.8
895
+ 1.0
896
+ recallprecision vs.recall curve (emo)
897
+ 1.0
898
+ 0.8
899
+ precision
900
+ 0.6
901
+ 0.4
902
+ class AF
903
+ class AN
904
+ class DI
905
+ class HA
906
+ 0.2
907
+ class NE
908
+ class SA
909
+ class SU
910
+ 0.0
911
+ 0.2
912
+ 0.4
913
+ 0.6
914
+ 0.8
915
+ 1.0
916
+ recallprecision vs.recall curve (pose
917
+ class FL
918
+ 1.0
919
+ class HL
920
+ class HR
921
+ 0.8
922
+ 0.6
923
+ precision
924
+ 0.4
925
+ 0.2
926
+ 0.0
927
+ 0.0
928
+ 0.2
929
+ 0.4
930
+ 0.6
931
+ 0.8
932
+ 1.0
933
+ recallprecision vs.recall curve (id)
934
+ 1.00
935
+ 0.95
936
+ class AF05
937
+ 0.90
938
+ class BF04
939
+ class AM31
940
+ class BM13
941
+ class BF11
942
+ class AF01
943
+ 0.85
944
+ class AM15
945
+ class AF03
946
+ precision
947
+ class BM16
948
+ class AF10
949
+ 0.80
950
+ class AM22
951
+ class AM13
952
+ class BM05
953
+ class BM23
954
+ 0.75
955
+ class AM34
956
+ class BF12
957
+ class AF07
958
+ class AM05
959
+ 0.70
960
+ class AF27
961
+ class BF14
962
+ class BM29
963
+ class AM01
964
+ 0.65
965
+ class AF16
966
+ class AF32
967
+ class AF26
968
+ class BM07
969
+ 0.60
970
+ class AF29
971
+ class BF16
972
+ 0.0
973
+ 0.2
974
+ 0.4
975
+ 0.6
976
+ 0.8
977
+ 1.0
978
+ recallprecision vs.recall curve (emo)
979
+ 1.00
980
+ 0.99
981
+ 0.98
982
+ precision
983
+ 0.97
984
+ 0.96
985
+ 0.95
986
+ class AF
987
+ class AN
988
+ class DI
989
+ class HA
990
+ 0.94
991
+ class NE
992
+ class SA
993
+ class SU
994
+ 0.0
995
+ 0.2
996
+ 0.4
997
+ 0.6
998
+ 0.8
999
+ 1.0
1000
+ recall17
1001
+ 17
1002
+ a)
1003
+ Pre-augmentation ROC (pose)
1004
+ b)
1005
+ Pre-augmentation ROC (id)
1006
+ c)
1007
+ Pre-augmentation ROC
1008
+ (emotion)
1009
+ d)
1010
+ Post-augmentation ROC
1011
+ (pose)
1012
+ e)
1013
+ Post-augmentation ROC (id)
1014
+ f)
1015
+ Post-augmentation ROC
1016
+ (emotion)
1017
+ Figure 9. ROC curves for MagFace FRL. As the curves indicate, post-augmentation ROC curves (d, e, and f) have
1018
+ significant improvements in comparison to pre-augmentation ROC curves (a, b, and c) for pose, identity, and
1019
+ emotion, respectively.
1020
+
1021
+ RoCcurve(pose)
1022
+ 1.0
1023
+ 0.8
1024
+ 0.6
1025
+ truepositive rate
1026
+ 0.4
1027
+ 0.2
1028
+ class FL (AUC = 0.9609)
1029
+ class FR (AUC = 0.999)
1030
+ class HL (AUC = 0.9991)
1031
+ 0.0
1032
+ class HR (AUC = 1.0)
1033
+ No skill
1034
+ 0.0
1035
+ 0.2
1036
+ 0.4
1037
+ 0.6
1038
+ 0.8
1039
+ 1.0
1040
+ false positive rateROC curve (id)
1041
+ 1.0
1042
+ class AM23 (AUC = 1.0)
1043
+ 0.8 -
1044
+ class AF05 (AUC = 1.0)
1045
+ class BM13 (AUC = 1.0)
1046
+ classAF13(AUC =0.9892)
1047
+ class BF28 (AUC = 1.0)
1048
+ class BM08 (AUC = 1.0)
1049
+ class AF31 (AUC = 1.0)
1050
+ 0.6
1051
+ class BM35 (AUC = 1.0)
1052
+ true positive rate
1053
+ class BF33 (AUC = 1.0)
1054
+ class BM18 (AUC = 1.0)
1055
+ class BF13 (AUC = 1.0)
1056
+ class AF19 (AUC = 0.9918)
1057
+ class AM26 (AUC = 0.9904)
1058
+ class BM12 (AUC = 1.0)
1059
+ 0.4
1060
+ class AF16 (AUC = 1.0)
1061
+ class AF23 (AUC = 1.0)
1062
+ class BM01 (AUC = 1.0)
1063
+ class BM03 (AUC = 0.9938)
1064
+ class BF10 (AUC = 1.0)
1065
+ class BM19 (AUC = 1.0)
1066
+ class AF04 (AUC = 1.0)
1067
+ 0.2
1068
+ class BF32 (AUC = 1.0)
1069
+ class AM08 (AUC =0.9939)
1070
+ class BF01 (AUC = 1.0)
1071
+ class BM06 (AUC = 1.0)
1072
+ class AM14 (AUC = 1.0)
1073
+ class BM07 (AUC = 1.0)
1074
+ 0.0
1075
+ class BF15 (AUC =0.9949)
1076
+ No skill
1077
+ 0.0
1078
+ 0.2
1079
+ 0.4
1080
+ 0.6
1081
+ 0.8
1082
+ 1.0
1083
+ false positive rateROCcurve (emo)
1084
+ 1.0
1085
+ 0.8
1086
+ 0.6
1087
+ true positive rate
1088
+ 0.4
1089
+ 0.2
1090
+ class AF (AUC =0.9948)
1091
+ classAN (AUC =0.9921)
1092
+ class DI (AUC = 0.9972)
1093
+ class HA (AUC = 0.9959)
1094
+ class NE (AUC = 0.9894)
1095
+ class SA (AUC = 0.9939)
1096
+ 0.0
1097
+ classSU (AUC = 0.9916)
1098
+ No skill
1099
+ 0.0
1100
+ 0.2
1101
+ 0.4
1102
+ 0.6
1103
+ 0.8
1104
+ 1.0
1105
+ false positive rateRoC curve (pose)
1106
+ class FL (AUC = 0.459)
1107
+ 1.0
1108
+ class FR (AUC = 0.4088)
1109
+ class HL (AUC = 0.0797)
1110
+ No skill
1111
+ 0.8
1112
+ 0.6
1113
+ true positive rate
1114
+ 0.4
1115
+ 0.2
1116
+ 0.0
1117
+ 0.0
1118
+ 0.2
1119
+ 0.4
1120
+ 0.6
1121
+ 0.8
1122
+ 1.0
1123
+ false positive rateROC curve (id)
1124
+ 1.0
1125
+ class AM23 (AUC = 1.0)
1126
+ 0.8 -
1127
+ class AF05 (AUC = 1.0)
1128
+ class BM13 (AUC = 1.0)
1129
+ class AF13 (AUC = 0.9951)
1130
+ class BF28 (AUC = 1.0)
1131
+ class BM08 (AUC = 1.0)
1132
+ class AF31 (AUC = 1.0)
1133
+ 0.6
1134
+ class BM35 (AUC = 1.0)
1135
+ true positive rate
1136
+ class BF33 (AUC = 1.0)
1137
+ class BM18 (AUC = 1.0)
1138
+ class BF13 (AUC = 1.0)
1139
+ class AF19 (AUC =0.9961)
1140
+ class AM26 (AUC = 0.9961)
1141
+ class BM12 (AUC = 1.0)
1142
+ 0.4
1143
+ class AF16 (AUC = 1.0)
1144
+ class AF23 (AUC = 1.0)
1145
+ class BM01 (AUC = 1.0)
1146
+ class BM03 (AUC = 0.9951)
1147
+ class BF10 (AUC = 1.0)
1148
+ class BM19 (AUC = 1.0)
1149
+ class AF04 (AUC = 1.0)
1150
+ 0.2
1151
+ class BF32 (AUC = 1.0)
1152
+ class AM08 (AUC = 0.9989)
1153
+ class BF01 (AUC = 1.0)
1154
+ class BM06 (AUC = 1.0)
1155
+ class AM14 (AUC = 1.0)
1156
+ class BM07 (AUC = 1.0)
1157
+ 0.0
1158
+ class BF15 (AUC =0.999)
1159
+ No skill
1160
+ 0.0
1161
+ 0.2
1162
+ 0.4
1163
+ 0.6
1164
+ 0.8
1165
+ 1.0
1166
+ false positive rateRoCcurve(emo)
1167
+ 1.0
1168
+ 0.8
1169
+ 0.6
1170
+ true positive rate
1171
+ 0.4
1172
+ 0.2
1173
+ class AF (AUC = 1.0)
1174
+ class AN (AUC = 1.0)
1175
+ class DI (AUC = 1.0)
1176
+ class HA (AUC = 1.0)
1177
+ class NE (AUC = 1.0)
1178
+ class SA (AUC = 0.9996)
1179
+ 0.0
1180
+ class SU (AUC = 1.0)
1181
+ No skill
1182
+ 0.0
1183
+ 0.2
1184
+ 0.4
1185
+ 0.6
1186
+ 0.8
1187
+ 1.0
1188
+ false positive rate18
1189
+ 18
1190
+ a) Pre-augmentation Confusion (pose)
1191
+ b) Pre-augmentation Confusion (id)
1192
+ c) Pre-augmentation Confusion
1193
+ (emotion)
1194
+ d) Post-augmentation Confusion (pose)
1195
+ e) Post-augmentation Confusion (id)
1196
+ f) Post-augmentation Confusion
1197
+ (emotion)
1198
+ Figure 10. Confusion matrices for MagFace FRL. As the confusion matrices indicate, post-augmentation matrices
1199
+ (d, e, and f) have significant improvements in comparison to pre-augmentation matrices (a, b, and c) for pose,
1200
+ identity, and emotion, respectively.
1201
+
1202
+ Confusionmatrix(pose
1203
+ 10000
1204
+ 8000
1205
+ 6000
1206
+ 4000
1207
+ 2000
1208
+ FL
1209
+ HL
1210
+ HR
1211
+ SConfusionmatrix(id)
1212
+ AF05
1213
+ BF04
1214
+ AM31
1215
+ BM13
1216
+ BF11
1217
+ 1000
1218
+ AF01
1219
+ AM15
1220
+ AF03
1221
+ BM16
1222
+ AF10
1223
+ 800
1224
+ AM22
1225
+ AM13
1226
+ BM05
1227
+ BM23
1228
+ 600
1229
+ AM34 -
1230
+ BF12
1231
+ AF07
1232
+ AM05
1233
+ AF27
1234
+ 400
1235
+ BF14
1236
+ BM29
1237
+ AM01
1238
+ AF16
1239
+ AF32
1240
+ 200
1241
+ AF26
1242
+ BM07
1243
+ AF29
1244
+ BF16
1245
+ AF05
1246
+ BF04
1247
+ AM31
1248
+ BM13
1249
+ BF11
1250
+ AF01
1251
+ AM15
1252
+ AF03
1253
+ BM16
1254
+ AF10
1255
+ AM22
1256
+ AM13
1257
+ BM05
1258
+ BM23
1259
+ AM34
1260
+ BF12
1261
+ AF07
1262
+ AM05
1263
+ AF27
1264
+ BF14
1265
+ BM29
1266
+ AM01
1267
+ AF16
1268
+ AF32
1269
+ AF26
1270
+ BM07
1271
+ AF29
1272
+ BF16Confusionmatrix(emo)
1273
+ 4
1274
+ 4000
1275
+ 3500
1276
+ 3
1277
+ 3000
1278
+ 2500
1279
+ 2000
1280
+ 1500
1281
+ 1000
1282
+ SA
1283
+ 500
1284
+ SU
1285
+ 0
1286
+ AF
1287
+ AN
1288
+ DI
1289
+ HA
1290
+ NE
1291
+ SA
1292
+ SUConfusionmatrix(pose)
1293
+ 60
1294
+ 50
1295
+ 40
1296
+ 30
1297
+ 20
1298
+ S
1299
+ 10
1300
+ 0
1301
+ HL
1302
+ HR
1303
+ SConfusionmatrix(id)
1304
+ 10
1305
+ AF05
1306
+ BF04
1307
+ AM31
1308
+ BM13
1309
+ BF11
1310
+ AF01
1311
+ 8
1312
+ AM15
1313
+ AF03
1314
+ BM16
1315
+ AF10
1316
+ AM22
1317
+ AM13
1318
+ 6
1319
+ BM05
1320
+ BM23
1321
+ AM34
1322
+ BF12
1323
+ AF07
1324
+ 4
1325
+ AM05
1326
+ AF27
1327
+ BF14
1328
+ BM29
1329
+ AM01
1330
+ AF16
1331
+ 2
1332
+ AF32
1333
+ AF26
1334
+ BM07
1335
+ AF29
1336
+ BF16
1337
+ AF05
1338
+ BF04
1339
+ AM31
1340
+ BM13
1341
+ BF11
1342
+ AF01
1343
+ AM15
1344
+ AF03
1345
+ BM16
1346
+ AF10
1347
+ AM22
1348
+ AM13
1349
+ BM05
1350
+ BM23
1351
+ AM34
1352
+ BF12
1353
+ AF07
1354
+ AM05
1355
+ AF27
1356
+ BF14
1357
+ BM29
1358
+ AM01
1359
+ AF16
1360
+ AF32
1361
+ AF26
1362
+ BM07
1363
+ AF29
1364
+ BF16Confusionmatrix(emo)
1365
+ -35
1366
+ 4
1367
+ 30
1368
+ 3
1369
+ 25
1370
+ - 20
1371
+ 15
1372
+ 10
1373
+ SA
1374
+ - 5
1375
+ SU
1376
+ 0
1377
+ AF
1378
+ AN
1379
+ DI
1380
+ HA
1381
+ NE
1382
+ SA
1383
+ sU19
1384
+ 19
1385
+ a)
1386
+ Pre-augmentation PRC (pose)
1387
+ b)
1388
+ Pre-augmentation PRC (id)
1389
+ c)
1390
+ Pre-augmentation PRC
1391
+ (emotion)
1392
+ d)
1393
+ Post-augmentation PRC
1394
+ (pose)
1395
+ e)
1396
+ Post-augmentation PRC (id)
1397
+ f)
1398
+ Post-augmentation PRC
1399
+ (emotion)
1400
+ Figure 11. PRC curves for ArcFace FRL. As the curves indicate, post-augmentation PRC curves (d, e, and f) have
1401
+ significant improvements in comparison to pre-augmentation PRC curves (a, b, and c) for pose, identity, and
1402
+ emotion, respectively.
1403
+
1404
+ precision vs.recall curve (pose
1405
+ 1.0
1406
+ 0.8
1407
+ 0.6
1408
+ class FL
1409
+ precision
1410
+ class FR
1411
+ class HL
1412
+ class HR
1413
+ class S
1414
+ 0.4
1415
+ 0.2
1416
+ 0.0
1417
+ 0.0
1418
+ 0.2
1419
+ 0.4
1420
+ 0.6
1421
+ 0.8
1422
+ 1.0
1423
+ recallprecision vs.recall curve (id)
1424
+ 1.0
1425
+ 0.8
1426
+ class BM15
1427
+ class BF09
1428
+ class BM14
1429
+ class BF07
1430
+ class AM22
1431
+ class AM23
1432
+ 0.6
1433
+ class BM18
1434
+ class BM26
1435
+ precision
1436
+ class AF14
1437
+ class BM11
1438
+ class AF10
1439
+ class BM16
1440
+ class AF33
1441
+ 0.4
1442
+ class BF10
1443
+ class AF20
1444
+ class BF02
1445
+ class BM08
1446
+ class BF28
1447
+ class AM35
1448
+ class AM07
1449
+ 0.2
1450
+ class BF03
1451
+ class BM31
1452
+ class BF17
1453
+ class AM10
1454
+ class AF02
1455
+ class BM06
1456
+ 0.0
1457
+ class BM23
1458
+ class BM13
1459
+ 0.0
1460
+ 0.2
1461
+ 0.4
1462
+ 0.6
1463
+ 0.8
1464
+ 1.0
1465
+ recallprecision vs.recall curve (emo)
1466
+ 1.0
1467
+ 0.8
1468
+ precision
1469
+ 0.6
1470
+ 0.4
1471
+ class AF
1472
+ class AN
1473
+ class DI
1474
+ class HA
1475
+ 0.2
1476
+ class NE
1477
+ class SA
1478
+ class SU
1479
+ 0.0
1480
+ 0.2
1481
+ 0.4
1482
+ 0.6
1483
+ 0.8
1484
+ 1.0
1485
+ recallprecision vs.recall curve (pose)
1486
+ class FL
1487
+ 1.0
1488
+ class FR
1489
+ class HL
1490
+ 0.8
1491
+ 0.6
1492
+ precision
1493
+ 0.4
1494
+ 0.2
1495
+ 0.0
1496
+ 0.0
1497
+ 0.2
1498
+ 0.4
1499
+ 0.6
1500
+ 0.8
1501
+ 1.0
1502
+ recallprecision vs.recall curve (id)
1503
+ 1.0
1504
+ class BM15
1505
+ 0.9
1506
+ class BF09
1507
+ class BM14
1508
+ class BF07
1509
+ class AM22
1510
+ class AM23
1511
+ class BM18
1512
+ class BM26
1513
+ precision
1514
+ class AF14
1515
+ 0.8
1516
+ class BM11
1517
+ class AF10
1518
+ class BM16
1519
+ class AF33
1520
+ class BF10
1521
+ class AF20
1522
+ class BF02
1523
+ class BM08
1524
+ 0.7
1525
+ class BF28
1526
+ class AM35
1527
+ class AM07
1528
+ class BF03
1529
+ class BM31
1530
+ class BF17
1531
+ class AM10
1532
+ class AF02
1533
+ 0.6
1534
+ class BM06
1535
+ class BM23
1536
+ class BM13
1537
+ 0.0
1538
+ 0.2
1539
+ 0.4
1540
+ 0.6
1541
+ 0.8
1542
+ 1.0
1543
+ recallprecision vs.recall curve (emo)
1544
+ 1.2
1545
+ 1.0
1546
+ 0.8
1547
+ precision
1548
+ 0.6
1549
+ 0.4
1550
+ class AF
1551
+ 0.2
1552
+ class AN
1553
+ class DI
1554
+ class HA
1555
+ class NE
1556
+ class SA
1557
+ class SU
1558
+ 0.0
1559
+ 0.0
1560
+ 0.2
1561
+ 0.4
1562
+ 0.6
1563
+ 0.8
1564
+ 1.0
1565
+ recall20
1566
+ 20
1567
+ a) Pre-augmentation ROC (pose); b) Pre-augmentation ROC (id); c) Pre-augmentation ROC (emotion); d) Post-augmentation ROC (pose); e) Post-augmentation ROC (id); f) Post-augmentation ROC (emotion)
+ Figure 12. ROC curves for ArcFace FRL. As the curves indicate, post-augmentation ROC curves (d, e, and f) have significant improvements in comparison to pre-augmentation ROC curves (a, b, and c) for pose, identity, and emotion, respectively.
+ [Plot text omitted: ROC curves (true positive rate vs. false positive rate) with per-class AUC values for pose, identity, and emotion, plus a "No skill" baseline.]
1756
+ a) Pre-augmentation Confusion (pose); b) Pre-augmentation Confusion (id); c) Pre-augmentation Confusion (emotion); d) Post-augmentation Confusion (pose); e) Post-augmentation Confusion (id); f) Post-augmentation Confusion (emotion)
+ Figure 13. Confusion matrices for ArcFace FRL. As the confusion matrices indicate, post-augmentation matrices (d, e, and f) have significant improvements in comparison to pre-augmentation matrices (a, b, and c) for pose, identity, and emotion, respectively.
+ [Matrix text omitted: confusion matrices over pose (FL, FR, HL, HR, S), identity, and emotion (AF, AN, DI, HA, NE, SA, SU) class labels.]
1965
+ a) Pre-augmentation PRC (pose); b) Pre-augmentation PRC (id); c) Pre-augmentation PRC (emotion); d) Post-augmentation PRC (pose); e) Post-augmentation PRC (id); f) Post-augmentation PRC (emotion)
+ Figure 14. PRC curves for CosFace FRL. As the curves indicate, post-augmentation PRC curves (d, e, and f) have significant improvements in comparison to pre-augmentation PRC curves (a, b, and c) for pose, identity, and emotion, respectively.
+ [Plot text omitted: precision vs. recall curves per class for pose (FL, FR, HL, HR, S), identity, and emotion (AF, AN, DI, HA, NE, SA, SU).]
2157
+ a) Pre-augmentation ROC (pose); b) Pre-augmentation ROC (id); c) Pre-augmentation ROC (emotion); d) Post-augmentation ROC (pose); e) Post-augmentation ROC (id); f) Post-augmentation ROC (emotion)
+ Figure 15. ROC curves for CosFace FRL. As the curves indicate, post-augmentation ROC curves (d, e, and f) have significant improvements in comparison to pre-augmentation ROC curves (a, b, and c) for pose, identity, and emotion, respectively.
+ [Plot text omitted: ROC curves (true positive rate vs. false positive rate) with per-class AUC values for pose, identity, and emotion, plus a "No skill" baseline.]
2343
+ a) Pre-augmentation Confusion (pose); b) Pre-augmentation Confusion (id); c) Pre-augmentation Confusion (emotion); d) Post-augmentation Confusion (pose); e) Post-augmentation Confusion (id); f) Post-augmentation Confusion (emotion)
+ Figure 16. Confusion matrices for CosFace FRL. As the confusion matrices indicate, post-augmentation matrices (d, e, and f) have significant improvements in comparison to pre-augmentation matrices (a, b, and c) for pose, identity, and emotion, respectively.
+ [Matrix text omitted: confusion matrices over pose, identity, and emotion class labels; the extracted cell values are not recoverable.]
2364
+ 4.4. Discussion
+ FR has long been a popular field of study among specialists and academics in biometric recognition, as it is non-contact, user-friendly, and easy to accept. Although remarkable performance has been reported by some state-of-the-art approaches in the literature, there is still a need to improve such algorithms in real-world scenarios. To better handle such uncontrolled contexts, especially when data is scarce, DA techniques are introduced to increase the number of training samples by applying different manipulations. Both classical image transformations (rotation, skewing, flipping, blurring, etc.) and GAN-based techniques, which use deep generative models and disentangled features to create more realistically transformed face images, have been studied extensively for DA in the FR domain. However, these techniques have drawbacks. Classical techniques mostly manipulate face images in a way that distorts their alignment, which dramatically degrades the ability of FR algorithms to generate distinct, representative embeddings. To demonstrate this, we experimented with four transformations, namely horizontal flip, skewing, blurring, and noising. We augmented the samples in the KDEF dataset, increased the training set to 4 times the size of the original dataset, and used different FR algorithms for generating
2379
+
2380
2566
+ embeddings. Then we classified the embeddings using SVM with respect to their identities. Table 4 details the results achieved by this experiment.
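+ The evaluation protocol just described can be sketched as follows. This is a minimal illustration, not the authors' implementation: `embed_fn` stands in for any pre-trained FRL model (e.g. MagFace, ArcFace, or CosFace), and the augmentation parameters are illustrative assumptions.
+ import numpy as np
+ from PIL import Image, ImageFilter, ImageOps
+ from sklearn.svm import SVC
+ from sklearn.model_selection import train_test_split
+ from sklearn.metrics import accuracy_score
+
+ def classical_augment(img):
+     """Return the four classical variants used in the experiment: flip, skew, blur, noise."""
+     flipped = ImageOps.mirror(img)                                          # horizontal flip
+     skewed = img.transform(img.size, Image.Transform.AFFINE, (1, 0.2, 0, 0, 1, 0))  # simple shear
+     blurred = img.filter(ImageFilter.GaussianBlur(radius=2))                # blurring
+     arr = np.asarray(img).astype(np.float32)
+     arr += np.random.normal(0, 10, arr.shape)                               # additive Gaussian noise
+     noisy = Image.fromarray(np.clip(arr, 0, 255).astype(np.uint8))
+     return [flipped, skewed, blurred, noisy]
+
+ def svm_identity_accuracy(images, labels, embed_fn, augment=False):
+     """Embed (optionally augmented) face images and classify identities with an SVM."""
+     X, y = [], []
+     for img, label in zip(images, labels):
+         for variant in [img] + (classical_augment(img) if augment else []):
+             X.append(embed_fn(variant))   # embed_fn: pre-trained FRL model (assumed helper)
+             y.append(label)
+     X_tr, X_te, y_tr, y_te = train_test_split(np.array(X), np.array(y),
+                                               test_size=0.3, random_state=0)
+     clf = SVC(kernel="linear").fit(X_tr, y_tr)
+     return accuracy_score(y_te, clf.predict(X_te))
+ Comparing `svm_identity_accuracy(..., augment=False)` against `augment=True` reproduces the pre- vs. post-augmentation comparison reported below.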
2568
+
2569
+ Table 4. Evaluation of MagFace, ArcFace, and CosFace using traditional augmentation techniques on the KDEF dataset. The post-augmentation accuracy scores denote the ineffectiveness of these techniques for FR tasks.
+ FR Algorithm | Pre-augmentation accuracy | Post-augmentation accuracy
+ MagFace      | 20.49%                    | 18.54%
+ ArcFace      | 20.12%                    | 17.01%
+ CosFace      | 18.19%                    | 11.33%
2585
+ Table 4 shows that augmenting the face images using the classical approaches does not bring any improvement and, in fact, degrades the quality of the embeddings. For instance, for the MagFace algorithm, the accuracy obtained by the SVM decreased by roughly 2 percentage points after applying DA.
+ Moreover, we conducted another experiment using three state-of-the-art generative algorithms, namely CPM [41], AttGAN [81], and PSGAN [40]. Following the previous experiments, we augmented the face images and obtained the classification accuracy before and after augmentation. Table 5 shows the results achieved by this experiment.
2592
+ Table 5. Evaluation of GAN-based augmentation techniques on the KDEF dataset. In the best case, the post-augmentation accuracy increased a little, and in some cases augmentation caused a degradation in accuracy.
+ Algorithm   | Pre-augmentation accuracy | Post-augmentation accuracy
+ CPM [41]    | 20.49%                    | 17.82%
+ AttGAN [81] | 20.49%                    | 26.70%
+ PSGAN [40]  | 20.49%                    | 23.40%
2608
+ Based on Table 5, it can be claimed that these generative models do not reliably improve classification accuracy. Therefore, to address this issue, in this paper we propose a new algorithm, named FRA, which effectively augments the training data for FR algorithms. FRA operates directly on the original embeddings and manipulates them so that the resulting embeddings still represent the same identity while encoding different postural information. The results of our extensive experiments indicate the efficacy of FRA in augmenting samples in the FR domain.
2615
+ 5. Conclusion
2619
+ Since data scarcity is a common problem in deep learning-based solutions, it can be very challenging to build FR systems that are robust enough to recognize face images with extreme diversity. In this paper, we proposed a novel method that augments face data in latent space. The proposed method utilizes two major components: an autoencoder and a ViT-based model. The former encodes the binarized input images, consisting of sparse facial landmarks, into a latent space. The latter extracts features from the combined embeddings coming from a pre-trained FRL algorithm and the autoencoder part of our model. Lastly, the output of the proposed model is an embedding representing the main identity with the same emotion but with a different posture. This way, we managed to improve the classification accuracy by 9.52, 10.04, and 16.60, in comparison with the base models of MagFace, ArcFace, and CosFace, respectively.
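+ To make the summarized data flow concrete, the following is a minimal, runnable sketch of such a pipeline. The layer sizes, transformer configuration, and landmark-image resolution are illustrative assumptions, not the actual FRA hyperparameters.
+ import torch
+ import torch.nn as nn
+
+ class LandmarkEncoder(nn.Module):
+     """Autoencoder encoder: binarized sparse-landmark image -> latent vector."""
+     def __init__(self, latent_dim=128):
+         super().__init__()
+         self.net = nn.Sequential(nn.Flatten(),
+                                  nn.Linear(64 * 64, 512), nn.ReLU(),
+                                  nn.Linear(512, latent_dim))
+     def forward(self, landmark_img):          # (B, 1, 64, 64) binary image
+         return self.net(landmark_img)
+
+ class LatentAugmenter(nn.Module):
+     """Combine an FRL embedding with a target-pose landmark latent and refine it with a ViT-style encoder."""
+     def __init__(self, frl_dim=512, latent_dim=128, d_model=256):
+         super().__init__()
+         self.landmark_enc = LandmarkEncoder(latent_dim)
+         self.proj = nn.Linear(frl_dim + latent_dim, d_model)
+         layer = nn.TransformerEncoderLayer(d_model, nhead=4, batch_first=True)
+         self.transformer = nn.TransformerEncoder(layer, num_layers=2)
+         self.head = nn.Linear(d_model, frl_dim)   # map back to the FRL embedding space
+     def forward(self, frl_embedding, target_landmarks):
+         z = self.landmark_enc(target_landmarks)                     # pose latent
+         x = self.proj(torch.cat([frl_embedding, z], dim=-1)).unsqueeze(1)
+         x = self.transformer(x).squeeze(1)
+         return self.head(x)   # augmented embedding: same identity, new posture
+
+ # Example: augment a batch of 4 embeddings with new target poses.
+ aug = LatentAugmenter()
+ new_emb = aug(torch.randn(4, 512), torch.rand(4, 1, 64, 64).round())
+ print(new_emb.shape)   # torch.Size([4, 512])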
2629
+ References
2630
+ [1] P. Terhorst, J.N. Kolf, N. Damer, F. Kirchbuchner, A. Kuijper, SER-FIQ: Unsupervised estimation of face image
2631
+ quality based on stochastic embedding robustness, Proceedings of the IEEE/CVF conference on computer
2632
+ vision and pattern recognition2020, pp. 5651-5660.
2633
+ [2] M. Wang, W. Deng, Deep face recognition: A survey, Neurocomputing, 429 (2021) 215-244.
2634
+ [3] W. Ali, W. Tian, S.U. Din, D. Iradukunda, A.A. Khan, Classical and modern face recognition approaches: a
2635
+ complete review, Multimedia tools and applications, 80 (2021) 4825-4880.
2636
+ [4] A. Krizhevsky, I. Sutskever, G.E. Hinton, Imagenet classification with deep convolutional neural networks,
2637
+ Communications of the ACM, 60 (2017) 84-90.
2638
+ [5] Y. Taigman, M. Yang, M.A. Ranzato, L. Wolf, Deepface: Closing the gap to human-level performance in face
2639
+ verification, Proceedings of the IEEE conference on computer vision and pattern recognition2014, pp. 1701-
2640
+ 1708.
2641
+ [6] Q. Meng, S. Zhao, Z. Huang, F. Zhou, Magface: A universal representation for face recognition and quality
2642
+ assessment, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition2021, pp.
2643
+ 14225-14234.
2644
+ [7] Q. Cao, L. Shen, W. Xie, O.M. Parkhi, A. Zisserman, Vggface2: A dataset for recognising faces across pose and
2645
+ age, 2018 13th IEEE international conference on automatic face & gesture recognition (FG 2018), IEEE2018,
2646
+ pp. 67-74.
2647
+ [8] C. Yan, L. Meng, L. Li, J. Zhang, Z. Wang, J. Yin, J. Zhang, Y. Sun, B. Zheng, Age-invariant face recognition
2648
+ by multi-feature fusion and decomposition with self-attention, ACM Transactions on Multimedia Computing,
2649
+ Communications, and Applications (TOMM), 18 (2022) 1-18.
2650
+ [9] Y. Zheng, D.K. Pal, M. Savvides, Ring loss: Convex feature normalization for face recognition, Proceedings of
2651
+ the IEEE conference on computer vision and pattern recognition2018, pp. 5089-5097.
2652
+ [10] X. Zhang, Z. Fang, Y. Wen, Z. Li, Y. Qiao, Range loss for deep face recognition with long-tailed training data,
2653
+ Proceedings of the IEEE International Conference on Computer Vision2017, pp. 5409-5418.
2654
+ [11] C. Peng, N. Wang, J. Li, X. Gao, DLFace: Deep local descriptor for cross-modality face recognition, Pattern
2655
+ Recognition, 90 (2019) 161-171.
2656
+ [12] A. Bhattacharyya, S. Chatterjee, S. Sen, A. Sinitca, D. Kaplun, R. Sarkar, A deep learning model for classifying
2657
+ human facial expressions from infrared thermal images, Scientific Reports, 11 (2021) 20696.
2658
+ [13] S. Gupta, K. Thakur, M. Kumar, 2D-human face recognition using SIFT and SURF descriptors of face’s feature
2659
+ regions, The Visual Computer, 37 (2021) 447-456.
2660
+ [14] Y. Kortli, M. Jridi, A. Al Falou, M. Atri, A comparative study of CFs, LBP, HOG, SIFT, SURF, and BRIEF for
2661
+ security and face recognition, Advanced Secure Optical Image Processing for Communications, IOP
2662
+ Publishing2018.
2663
+ [15] Y. Duan, J. Lu, J. Zhou, Uniformface: Learning deep equidistributed representation for face recognition,
2664
+ Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition2019, pp. 3415-3424.
2665
+
2666
2668
+ [16] B. Lahasan, S.L. Lutfi, R. San-Segundo, A survey on techniques to handle face recognition challenges:
2669
+ occlusion, single sample per subject and expression, Artificial Intelligence Review, 52 (2019) 949-979.
2670
+ [17] Y. Li, K. Guo, Y. Lu, L. Liu, Cropping and attention based approach for masked face recognition, Appl. Intell.,
2671
+ 51 (2021) 3012-3025.
2672
+ [18] P. Li, L. Prieto, D. Mery, P.J. Flynn, On low-resolution face recognition in the wild: Comparisons and new
2673
+ techniques, IEEE Transactions on Information Forensics and Security, 14 (2019) 2000-2012.
2674
+ [19] L. Song, D. Gong, Z. Li, C. Liu, W. Liu, Occlusion robust face recognition based on mask learning with
2675
+ pairwise differential siamese network, Proceedings of the IEEE/CVF International Conference on Computer
2676
+ Vision2019, pp. 773-782.
2677
+ [20] Z. Lu, X. Jiang, A. Kot, Deep coupled resnet for low-resolution face recognition, IEEE Signal Processing
2678
+ Letters, 25 (2018) 526-530.
2679
+ [21] J. Deng, S. Zafeiriou, Arcface for disguised face recognition, Proceedings of the IEEE/CVF International
2680
+ Conference on Computer Vision Workshops2019, pp. 0-0.
2681
+ [22] J. Deng, J. Guo, N. Xue, S. Zafeiriou, Arcface: Additive angular margin loss for deep face recognition,
2682
+ Proceedings of the IEEE/CVF conference on computer vision and pattern recognition2019, pp. 4690-4699.
2683
+ [23] Y. Srivastava, V. Murali, S.R. Dubey, A performance evaluation of loss functions for deep face recognition,
2684
+ Computer Vision, Pattern Recognition, Image Processing, and Graphics: 7th National Conference,
2685
+ NCVPRIPG 2019, Hubballi, India, December 22–24, 2019, Revised Selected Papers 7, Springer2020, pp. 322-
2686
+ 332.
2687
+ [24] Y. Huang, Y. Wang, Y. Tai, X. Liu, P. Shen, S. Li, J. Li, F. Huang, Curricularface: adaptive curriculum
2688
+ learning loss for deep face recognition, proceedings of the IEEE/CVF conference on computer vision and
2689
+ pattern recognition2020, pp. 5901-5910.
2690
+ [25] H. Wang, Y. Wang, Z. Zhou, X. Ji, D. Gong, J. Zhou, Z. Li, W. Liu, Cosface: Large margin cosine loss for
2691
+ deep face recognition, Proceedings of the IEEE conference on computer vision and pattern recognition2018,
2692
+ pp. 5265-5274.
2693
+ [26] X. Wang, S. Wang, J. Wang, H. Shi, T. Mei, Co-mining: Deep face recognition with noisy labels, Proceedings
2694
+ of the IEEE/CVF International Conference on Computer Vision2019, pp. 9358-9367.
2695
+ [27] Z. Liu, P. Luo, X. Wang, X. Tang, Deep learning face attributes in the wild, Proceedings of the IEEE
2696
+ international conference on computer vision2015, pp. 3730-3738.
2697
+ [28] S.C. Wong, A. Gatt, V. Stamatescu, M.D. McDonnell, Understanding data augmentation for classification:
2698
+ when to warp?, 2016 international conference on digital image computing: techniques and applications
2699
+ (DICTA), IEEE2016, pp. 1-6.
2700
+ [29] J.-J. Lv, X.-H. Shao, J.-S. Huang, X.-D. Zhou, X. Zhou, Data augmentation for face recognition,
2701
+ Neurocomputing, 230 (2017) 184-196.
2702
+ [30] I. Masi, A.T. Trần, T. Hassner, G. Sahin, G. Medioni, Face-specific data augmentation for unconstrained face
2703
+ recognition, International Journal of Computer Vision, 127 (2019) 642-667.
2704
+ [31] D. Jiang, Y. Hu, S. Yan, L. Zhang, H. Zhang, W. Gao, Efficient 3D reconstruction for face recognition, Pattern
2705
+ Recognition, 38 (2005) 787-798.
2706
+ [32] H. Mohammadzade, D. Hatzinakos, Projection into expression subspaces for face recognition from single
2707
+ sample per person, IEEE Transactions on Affective Computing, 4 (2012) 69-82.
2708
+ [33] Y. Shen, P. Luo, J. Yan, X. Wang, X. Tang, Faceid-gan: Learning a symmetry three-player gan for identity-
2709
+ preserving face synthesis, Proceedings of the IEEE conference on computer vision and pattern
2710
+ recognition2018, pp. 821-830.
2711
+ [34] I. Goodfellow, J. Pouget-Abadie, M. Mirza, B. Xu, D. Warde-Farley, S. Ozair, A. Courville, Y. Bengio,
2712
+ Generative adversarial networks, Communications of the ACM, 63 (2020) 139-144.
2713
+ [35] D. Lundqvist, A. Flykt, A. Öhman, Karolinska directed emotional faces, Cognition and Emotion, (1998).
2714
+ [36] P. Vincent, H. Larochelle, I. Lajoie, Y. Bengio, P.-A. Manzagol, L. Bottou, Stacked denoising autoencoders:
2715
+ Learning useful representations in a deep network with a local denoising criterion, Journal of machine learning
2716
+ research, 11 (2010).
2717
+ [37] J. Wang, L. Perez, The effectiveness of data augmentation in image classification using deep learning,
2718
+ Convolutional Neural Networks Vis. Recognit, 11 (2017) 1-8.
2719
+ [38] T. Kim, M. Cha, H. Kim, J.K. Lee, J. Kim, Learning to discover cross-domain relations with generative
2720
+ adversarial networks, International conference on machine learning, PMLR2017, pp. 1857-1865.
2721
+ [39] I. Kemelmacher-Shlizerman, Transfiguring portraits, ACM Transactions on Graphics (TOG), 35 (2016) 1-8.
2722
+
2723
2725
+ [40] W. Jiang, S. Liu, C. Gao, J. Cao, R. He, J. Feng, S. Yan, Psgan: Pose and expression robust spatial-aware gan
2726
+ for customizable makeup transfer, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern
2727
+ Recognition2020, pp. 5194-5202.
2728
+ [41] T. Nguyen, A.T. Tran, M. Hoai, Lipstick ain't enough: beyond color matching for in-the-wild makeup transfer,
2729
+ Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition2021, pp. 13305-
2730
+ 13314.
2731
+ [42] C.-Y. Wu, R. Manmatha, A.J. Smola, P. Krahenbuhl, Sampling matters in deep embedding learning,
2732
+ Proceedings of the IEEE international conference on computer vision2017, pp. 2840-2848.
2733
+ [43] Y. Movshovitz-Attias, A. Toshev, T.K. Leung, S. Ioffe, S. Singh, No fuss distance metric learning using
2734
+ proxies, Proceedings of the IEEE international conference on computer vision2017, pp. 360-368.
2735
+ [44] E. Ustinova, V. Lempitsky, Learning deep embeddings with histogram loss, Advances in Neural Information
2736
+ Processing Systems, 29 (2016).
2737
+ [45] Q. Xiao, H. Luo, C. Zhang, Margin sample mining loss: A deep learning based method for person re-
2738
+ identification, arXiv preprint arXiv:1710.00478, (2017).
2739
+ [46] W. Chen, X. Chen, J. Zhang, K. Huang, Beyond triplet loss: a deep quadruplet network for person re-
2740
+ identification, Proceedings of the IEEE conference on computer vision and pattern recognition2017, pp. 403-
2741
+ 412.
2742
+ [47] A. Sanakoyeu, V. Tschernezki, U. Buchler, B. Ommer, Divide and conquer the embedding space for metric
2743
+ learning, Proceedings of the ieee/cvf conference on computer vision and pattern recognition2019, pp. 471-
2744
+ 480.
2745
+ [48] Y. Bai, Y. Lou, F. Gao, S. Wang, Y. Wu, L.-Y. Duan, Group-sensitive triplet embedding for vehicle
2746
+ reidentification, IEEE Transactions on Multimedia, 20 (2018) 2385-2399.
2747
+ [49] Y. Em, F. Gag, Y. Lou, S. Wang, T. Huang, L.-Y. Duan, Incorporating intra-class variance to fine-grained
2748
+ visual recognition, 2017 IEEE International Conference on Multimedia and Expo (ICME), IEEE2017, pp.
2749
+ 1452-1457.
2750
+ [50] E. Smirnov, A. Oleinik, A. Lavrentev, E. Shulga, V. Galyuk, N. Garaev, M. Zakuanova, A. Melnikov, Face
2751
+ representation learning using composite mini-batches, Proceedings of the IEEE/CVF International Conference
2752
+ on Computer Vision Workshops2019, pp. 0-0.
2753
+ [51] Y. Shi, X. Yu, K. Sohn, M. Chandraker, A.K. Jain, Towards universal representation learning for deep face
2754
+ recognition, Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition2020, pp.
2755
+ 6817-6826.
2756
+ [52] F. Schroff, D. Kalenichenko, J. Philbin, Facenet: A unified embedding for face recognition and clustering,
2757
+ Proceedings of the IEEE conference on computer vision and pattern recognition2015, pp. 815-823.
2758
+ [53] W. Liu, Y. Wen, Z. Yu, M. Li, B. Raj, L. Song, Sphereface: Deep hypersphere embedding for face recognition,
2759
+ Proceedings of the IEEE conference on computer vision and pattern recognition2017, pp. 212-220.
2760
+ [54] G.B. Huang, M. Mattar, T. Berg, E. Learned-Miller, Labeled faces in the wild: A database for studying face
2761
+ recognition in unconstrained environments, Workshop on faces in'Real-Life'Images: detection, alignment, and
2762
+ recognition2008.
2763
+ [55] L. Wolf, T. Hassner, I. Maoz, Face recognition in unconstrained videos with matched background similarity,
2764
+ CVPR 2011, IEEE2011, pp. 529-534.
2765
+ [56] T. Zheng, W. Deng, J. Hu, Cross-age lfw: A database for studying cross-age face recognition in unconstrained
2766
+ environments, arXiv preprint arXiv:1708.08197, (2017).
2767
+ [57] T. Zheng, W. Deng, Cross-pose lfw: A database for studying cross-pose face recognition in unconstrained
2768
+ environments, Beijing University of Posts and Telecommunications, Tech. Rep, 5 (2018).
2769
+ [58] S. Moschoglou, A. Papaioannou, C. Sagonas, J. Deng, I. Kotsia, S. Zafeiriou, Agedb: the first manually
2770
+ collected, in-the-wild age database, proceedings of the IEEE conference on computer vision and pattern
2771
+ recognition workshops2017, pp. 51-59.
2772
+ [59] S. Sengupta, J.-C. Chen, C. Castillo, V.M. Patel, R. Chellappa, D.W. Jacobs, Frontal to profile face verification
2773
+ in the wild, 2016 IEEE winter conference on applications of computer vision (WACV), IEEE2016, pp. 1-9.
2774
+ [60] K. Cong, M. Zhou, Face Dataset Augmentation with Generative Adversarial Network, Journal of Physics:
2775
+ Conference Series, IOP Publishing2022, pp. 012035.
2776
+ [61] A. Kammoun, R. Slama, H. Tabia, T. Ouni, M. Abid, Generative Adversarial Networks for face generation: A
2777
+ survey, ACM Computing Surveys, 55 (2022) 1-37.
2778
+ [62] G. Bradski, The openCV library, Dr. Dobb's Journal: Software Tools for the Professional Programmer, 25
2779
+ (2000) 120-123.
2780
+
2781
2783
+ [63] D.E. Rumelhart, G.E. Hinton, R.J. Williams, Learning internal representations by error propagation, California
2784
+ Univ San Diego La Jolla Inst for Cognitive Science1985.
2785
+ [64] D. Bank, N. Koenigstein, R. Giryes, Autoencoders, arXiv preprint arXiv:2003.05991, (2020).
2786
+ [65] Q. Meng, X. Xu, X. Wang, Y. Qian, Y. Qin, Z. Wang, C. Zhao, F. Zhou, Z. Lei, PoseFace: Pose-invariant
2787
+ features and pose-adaptive loss for face recognition, arXiv preprint arXiv:2107.11721, (2021).
2788
+ [66] T. Lin, Y. Wang, X. Liu, X. Qiu, A survey of transformers, AI Open, (2022).
2789
+ [67] N. Carion, F. Massa, G. Synnaeve, N. Usunier, A. Kirillov, S. Zagoruyko, End-to-end object detection with
2790
+ transformers, Computer Vision–ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020,
2791
+ Proceedings, Part I 16, Springer2020, pp. 213-229.
2792
+ [68] H. Touvron, M. Cord, M. Douze, F. Massa, A. Sablayrolles, H. Jégou, Training data-efficient image
2793
+ transformers & distillation through attention, International conference on machine learning, PMLR2021, pp.
2794
+ 10347-10357.
2795
+ [69] F. Yang, H. Yang, J. Fu, H. Lu, B. Guo, Learning texture transformer network for image super-resolution,
2796
+ Proceedings of the IEEE/CVF conference on computer vision and pattern recognition2020, pp. 5791-5800.
2797
+ [70] L. Ye, M. Rochan, Z. Liu, Y. Wang, Cross-modal self-attention network for referring image segmentation,
2798
+ Proceedings of the IEEE/CVF conference on computer vision and pattern recognition2019, pp. 10502-10511.
2799
+ [71] A.M. Hafiz, S.A. Parah, R.U.A. Bhat, Attention mechanisms and deep learning for machine vision: A survey of
2800
+ the state of the art, arXiv preprint arXiv:2106.07550, (2021).
2801
+ [72] Z. Niu, G. Zhong, H. Yu, A review on the attention mechanism of deep learning, Neurocomputing, 452 (2021)
2802
+ 48-62.
2803
+ [73] F. Chollet, Deep learning with Python, Simon and Schuster2021.
2804
+ [74] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A.N. Gomez, Ł. Kaiser, I. Polosukhin, Attention is
2805
+ all you need, Advances in neural information processing systems, 30 (2017).
2806
+ [75] J.-B. Cordonnier, A. Loukas, M. Jaggi, On the relationship between self-attention and convolutional layers,
2807
+ arXiv preprint arXiv:1911.03584, (2019).
2808
+ [76] N. Park, S. Kim, How do vision transformers work?, arXiv preprint arXiv:2202.06709, (2022).
2809
+ [77] N. Park, S. Kim, Blurs behave like ensembles: Spatial smoothings to improve accuracy, uncertainty, and
2810
+ robustness, International Conference on Machine Learning, PMLR2022, pp. 17390-17419.
2811
+ [78] I. Goodfellow, Y. Bengio, A. Courville, Deep learning, MIT press2016.
2812
+ [79] K. Sohn, Improved deep metric learning with multi-class n-pair loss objective, Advances in neural information
2813
+ processing systems, 29 (2016).
2814
+ [80] C. Cortes, V. Vapnik, Support vector machine, Machine learning, 20 (1995) 273-297.
2815
+ [81] Z. He, W. Zuo, M. Kan, S. Shan, X. Chen, Attgan: Facial attribute editing by only changing what you want,
2816
+ IEEE transactions on image processing, 28 (2019) 5464-5478.
2817
+
AtFLT4oBgHgl3EQfFC_E/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
CtAyT4oBgHgl3EQfR_f1/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a999d37283c8cae3fd30c1764c85c50c46f2101ef950685b990d01c3d5481a8
3
+ size 337509
D9AzT4oBgHgl3EQfif2Z/content/tmp_files/2301.01501v1.pdf.txt ADDED
@@ -0,0 +1,741 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Towards Edge-Cloud Architectures for Personal
2
+ Protective Equipment Detection
3
+ Jarosław Legierski
4
+ jaroslaw.legierski@orange.com
5
+ Orange Innovation, Orange
6
+ Polska S.A.
7
+ Warsaw, Poland
8
+ Kajetan Rachwał
9
+ Piotr Sowinski
10
+ kajetan.rachwal@ibspan.waw.pl
11
+ piotr.sowinski@ibspan.waw.pl
12
+ Systems Research Institute Polish
13
+ Academy of Sciences
14
+ Poland
15
+ Warsaw University of Technology
16
+ Warsaw, Poland
17
+ Wojciech Niewolski
18
+ wojciech.niewolski@orange.com
19
+ Orange Innovation, Orange
20
+ Polska S.A.
21
+ Poland
22
+ Warsaw University of Technology
23
+ Warsaw, Poland
24
+ Przemysław Ratuszek
25
+ Zbigniew Kopertowski
26
+ przemyslaw.ratuszek@orange.com
27
+ zbigniew.kopertowski@orange.com
28
+ Orange Innovation, Orange
29
+ Polska S.A.
30
+ Warsaw, Poland
31
+ Marcin Paprzycki
32
+ Maria Ganzha
33
+ marcin.paprzycki@ibspan.waw.pl
34
+ maria.ganzha@ibspan.waw.pl
35
+ Systems Research Institute Polish
36
+ Academy of Sciences
37
+ Warsaw, Poland
38
+ Abstract
39
+ Detecting Personal Protective Equipment in images and video
40
+ streams is a relevant problem in ensuring the safety of con-
41
+ struction workers. In this contribution, an architecture en-
42
+ abling live image recognition of such equipment is proposed.
43
+ The solution is deployable in two settings – edge-cloud and
44
+ edge-only. The system was tested on an active construction
45
+ site, as a part of a larger scenario, within the scope of the
46
+ ASSIST-IoT H2020 project. To determine the feasibility of
47
+ the edge-only variant, a model for counting people wearing
48
+ safety helmets was developed using the YOLOX method. It
49
+ was found that an edge-only deployment is possible for this
50
+ use case, given the hardware infrastructure available on site.
51
+ In the preliminary evaluation, several important observations
52
+ were made, that are crucial to the further development and de-
53
+ ployment of the system. Future work will include an in-depth
54
+ investigation of performance aspects of the two architecture
55
+ variants.
56
+ Keywords: edge-cloud continuum architectures, PPE detec-
57
+ tion, image recognition, worker safety
58
+ 1
59
+ Introduction
60
+ Nowadays, the demand for intelligent video analytics is grow-
61
+ ing across a wide spectrum of application areas [17]. The
62
+ key part of such systems is usually an image recognition (IR)
63
+ component. However, as of today, the IR subsystem is, most
64
+ commonly, deployed in the cloud. This approach offers mul-
65
+ tiple benefits, such as availability of large and scalable com-
66
+ putational resources, reliable APIs, and shifting the burden
67
+ of system maintenance to the cloud service provider. How-
68
+ ever, this comes at a cost. Sending data to the cloud raises
69
+ both security and privacy concerns. Moreover, communicat-
70
+ ing with the cloud always induces network latency, which
71
+ may be significant in time-critical applications. To address
72
+ issues brought about by cloud-centric solutions, edge com-
73
+ puting has been proposed. Here, the core of the approach is
74
+ processing the data as close to the source as possible. This
75
+ allows for latency reduction, and helps ensure the security
76
+ and privacy of data, which remains within the local network.
77
+ However, edge computing has its own set of issues. Typically,
78
+ the computational resources, which are available at the edge
79
+ are considerably smaller. A possible solution to addressing
80
+ the downsides of both these options is a combined approach –
81
+ an edge-cloud continuum, where data is partially processed
82
+ on the edge and partially in the cloud. However, this raises
83
+ the obvious question: at which point(s), within the continuum,
84
+ individual parts of the system should be deployed.
85
+ Here, this question is considered within a real-world sce-
86
+ nario of monitoring the entrance to an active construction
87
+ site. Specifically, the system is tasked with ensuring that (1)
88
+ no unauthorized people enter the worksite, and (2) every-
89
+ body is wearing appropriate Personal Protective Equipment
90
+ (PPE), i.e. helmets and safety vests. The scenario is evalu-
91
+ ated as part of the ASSIST-IoT project, on a construction site
92
+ in Warsaw, Poland, managed by the construction company
93
+ Mostostal Warszawa. Here, the edge versus cloud discussion
94
+ becomes particularly relevant. On the one hand, the privacy
95
+ of workers is of paramount importance, while latencies must
96
+ be minimized, to ensure a quick reaction, which hints at an
97
98
+
99
100
+ edge deployment. On the other hand, given the limited hard-
101
+ ware resources available on the edge, and the extremely harsh
102
+ conditions of the construction site, a cloud deployment seems
103
+ attractive.
104
+ Given the possible benefits of both solutions, in this contri-
105
+ bution, a solution is proposed for an edge-cloud continuum
106
+ video analytics architecture. The architecture can be deployed
107
+ in two variants (edge-only, and edge-cloud), described in the
108
+ Architecture section. Moreover, to determine the viability of
109
+ the solution, an initial experimental study was performed.
110
+ Here, an IR model was developed and integrated with the
111
+ edge-only variant of the architecture. Next, it was tasked with
112
+ detecting when personnel wearing PPE entered and exited the
113
+ work site.
114
+ 2
115
+ Background
116
+ To provide a context for this study, the state of the art of (1)
117
+ IR system architectures and (2) machine learning models for
118
+ PPE detection is summarized.
119
+ System architectures. The most obvious benefit of deploy-
120
+ ing IR systems on the edge is the decreased latency. This was
121
+ demonstrated in [20], where facial recognition models were
122
+ deployed on the edge. The authors found that deploying the
123
+ models on the edge resulted in significantly better response
124
+ speeds, as compared to a cloud deployment. In other stud-
125
+ ies [8, 9], the viability of deploying deep convolutional neural
126
+ networks (CNNs) in the edge-only scenario was investigated.
127
+ CNNs are characterized by high resource utilization, and thus
128
+ are typically deployed in the cloud. The studies found that
129
+ deploying CNNs is viable on mobile devices, when parts of
130
+ the computation can be offloaded to other edge devices. Edge
131
+ deployment allowed to achieve a consistently low latency
132
+ of 2.24 ms while using CNNs to perform real-time object
133
+ tracking in augmented reality [8]. Both studies showed that
134
+ the edge deployment distributing the workload increased the
135
+ inference capabilities of the system, as the models could not
136
+ be run on the disconnected mobile devices alone.
137
+ One study [5] investigated an edge-cloud architecture, where
138
+ data preprocessing servers were deployed close to the data
139
+ source. The preprocessed data was then sent to a cloud-based
140
+ deep-learning platform. This resulted in decreasing network
141
+ latency and traffic. It also increased the security and privacy
142
+ of the raw data.
143
+ On the other hand, edge deployments are more limited
144
+ in terms of the available hardware. Low computational re-
145
+ sources naturally limit the size of models and inference speed.
146
+ A study compared different implementations (based on Ten-
147
+ sorFlow, TensorRT, and TFLite) of the same video processing
148
+ model [6], and found them to differ in their resource utiliza-
149
+ tion. The choice of implementation influenced the energy
150
+ consumption of the model, as well as its inference speed.
151
+ Interestingly, the slowest implementation (TFLite) was the
152
+ most energy efficient. It was also found that TFLite managed
153
+ to remain on par with the other implementations in terms of
154
+ speed, when processing low-resolution video. In the case of
155
+ high-resolution video, more resource-intensive models were
156
+ needed to maintain the speed, suggesting that a cloud deploy-
157
+ ment could be more beneficial in low-resource settings. Nev-
158
+ ertheless, some resource-intensive models can be deployed
159
+ on the edge, if resources available there are sufficient. The
160
+ deployment proposed in a different study required all nodes
161
+ to be equipped with a GPU [9]. This allowed the authors to
162
+ use CNNs on the edge. A similar result was reported in [13],
163
+ where IR models deployed on a Raspberry Pi 4B, equipped
164
+ with a camera, and an Intel Neural Compute Stick 2 (a USB
165
+ device for deep learning inference on the edge) were studied.
166
+ These devices were chosen for their low power consumption
167
+ and good computing capabilities. Overall, a model tasked
168
+ with detecting PPE in the form of helmets and safety vests
169
+ achieved precision on the order of 99.5%.
170
+ Models for PPE detection. Due to its importance for health and safety, effective video analytics-based detection of protective helmets worn by workers is currently a hot research topic.
174
+ The usage of existing, unmodified machine learning models
175
+ for detecting protective head covers does not provide suffi-
176
+ cient detection accuracy, as proven in a recent study [19].
177
+ In said article, several versions of the popular YOLO algo-
178
+ rithm [1] were compared. It was shown that the most effective
179
+ version of YOLO for helmet detection is the v4. After improv-
180
+ ing the loss function, it achieved more than 93% accuracy
181
+ during tests. A similar study [10] focused on improving the
182
+ YOLOv5 algorithm. The system achieved results close to 97%
183
+ accuracy, thanks to the improvement of the structure of the
184
+ neural network. Another study [18], also investigated improv-
185
+ ing YOLOv5. However, instead of the algorithm itself, work
186
+ was focused on processing of input data by applying filters
187
+ on the input image. This allowed to improve the accuracy to
188
+ above 95%. Yet another study [15] presented an approach for
189
+ improving the detection speed and accuracy by designing a
190
+ multi-level pyramidal feature fusion network based on the
191
+ ConCaNet attention mechanism. Here, YOLOv3 was applied
192
+ and a dataset with 6000 images was used. The results demon-
193
+ strate the effectiveness of this approach, which managed to
194
+ reduce the number of necessary parameters.
195
+ Helmet detection can also be done using the SSD-MobileNet
196
+ algorithm [4], which is based on yet another variant of CNN.
197
+ An analysis of this method, reported in [7], tested its effec-
198
+ tiveness and managed to reach 80% accuracy during tests.
199
+ In a wider comparison of algorithm types [11], the authors
200
+ proposed a helmet detection method based on a dynamically
201
+ changing neural network – SHDDM (Safety Helmet Detec-
202
+ tion Dynamic Model). The developed model analyzes the
203
+ human posture and defines the area where the helmet should
204
+ be located, to eliminate the detection of the helmet outside
205
+ the head area and thus reduce the false positive rate. There are
206
+
207
208
+ also other approaches to helmet detection, such as methods
209
+ based on color and shape used to locate the face, and the
210
+ proper wearing of a helmet [16]. Another solution used low-
211
+ resolution images, captured from a video stream, using the
212
+ Local Binary Pattern (LBP) and gray-level co-occurrence ma-
213
+ trix (GLCM) methods along with a back-propagation neural
214
+ network [14].
215
+ Another study [2] investigated the usefulness of artificially
216
+ created images in the training of CNNs for PPE detection. The
217
+ paper presented the results achieved with YOLOv3, trained on
218
+ artificial images generated by the Rockstar Advanced Game
219
+ Engine (RAGE) from the Grand Theft Auto V video game.
220
+ This approach achieved a mean average precision (mAP) of
221
+ only 55.11% on a test dataset consisting of real-world images.
222
+ The mAP for synthetic images was much higher at 87.24%.
223
+ It should be noted that the poor results for the real-world
224
+ images are most likely caused by the RAGE engine being
225
+ unable to generate a sufficient amount of head, welding mask,
226
+ ear protection, and chest object variations.
227
+ As can be seen, there are many possibilities for detecting
228
+ protective helmets. Here, the SHDDM is particularly note-
229
+ worthy, as it has an important feature of checking whether the
230
+ helmet is worn properly, and not only detecting its presence.
231
+ This, in turn, is particularly relevant in real-world applica-
232
+ tions.
233
+ 3
234
+ Proposed Architecture
235
+ The proposed video analytics system can be deployed in
236
+ two architecture variants: edge-cloud (Fig. 1) and edge-only
237
+ (Fig. 2). As outlined above, there are reasons to believe that
238
+ both variants may be appropriate for the considered scenario.
239
+ Both architectures share a common core deployed on the edge,
240
+ consisting of: a camera, the Image Processor (IP) component,
241
+ and the OSH (Occupational Safety and Health) manager’s
242
+ mobile device.
243
+ The camera (in the reported experiments the Dahua IPC-
244
+ HFW5449T-ASE-LED was used) provides a live RTSP video
245
+ stream, which is directed to the Image Processor. The IP is
246
+ a service written in Python, which can optionally perform
247
+ preliminary image analysis. Using configurable methods such
248
+ as motion detection and brightness thresholding, the IP is able
249
+ to discard image frames that do not contain moving people,
250
+ reducing network traffic to components involved in actual im-
251
+ age analysis. It is also responsible for communicating with the
252
+ rest of the system, designed in accordance with the ASSIST-
253
+ IoT reference architecture [3]. IP communicates with the rest
254
+ of the system publishing alerts to an MQTT topic. This de-
255
+ sign allows other components and devices in the ASSIST-IoT
256
+ deployment to be notified in a streaming manner of any OSH
257
+ violations, such as workers not wearing protective helmets.
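+ A minimal sketch of this frame-filtering and alerting logic is given below. It is illustrative only: the RTSP URL, MQTT broker address, topic name, and thresholds are assumptions rather than the actual ASSIST-IoT configuration, and the PPE inference step is mocked.
+ import json
+ import cv2
+ import paho.mqtt.client as mqtt
+
+ RTSP_URL = "rtsp://camera.local/stream"      # hypothetical camera endpoint
+ ALERT_TOPIC = "assist-iot/osh/alerts"        # hypothetical alert topic
+
+ client = mqtt.Client()                       # paho-mqtt 1.x style constructor
+ client.connect("broker.local", 1883)
+
+ cap = cv2.VideoCapture(RTSP_URL)
+ motion = cv2.createBackgroundSubtractorMOG2()
+
+ while True:
+     ok, frame = cap.read()
+     if not ok:
+         break
+     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+     if gray.mean() < 30:                                 # brightness thresholding: skip dark frames
+         continue
+     if cv2.countNonZero(motion.apply(frame)) < 5000:     # motion detection: skip static frames
+         continue
+     # The frame would be forwarded to the PPE detection back end here;
+     # a violation result is mocked for illustration.
+     violation = {"type": "missing_helmet", "confidence": 0.91}
+     client.publish(ALERT_TOPIC, json.dumps(violation))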
258
+ In the first version of the architecture – the edge-cloud
259
+ deployment – the IP is configured to use the cloud-based
260
+ AWS Rekognition platform, with its PPE detection service.
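+ As a hedged illustration of this variant, a single filtered frame could be checked against the Rekognition PPE detection API roughly as follows (the region and confidence threshold are assumptions; error handling is omitted).
+ import boto3
+ import cv2
+
+ rekognition = boto3.client("rekognition", region_name="eu-central-1")
+
+ def everyone_wears_helmet(frame) -> bool:
+     """Check a single video frame for the required head cover."""
+     _, jpeg = cv2.imencode(".jpg", frame)
+     response = rekognition.detect_protective_equipment(
+         Image={"Bytes": jpeg.tobytes()},
+         SummarizationAttributes={
+             "MinConfidence": 80,
+             "RequiredEquipmentTypes": ["HEAD_COVER"],
+         },
+     )
+     # Persons listed in this summary field are missing the required equipment.
+     return len(response["Summary"]["PersonsWithoutRequiredEquipment"]) == 0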
261
+ Figure 1. Edge-cloud deployment. [Diagram text omitted; components: Camera, Image Processor, and OSH manager's mobile device on the Edge within the ASSIST-IoT deployment, and AWS Rekognition in the AWS Cloud; flows: video stream, filtered video stream, inference results, OSH alert.]
277
+ Figure 2. Edge deployment. [Diagram text omitted; components: Camera, Image Processor, OSH manager's mobile device, and the Orange AI&ML Platform (external platform connector, processing pipeline AI-1 ... AI-X, result adaptation), all on the Edge within the ASSIST-IoT deployment; flows: video stream, inference results, OSH alert.]
297
+ In the edge-only variant, the video analysis is performed by
298
+ the Orange AI&ML Platform, which is deployed on a server
299
+ on the construction site. This edge deployment allows for
300
+ maintaining lower network latency, and ensures the privacy
301
+ of worker data. The AI&ML Platform’s services are written
302
+ as Python runnable modules that provide their own APIs and
303
+ GUIs. The services can reuse the APIs and GUIs provided
304
+ by the platform, or build them from scratch. A service col-
305
+ lects frames from a video source, processes them in an ML
306
+ pipeline specific to the service, and adapts or interprets the
307
+ results. The inference results from the Platform are forwarded
308
+ to external services, with the use of provided connectors. As
309
+ the Orange AI&ML Platform operates on the edge, all video
310
+
311
+ Legierski and Rachwał et al.
312
+ processing takes place on the client’s site, ensuring full secu-
313
+ rity of customer data (video) and compliance with appropriate
314
+ regulations, such as GDPR.
315
+ 4
316
+ Methodology
317
+ As part of this study, a preliminary version of the edge-only variant of the architecture was deployed on an active construction site. Using the Orange AI&ML Platform, a model was trained to count people wearing helmets entering and exiting a specific area. The system counts people in helmets in defined recognition areas (bounding boxes), crossing the yellow and green lines visible in Figs. 3 and 4. People entering the construction site are counted after crossing the green line, while people leaving are counted after crossing the yellow line. The machine learning pipeline consists of a YOLOX object detection model, trained for detecting heads in helmets, and a DeepSORT [12] multi-object tracking algorithm. The YOLOX model was trained using a dataset provided by the Northeastern University of China (https://public.roboflow.com/object-detection/hard-hat-workers).
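+ A minimal sketch of the line-crossing counting logic is given below, assuming that tracked bounding boxes of helmeted heads are already available (for example from DeepSORT). The line coordinates and crossing directions are placeholders, not the deployed configuration.
+
+ from typing import Dict, Tuple
+
+ GREEN_LINE_Y = 400    # assumed y-coordinate (pixels) of the entry line
+ YELLOW_LINE_Y = 300   # assumed y-coordinate (pixels) of the exit line
+
+ last_y: Dict[int, float] = {}   # track id -> previous vertical centre of the head box
+ entered = 0
+ exited = 0
+
+ def update(track_id: int, bbox: Tuple[float, float, float, float]) -> None:
+     """bbox = (x1, y1, x2, y2) of a helmeted head reported by the tracker."""
+     global entered, exited
+     y = (bbox[1] + bbox[3]) / 2.0
+     prev = last_y.get(track_id)
+     if prev is not None:
+         if prev < GREEN_LINE_Y <= y:    # crossed the green line (assumed: moving down = entering)
+             entered += 1
+         if prev > YELLOW_LINE_Y >= y:   # crossed the yellow line (assumed: moving up = leaving)
+             exited += 1
+     last_y[track_id] = y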
+ The system's results were compared to those obtained from an algorithm built into the Dahua camera. It should be noted that the camera counted all people entering and leaving, including those without protective helmets. However, this should not impact the results much, as the safety regulations on this particular site forbid entering it without a helmet and the rule is strictly enforced before workers reach the counting location.
+ The measurements were performed in two series – each using a different bounding box definition. A single series spanned the length of one workday on the construction site. The number of entering and leaving people was counted in hourly intervals (between 5 AM and 7 PM).
+ 5 Results
+ Tables 1 and 2 present the results of the performed experiments. Table 1 contains measurements made on 22nd November 2022, with the bounding box set as presented in Fig. 3. The average difference between the number of people entering, as measured by the camera and the model, was equal to −6.21, with a standard deviation of σ = 5.08, whereas for people exiting it was 1.35 with σ = 2.73, respectively. The correlation between entrances detected by the camera and the model deployed on the AI&ML platform, expressed by the Pearson coefficient, is 0.988, whereas for exits it is 0.995. The correlations were found to be statistically significant (p ≤ 0.05).
+ Table 2 contains measurements from 24th November 2022 (for modified detection areas, depicted in Fig. 4). On that day, the average difference for entering was −4.93 with σ = 4.25 and for exiting 3.92 with σ = 4.92. For these measurements the Pearson coefficient for people entering is equal to 0.993 and for exiting 0.989. The correlations were found to be statistically significant (p ≤ 0.05).
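+ For reference, the reported per-hour statistics can be reproduced from the table data with a few lines of Python; the snippet below uses the "In" counts of Table 1 (shown further below) and scipy's pearsonr, which also returns the p-value used for the significance check.
+
+ import numpy as np
+ from scipy.stats import pearsonr
+
+ dahua_in = np.array([12, 65, 84, 47, 26, 84, 50, 51, 28, 70, 28, 8, 9, 0])
+ aiml_in = np.array([11, 78, 87, 52, 28, 96, 60, 58, 43, 75, 33, 18, 10, 0])
+
+ diff = dahua_in - aiml_in
+ print(diff.mean())        # average difference, about -6.21
+ print(diff.std(ddof=1))   # sample standard deviation, about 5.08
+ r, p = pearsonr(dahua_in, aiml_in)
+ print(r, p)               # Pearson coefficient, about 0.988, and its p-value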
+ Figure 3. Bounding boxes location on November 22 (before modification)
+ Figure 4. Bounding boxes location on November 24 (after modification)
+ The tables also present the differences in the number of people detected by the camera and the AI&ML platform, and the sum of these differences calculated for both movement directions: entries and exits.
+ During the experiments, several unexpected events took place, which had a significant impact on the reported results. Workers were observed acting in an unexpected manner – lingering or walking around the detection area (Fig. 5). It was also noticed that sometimes the workers put on their helmets after having passed the detection area (Fig. 6). These behaviors present a challenge to the future system, as they significantly affect its accuracy.
+ Figure 5. Unexpected worker behavior – staying in the detection area for longer
+ Figure 6. Unexpected worker behavior – putting the helmet on behind the detection line
+ Hour       05:00 06:00 07:00 08:00 09:00 10:00 11:00 12:00 13:00 14:00 15:00 16:00 17:00 18:00 Total
+ Dahua In      12    65    84    47    26    84    50    51    28    70    28     8     9     0   562
+ Dahua Out      2    15    21    35    81    44    61    31    63    32    59    66    26     8   544
+ AI&ML In      11    78    87    52    28    96    60    58    43    75    33    18    10     0   649
+ AI&ML Out      2    13    23    33    73    44    63    31    58    31    59    62    25     8   525
+ Diff. In       1   -13    -3    -5    -2   -12   -10    -7   -15    -5    -5   -10    -1     0   -87
+ Diff. Out      0     2    -2     2     8     0    -2     0     5     1     0     4     1     0    19
+ Table 1. Entries and exits to the construction site, 22 November 2022.
+ Hour       05:00 06:00 07:00 08:00 09:00 10:00 11:00 12:00 13:00 14:00 15:00 16:00 17:00 18:00 Total
+ Dahua In       4    57   113    62    34    73    75    65    56    93    27    10     9     0   678
+ Dahua Out      0    10    29    57    80    53    74    41    82    52    49    84    20     8   639
+ AI&ML In       3    61   113    68    43    76    79    73    69    98    33    21    10     0   747
+ AI&ML Out      0    11    26    46    64    48    72    38    73    49    47    82    22     6   584
+ Diff. In       1    -4     0    -6    -9    -3    -4    -8   -13    -5    -6   -11    -1     0   -69
+ Diff. Out      0    -1     3    11    16     5     2     3     9     3     2     2    -2     2    55
+ Table 2. Entries and exits to the construction site, 24 November 2022.
+ 6 Concluding remarks
+ The tested model demonstrated relatively good performance in the investigated scenario. Its accuracy when tasked with counting people wearing protective helmets was found to be sufficient, and was validated against a different system. A number of discrepancies between the counts of the model and the camera can be attributed to unexpected situations (Figs. 5 and 6) and the fact that the Dahua camera did not differentiate between people wearing and not wearing helmets. The high correlation coefficient between the camera and the Orange AI&ML Platform's model allows one to conclude that the two solutions perform comparably well.
+ It should be noted that there were changes in the correlation between the days of the experiments. These differences are explained by the changes to the bounding box. This is one of the parameters that have to be investigated further.
+ Both variants of the proposed architecture can be used in the investigated scenario of PPE detection on a construction site. The feasibility of using an edge deployment was confirmed – the server's computational capabilities were sufficient to maintain satisfactory inference accuracy. Therefore, it can be concluded that the construction site is equipped with sufficient hardware to warrant further experiments with the deployment.
+ In the future, the two proposed architecture variants will be compared in terms of network latencies, resource utilization, and their accuracy. The presented model will also be tested further, which will include manually annotating the videos to obtain a ground truth for comparison. This will allow for determining the actual accuracy of the developed model. Further optimization of bounding box locations is also planned.
+ Acknowledgments
+ Work supported by the ASSIST-IoT project, funded from the European Union's H2020 RIA program under grant 957258.
+ References
+ [1] Alexey Bochkovskiy, Chien-Yao Wang, and Hong-Yuan Mark Liao. 2020. YOLOv4: Optimal speed and accuracy of object detection. arXiv preprint arXiv:2004.10934 (2020).
+ [2] Marco di Benedetto, Enrico Meloni, Giuseppe Amato, Fabrizio Falchi, and Claudio Gennaro. 2019. Learning Safety Equipment Detection using Virtual Worlds. In 2019 International Conference on Content-Based Multimedia Indexing (CBMI). 1–6. https://doi.org/10.1109/CBMI.2019.8877466
+ [3] Alejandro Fornés-Leal, Ignacio Lacalle, Carlos E Palau, Paweł Szmeja, Maria Ganzha, Marcin Paprzycki, Eduardo Garro, and Francisco Blanquer. 2022. ASSIST-IoT: A reference architecture for next generation Internet of Things. In New Trends in Intelligent Software Methodologies, Tools and Techniques. IOS Press, 109–128.
+ [4] Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam. 2017. MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications. https://doi.org/10.48550/ARXIV.1704.04861
+ [5] Yutao Huang, Xiaoqiang Ma, Xiaoyi Fan, Jiangchuan Liu, and Wei Gong. 2017. When deep learning meets edge computing. In 2017 IEEE 25th International Conference on Network Protocols (ICNP). 1–2. https://doi.org/10.1109/ICNP.2017.8117585
+ [6] Anis Koubaa, Adel Ammar, Anas Kanhouch, and Yasser AlHabashi. 2022. Cloud Versus Edge Deployment Strategies of Real-Time Face Recognition Inference. IEEE Transactions on Network Science and Engineering 9, 1 (2022), 143–160. https://doi.org/10.1109/TNSE.2021.3055835
+ [7] Yange Li, Han Wei, Zheng Han, Jianling Huang, and Wei-Dong Wang. 2020. Deep Learning-Based Safety Helmet Detection in Engineering Management Based on Convolutional Neural Networks. Advances in Civil Engineering 2020 (09 2020), 1–10. https://doi.org/10.1155/2020/9703560
+ [8] Luyang Liu, Hongyu Li, and Marco Gruteser. 2019. Edge Assisted Real-Time Object Detection for Mobile Augmented Reality. In The 25th Annual International Conference on Mobile Computing and Networking (Los Cabos, Mexico) (MobiCom '19). Association for Computing Machinery, New York, NY, USA, Article 25, 16 pages. https://doi.org/10.1145/3300061.3300116
+ [9] Peng Liu, Bozhao Qi, and Suman Banerjee. 2018. EdgeEye: An Edge Service Framework for Real-time Intelligent Video Analytics. 1–6. https://doi.org/10.1145/3213344.3213345
+ [10] Weiran Liu, Yi Hu, and Dawei Fan. 2022. Safety Helmet Wearing Recognition Based on Improved YOLOv5. In 2022 11th International Conference of Information and Communication Technology (ICTech). 466–470. https://doi.org/10.1109/ICTech55460.2022.00099
+ [11] Yao Nan, Qin Jian-Hua, Wang Zhen, and Wang Hong-Chang. 2022. Safety Helmet Detection Dynamic Model Based on the Critical Area Attention Mechanism. In 2022 7th Asia Conference on Power and Electrical Engineering (ACPEE). 1296–1303. https://doi.org/10.1109/ACPEE53904.2022.9783764
+ [12] Nicolai Wojke, Alex Bewley, and Dietrich Paulus. 2017. Simple Online and Realtime Tracking with a Deep Association Metric. https://doi.org/10.48550/ARXIV.1703.07402
+ [13] Pei-Shao Wu, Chun-Yi Lin, Tang-Yu Cheng, and Wu-Sung Yao. 2021. Analysis and design of industrial safety automatic identification system based on Tiny-YOLOv3. In 2021 IEEE International Future Energy Electronics Conference (IFEEC). 1–6. https://doi.org/10.1109/IFEEC53238.2021.9661778
+ [14] JIANG Xinhua, XUE Heru, ZHANG Lina, and ZHOU Yanqing. 2021. A Study of Low-resolution Safety Helmet Image Recognition Combining Statistical Features with Artificial Neural Network. In International Journal of Simulation: Systems, Science and Technology (IJSSST). 1–6. https://doi.org/10.5013/IJSSST.a.17.37.11
+ [15] Chang Xu, Jinyu Tian, and Zhiqiang Zeng. 2022. Lightweight Fusion Channel Attention Convolutional Neural Network for Helmet Recognition. In WSPC Proceedings. 1–8.
+ [16] Geng Zhang, Lei Lv, Li Dan, and Min Zhu. 2017. The Method for Recognizing Recognition Helmet Based On Color and Shape. In 5th International Conference on Machinery, Materials and Computing Technology (ICMMCT). 1–5.
+ [17] Qingyang Zhang, Hui Sun, Xiaopei Wu, and Hong Zhong. 2019. Edge video analytics for public safety: A review. Proc. IEEE 107, 8 (2019), 1675–1696.
+ [18] Fangbo Zhou, Huailin Zhao, and Zhen Nie. 2021. Safety Helmet Detection Based on YOLOv5. In 2021 IEEE International Conference on Power Electronics, Computer Applications (ICPECA). 6–11. https://doi.org/10.1109/ICPECA51329.2021.9362711
+ [19] Mudi Zhou, Zhuli Fang, Bin Zhao, and Pengfei Li. 2021. Safety Helmet Wearing Detection and Recognition Based on YOLOv4. In 2021 3rd International Academic Exchange Conference on Science and Technology Innovation (IAECST). https://doi.org/10.1109/IAECST54258.2021.9695790
+ [20] Xihao Zhou and Sye Loong Keoh. 2020. Deployment of Facial Recognition Models at the Edge: A Feasibility Study. In 2020 21st Asia-Pacific Network Operations and Management Symposium (APNOMS). 214–219. https://doi.org/10.23919/APNOMS50412.2020.9236972
D9AzT4oBgHgl3EQfif2Z/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,376 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf,len=375
2
+ page_content='Towards Edge-Cloud Architectures for Personal Protective Equipment Detection Jarosław Legierski jaroslaw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
3
+ page_content='legierski@orange.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
4
+ page_content='com Orange Innovation, Orange Polska S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
5
+ page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
6
+ page_content=' Warsaw, Poland Kajetan Rachwał Piotr Sowinski kajetan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
7
+ page_content='rachwal@ibspan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
8
+ page_content='waw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
9
+ page_content='pl piotr.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
10
+ page_content='sowinski@ibspan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
11
+ page_content='waw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
12
+ page_content='pl Systems Research Institute Polish Academy of Sciences Poland Warsaw Univesity of Technology Warsaw, Poland Wojciech Niewolski wojciech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
13
+ page_content='niewolski@orange.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
14
+ page_content='com Orange Innovation, Orange Polska S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
15
+ page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
16
+ page_content=' Poland Warsaw Univesity of Technology Warsaw, Poland Przemysław Ratuszek Zbigniew Kopertowski przemyslaw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
17
+ page_content='ratuszek@orange.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
18
+ page_content='com zbigniew.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
19
+ page_content='kopertowski@orange.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
20
+ page_content='com Orange Innovation, Orange Polska S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
21
+ page_content='A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
22
+ page_content=' Warsaw, Poland Marcin Paprzycki Maria Ganzha marcin.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
23
+ page_content='paprzycki@ibspan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
24
+ page_content='waw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
25
+ page_content='pl maria.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
26
+ page_content='ganzha@ibspan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
27
+ page_content='waw.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
28
+ page_content='pl Systems Research Institute Polish Academy of Sciences Warsaw, Poland Abstract Detecting Personal Protective Equipment in images and video streams is a relevant problem in ensuring the safety of con- struction workers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
29
+ page_content=' In this contribution, an architecture en- abling live image recognition of such equipment is proposed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
30
+ page_content=' The solution is deployable in two settings – edge-cloud and edge-only.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
31
+ page_content=' The system was tested on an active construction site, as a part of a larger scenario, within the scope of the ASSIST-IoT H2020 project.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
32
+ page_content=' To determine the feasibility of the edge-only variant, a model for counting people wearing safety helmets was developed using the YOLOX method.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
33
+ page_content=' It was found that an edge-only deployment is possible for this use case, given the hardware infrastructure available on site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
34
+ page_content=' In the preliminary evaluation, several important observations were made, that are crucial to the further development and de- ployment of the system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
35
+ page_content=' Future work will include an in-depth investigation of performance aspects of the two architecture variants.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
36
+ page_content=' Keywords: edge-cloud continuum architectures, PPE detec- tion, image recognition, worker safety 1 Introduction Nowadays, the demand for intelligent video analytics is grow- ing across a wide spectrum of application areas [17].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
37
+ page_content=' The key part of such systems is usually an image recognition (IR) component.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
38
+ page_content=' However, as of today, the IR subsystem is, most commonly, deployed in the cloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
39
+ page_content=' This approach offers mul- tiple benefits, such as availability of large and scalable com- putational resources, reliable APIs, and shifting the burden of system maintenance to the cloud service provider.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
40
+ page_content=' How- ever, this comes at a cost.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
41
+ page_content=' Sending data to the cloud raises both security and privacy concerns.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
42
+ page_content=' Moreover, communicat- ing with the cloud always induces network latency, which may be significant in time-critical applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
43
+ page_content=' To address issues brought about by cloud-centric solutions, edge com- puting has been proposed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
44
+ page_content=' Here, the core of the approach is processing the data as close to the source as possible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
45
+ page_content=' This allows for latency reduction, and helps ensure the security and privacy of data, which remains within the local network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
46
+ page_content=' However, edge computing has its own set of issues.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
47
+ page_content=' Typically, the computational resources, which are available at the edge are considerably smaller.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
48
+ page_content=' A possible solution to addressing the downsides of both these options is a combined approach – an edge-cloud continuum, where data is partially processed on the edge and partially in the cloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
49
+ page_content=' However, this raises the obvious question: at which point(s), within the continuum, individual parts of the system should be deployed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
50
+ page_content=' Here, this question is considered within a real-world sce- nario of monitoring the entrance to an active construction site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
51
+ page_content=' Specifically, the system is tasked with ensuring that (1) no unauthorized people enter the worksite, and (2) every- body is wearing appropriate Personal Protective Equipment (PPE), i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
52
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
53
+ page_content=' helmets and safety vests.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
54
+ page_content=' The scenario is evalu- ated as part of the ASSIST-IoT project, on a construction site in Warsaw, Poland, managed by the construction company Mostostal Warszawa.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
55
+ page_content=' Here, the edge versus cloud discussion becomes particularly relevant.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
56
+ page_content=' On the one hand, the privacy of workers is of paramount importance, while latencies must be minimized, to ensure a quick reaction, which hints at an arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
57
+ page_content='01501v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
58
+ page_content='CV] 4 Jan 2023 Legierski and Rachwał et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
59
+ page_content=' edge deployment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
60
+ page_content=' On the other hand, given the limited hard- ware resources available on the edge, and the extremely harsh conditions of the construction site, a cloud deployment seems attractive.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
61
+ page_content=' Given the possible benefits of both solutions, in this contri- bution, a solution is proposed for an edge-cloud continuum video analytics architecture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
62
+ page_content=' The architecture can be deployed in two variants (edge-only, and edge-cloud), described in the Architecture section.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
63
+ page_content=' Moreover, to determine the viability of the solution, an initial experimental study was performed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
64
+ page_content=' Here, an IR model was developed and integrated with the edge-only variant of the architecture.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
65
+ page_content=' Next, it was tasked with detecting when personnel wearing PPE entered and exited the work site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
66
+ page_content=' 2 Background To provide a context for this study, the state of the art of (1) IR system architectures and (2) machine learning models for PPE detection is summarized.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
67
+ page_content=' System architectures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
68
+ page_content=' The most obvious benefit of deploy- ing IR systems on the edge is the decreased latency.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
69
+ page_content=' This was demonstrated in [20], where facial recognition models were deployed on the edge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
70
+ page_content=' The authors found that deploying the models on the edge resulted in significantly better response speeds, as compared to a cloud deployment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
71
+ page_content=' In other stud- ies [8, 9], the viability of deploying deep convolutional neural networks (CNNs) in the edge-only scenario was investigated.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
72
+ page_content=' CNNs are characterized by high resource utilization, and thus are typically deployed in the cloud.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
73
+ page_content=' The studies found that deploying CNNs is viable on mobile devices, when parts of the computation can be offloaded to other edge devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
74
+ page_content=' Edge deployment allowed to achieve a consistently low latency of 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
75
+ page_content='24 ms while using CNNs to perform real-time object tracking in augmented reality [8].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
76
+ page_content=' Both studies showed that the edge deployment distributing the workload increased the inference capabilities of the system, as the models could not be run on the disconnected mobile devices alone.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
77
+ page_content=' One study [5] investigated an edge-cloud architecture, where data preprocessing servers were deployed close to the data source.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
78
+ page_content=' The preprocessed data was then sent to a cloud-based deep-learning platform.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
79
+ page_content=' This resulted in decreasing network latency and traffic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
80
+ page_content=' It also increased the security and privacy of the raw data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
81
+ page_content=' On the other hand, edge deployments are more limited in terms of the available hardware.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
82
+ page_content=' Low computational re- sources naturally limit the size of models and inference speed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
83
+ page_content=' A study compared different implementations (based on Ten- sorFlow, TensorRT, and TFLite) of the same video processing model [6], and found them to differ in their resource utiliza- tion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
84
+ page_content=' The choice of implementation influenced the energy consumption of the model, as well as its inference speed.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
85
+ page_content=' Interestingly, the slowest implementation (TFLite) was the most energy efficient.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
86
+ page_content=' It was also found that TFLite managed to remain on par with the other implementations in terms of speed, when processing low-resolution video.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
87
+ page_content=' In the case of high-resolution video, more resource-intensive models were needed to maintain the speed, suggesting that a cloud deploy- ment could be more beneficial in low-resource settings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
88
+ page_content=' Nev- ertheless, some resource-intensive models can be deployed on the edge, if resources available there are sufficient.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
89
+ page_content=' The deployment proposed in a different study required all nodes to be equipped with a GPU [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
90
+ page_content=' This allowed the authors to use CNNs on the edge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
91
+ page_content=' A similar result was reported in [13], were IR models deployed on a Raspberry Pi 4B, equipped with a camera, and an Intel Neural Compute Stick 2 (a USB device for deep learning inference on the edge) were studied.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
92
+ page_content=' These devices were chosen for their low power consumption and good computing capabilities.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
93
+ page_content=' Overall, a model tasked with detecting PPE in the form of helmets and safety vests achieved precision on the order of 99.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
94
+ page_content='5%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
95
+ page_content=' Models for PPE detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
96
+ page_content=' Effective video analytics-based methods for detecting the presence of protective helmets, worn by workers, due to its health and safety importance, is currently a hot research topic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
97
+ page_content=' The usage of existing, unmodified machine learning models for detecting protective head covers does not provide suffi- cient detection accuracy, as proven in a recent study [19].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
98
+ page_content=' In said article, several versions of the popular YOLO algo- rithm [1] were compared.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
99
+ page_content=' It was shown that the most effective version of YOLO for helmet detection is the v4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
100
+ page_content=' After improv- ing the loss function, it achieved more than 93% accuracy during tests.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
101
+ page_content=' A similar study [10] focused on improving the YOLOv5 algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
102
+ page_content=' The system achieved results close to 97% accuracy, thanks to the improvement of the structure of the neural network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
103
+ page_content=' Another study [18], also investigated improv- ing YOLOv5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
104
+ page_content=' However, instead of the algorithm itself, work was focused on processing of input data by applying filters on the input image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
105
+ page_content=' This allowed to improve the accuracy to above 95%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
106
+ page_content=' Yet another study [15] presented an approach for improving the detection speed and accuracy by designing a multi-level pyramidal feature fusion network based on the ConCaNet attention mechanism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
107
+ page_content=' Here, YOLOv3 was applied and a dataset with 6000 images was used.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
108
+ page_content=' The results demon- strate the effectiveness of this approach, which managed to reduce the number of necessary parameters.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
109
+ page_content=' Helmet detection can also be done using the SSD-MobileNet algorithm [4], which is based on yet another variant of CNN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
110
+ page_content=' An analysis of this method, reported in [7], tested its effec- tiveness and managed to reach 80% accuracy during tests.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
111
+ page_content=' In a wider comparison of algorithm types [11], the authors proposed a helmet detection method based on a dynamically changing neural network – SHDDM (Safety Helmet Detec- tion Dynamic Model).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
112
+ page_content=' The developed model analyzes the human posture and defines the area where the helmet should be located, to eliminate the detection of the helmet outside the head area and thus reduce the false positive rate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
113
+ page_content=' There are Towards Edge-Cloud Architectures for Personal Protective Equipment Detection also other approaches to helmet detection, such as methods based on color and shape used to to locate the face, and the proper wearing of a helmet [16].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
114
+ page_content=' Another solution used low- resolution images, captured from a video stream, using the Local Binary Pattern (LBP) and gray-level co-occurrence ma- trix (GLCM) methods along with a back-propagation neural network [14].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
115
+ page_content=' Another study [2] investigated the usefulness of artificially created images in the training of CNNs for PPE detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
116
+ page_content=' The paper presented the results achieved with YOLOv3, trained on artificial images generated by the Rockstar Advanced Game Engine (RAGE) from the Grand Theft Auto V video game.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
117
+ page_content=' This approach achieved a mean average precision (mAP) of only 55.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
118
+ page_content='11% on a test dataset consisting of real-world images.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
119
+ page_content=' The mAP for synthetic images was much higher at 87.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
120
+ page_content='24%.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
121
+ page_content=' It should be noted that the poor results for the real-world images are most likely caused by the RAGE engine being unable to generate a sufficient amount of head, welding mask, ear protection, and chest object variations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
122
+ page_content=' As can be seen, there are many possibilities for detecting protective helmets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
123
+ page_content=' Here, the SHDDM is particularly note- worthy, as it has an important feature of checking whether the helmet is worn properly, and not only detecting its presence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
124
+ page_content=' This, in turn, is particularly relevant in real-world applica- tions.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
125
+ page_content=' 3 Proposed Architecture The proposed video analytics system can be deployed in two architecture variants: edge-cloud (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
126
+ page_content=' 1) and edge-only (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
127
+ page_content=' 2).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
128
+ page_content=' As outlined above, there are reasons to believe that both variants may be appropriate for the considered scenario.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
129
+ page_content=' Both architectures share a common core deployed on the edge, consisting of: a camera, the Image Processor (IP) component, and the OSH (Occupational Safety and Health) manager’s mobile device.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
130
+ page_content=' The camera (in the reported experiments the Dahua IPC- HFW5449T-ASE-LED was used) provides a live RTSP video stream, which is directed to the Image Processor.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
131
+ page_content=' The IP is a service written in Python, which can optionally perform preliminary image analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
132
+ page_content=' Using configurable methods such as motion detection and brightness thresholding, the IP is able to discard image frames that do not contain moving people, reducing network traffic to components involved in actual im- age analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
133
+ page_content=' It is also responsible for communicating with the rest of the system, designed in accordance with the ASSIST- IoT reference architecture [3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
134
+ page_content=' IP communicates with the rest of the system publishing alerts to an MQTT topic.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
135
+ page_content=' This de- sign allows other components and devices in the ASSIST-IoT deployment to be notified in a streaming manner of any OSH violations, such as workers not wearing protective helmets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
136
+ page_content=' In the first version of the architecture – the edge-cloud deployment – the IP is configured to use the cloud-based AWS Rekognition platform, with its PPE detection service.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
137
+ page_content=" ASSIST-IoT deployment AWS Rekognition Video stream OSH alert Image Processor Camera OSH manager's mobile device Edge Filtered video stream Inference results AWS Cloud Figure 1." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
138
+ page_content=" Edge-cloud deployment Video stream OSH alert Image Processor Camera OSH manager's mobile device Edge Orange AI&ML Platform Video stream Processing pipeline AI-X AI-2 AI-1 ." metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
139
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
140
+ page_content=' Result adaptation Inference results External platform connector ASSIST-IoT deployment Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
141
+ page_content=' Edge deployment In the edge-only variant, the video analysis is performed by the Orange AI&ML Platform, which is deployed on a server on the construction site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
142
+ page_content=' This edge deployment allows for maintaining lower network latency, and ensures the privacy of worker data.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
143
+ page_content=' The AI&ML Platform’s services are written as Python runnable modules that provide their own APIs and GUIs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
144
+ page_content=' The services can reuse the APIs and GUIs provided by the platform, or build them from scratch.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
145
+ page_content=' A service col- lects frames from a video source, processes them in an ML pipeline specific to the service, and adapts or interprets the results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
146
+ page_content=' The inference results from the Platform are forwarded to external services, with the use of provided connectors.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
147
+ page_content=' As the Orange AI&ML Platform operates on the edge, all video Legierski and Rachwał et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
148
+ page_content=' processing takes place on the client’s site, ensuring full secu- rity of customer data (video) and compliance with appropriate regulations, such as GDPR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
149
+ page_content=' 4 Methodology As part of this study, a preliminary version of the edge-only variant of the architecture was deployed on an active construc- tion site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
150
+ page_content=' Using the Orange AI&ML Platform, a model was trained to count people wearing helmets entering and exiting a specific area.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
151
+ page_content=' The system counts people in helmets in defined recognition areas (bounding boxes), crossing the yellow and green lines visible in Figs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
152
+ page_content=' 3 and 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
153
+ page_content=' People entering the con- struction site are counted after crossing the green line, while people leaving are counted after crossing the yellow line.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
154
+ page_content=' The machine learning pipeline consists of a YOLOX object detec- tion model, trained for detecting heads in helmets, and a Deep- SORT [12] multi-object tracking algorithm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
155
+ page_content=' The YOLOX model was trained using a dataset provided by the Northeast- ern University of China (https://public.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
156
+ page_content='roboflow.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
157
+ page_content='com/object- detection/hard-hat-workers).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
158
+ page_content=' The system’s results were compared to those obtained from an algorithm built into the Dahua camera.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
159
+ page_content=' It should be noted that the camera counted all people entering and leav- ing, including those without protective helmets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
160
+ page_content=' However, this should not impact the results much, as the safety regulations on this particular site forbid entering it without a helmet and the rule is strictly enforced before workers reach the counting location.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
161
+ page_content=' The measurements were performed in two series – each using a different bounding box definition.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
162
+ page_content=' A single series spanned the length of one workday on the construction site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
163
+ page_content=' The number of entering and leaving people was counted in hourly intervals (between 5 AM and 7 PM).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
164
+ page_content=' 5 Results The Tables 1 and 2 present the results of the performed exper- iments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
165
+ page_content=' The Table 1 contains measurements made on 22nd November 2022, with the bounding box set as presented in Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
166
+ page_content=' 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
167
+ page_content=' The average difference between the number of people entering, as measured by the camera and the model was equal to −6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
168
+ page_content='21, with the standard deviation of σ = 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
169
+ page_content='08, whereas for people exiting it was 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
170
+ page_content='35 and σ = 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
171
+ page_content='73 respectively.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
172
+ page_content=' The correlation between entrances detected by the camera and the model deployed on the AI&ML platform, expressed by the Pearson coefficient is 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
173
+ page_content='988, whereas for exits 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
174
+ page_content='995.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
175
+ page_content=' The cor- relations were found to be statistically significant (p ≤ 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
176
+ page_content='05).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
177
+ page_content=' Table 2 contains measurements from 24th November 2022 (for modified detection areas, depicted in Fig. 4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
179
+ page_content=' On that day, the average difference for entering was −4.93 with σ = 4.25 and for exiting 3.92 with σ = 4.92.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
184
+ page_content=' For these measurements the Pearson coefficient for people entering is equal to 0.993 and exiting 0.989. The correlations were found to be statistically significant (p ≤ 0.05).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
189
+ page_content=' Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
190
+ page_content=' Bounding boxes location on November 22 (before modification) Figure 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
191
+ page_content=' Bounding boxes location on November 24 (after modification) The tables also present differences in the number of people detected by the camera and the AI&ML platform and the sum of these differences calculated for both movement directions: entries and exits.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
192
+ page_content=' During the experiments, several unexpected events took place, which had a significant impact on the reported results.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
193
+ page_content=' Workers were observed acting in an unexpected manner – lingering or walking around the detection area (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
194
+ page_content=' 5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
195
+ page_content=' It was also noticed that sometimes the workers put on their helmets after having passed the detection area (Fig.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
196
+ page_content=' 6).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
197
+ page_content=' These behaviors present a challenge to the future system, as they significantly affect its accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
198
+ page_content=' Hour 05:00 06:00 07:00 08:00 09:00 10:00 11:00 12:00 13:00 14:00 15:00 16:00 17:00 18:00 Total Dahua In 12 65 84 47 26 84 50 51 28 70 28 8 9 0 562 Dahua Out 2 15 21 35 81 44 61 31 63 32 59 66 26 8 544 AI&ML In 11 78 87 52 28 96 60 58 43 75 33 18 10 0 649 AI&ML Out 2 13 23 33 73 44 63 31 58 31 59 62 25 8 525 Diff.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
199
+ page_content=' In 1 13 3 5 2 12 10 7 15 5 5 10 1 0 87 Diff.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
200
+ page_content=' Out 0 2 2 2 8 0 2 0 5 1 0 4 1 0 19 Table 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
201
+ page_content=' Entries and exits to the construction site, 22 November 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
202
+ page_content=' Hour 05:00 06:00 07:00 08:00 09:00 10:00 11:00 12:00 13:00 14:00 15:00 16:00 17:00 18:00 Total Dahua In 4 57 113 62 34 73 75 65 56 93 27 10 9 0 678 Dahua Out 0 10 29 57 80 53 74 41 82 52 49 84 20 8 639 AI&ML In 3 61 113 68 43 76 79 73 69 98 33 21 10 0 747 AI&ML Out 0 11 26 46 64 48 72 38 73 49 47 82 22 6 584 Diff.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
203
+ page_content=' In 1 4 0 6 9 3 4 8 13 5 6 11 1 0 69 Diff.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
204
+ page_content=' Out 0 1 3 11 16 5 2 3 9 3 2 2 2 2 55 Table 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
205
+ page_content=' Entries and exits to the construction site, 24 November 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
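The validation figures quoted in the chunks above can be re-derived from the hourly counts in Table 1. The following is a minimal sketch (Python, with NumPy and SciPy assumed; it is not part of the paper or of this data file) of how the mean camera-vs-model difference, its standard deviation and the Pearson coefficient for the "entering" direction on 22 November would be computed:

```python
import numpy as np
from scipy.stats import pearsonr

# Hourly "In" counts for 22 November 2022, copied from Table 1 (05:00-18:00).
dahua_in = np.array([12, 65, 84, 47, 26, 84, 50, 51, 28, 70, 28, 8, 9, 0])
aiml_in  = np.array([11, 78, 87, 52, 28, 96, 60, 58, 43, 75, 33, 18, 10, 0])

diff = dahua_in - aiml_in                     # camera count minus model count, per hour
print("mean difference:", diff.mean())        # roughly -6.21
print("std (ddof=1):", diff.std(ddof=1))      # roughly 5.08
r, p = pearsonr(dahua_in, aiml_in)            # reported in the text as 0.988, p <= 0.05
print("Pearson r:", r, "p-value:", p)
```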
206
+ page_content=' 6 Concluding remarks The tested model demonstrated relatively good performance in the investigated scenario.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
207
+ page_content=' Its accuracy when tasked with counting people wearing protective helmets was found to be sufficient, and was validated against a different system.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
208
+ page_content=' A number of discrepancies between the counts of the model and the camera can be attributed to unexpected situations (Figs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
209
+ page_content=' 5 and 6) and the fact that the Dahua camera did not differentiate people wearing and not wearing helmets.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
210
+ page_content=' The high correla- tion coefficient between the camera and the Orange AI&ML Platform’s model allows to conclude that the two solutions perform comparably well.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
211
+ page_content=' It should be noted that there were changes in the correla- tion between the days of experiments.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
212
+ page_content=' These differences are explained by the changes to the bounding box.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
213
+ page_content=' This is one of the parameters that have to be investigated further.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
214
+ page_content=' Both variants of the proposed architecture can be used in the investigated scenario of PPE detection on a construc- tion site.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
215
+ page_content=' The feasibility of using an edge-deployment was confirmed – the server’s computational capabilities were suf- ficient to maintain satisfactory inference accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
216
+ page_content=' Therefore, it can be concluded that the construction site is equipped with sufficient hardware to warrant further experiments with the deployment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
217
+ page_content=' In the future, the two proposed architecture variants will be compared in terms of network latencies, resource utilization, and their accuracy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
218
+ page_content=' The presented model will also be tested further, which will include manually annotating the videos to obtain a ground truth for comparison.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
219
+ page_content=' This will allow for de- termining the actual accuracy of the developed model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
220
+ page_content=' Further optimization of bounding box locations is also planned.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
221
+ page_content=' Acknowledgments Work supported by ASSIST-IoT project funded from the Eu- ropean Union’s H2020 RIA program under grant 957258.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
222
+ page_content=' References [1] Alexey Bochkovskiy, Chien-Yao Wang, and Hong-Yuan Mark Liao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
223
+ page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
224
+ page_content=' YOLOv4: Optimal speed and accuracy of object detection.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
225
+ page_content=' arXiv preprint arXiv:2004.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
226
+ page_content='10934 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
227
+ page_content=' [2] Marco di Benedetto, Enrico Meloni, Giuseppe Amato, Fabrizio Falchi, and Claudio Gennaro.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
228
+ page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
229
+ page_content=' Learning Safety Equipment Detection using Virtual Worlds.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
230
+ page_content=' In 2019 International Conference on Content- Based Multimedia Indexing (CBMI).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
231
+ page_content=' 1–6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
232
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
233
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
234
+ page_content='1109/ CBMI.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
235
+ page_content='2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
236
+ page_content='8877466 [3] Alejandro Fornés-Leal, Ignacio Lacalle, Carlos E Palau, Paweł Szmeja, Maria Ganzha, Marcin Paprzycki, Eduardo Garro, and Francisco Blan- quer.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
237
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
238
+ page_content=' ASSIST-IoT: A reference architecture for next generation Internet of Things.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
239
+ page_content=' In New Trends in Intelligent Software Methodologies, Tools and Techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
240
+ page_content=' IOS Press, 109–128.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
241
+ page_content=' [4] Andrew G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
242
+ page_content=' Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, and Hartwig Adam.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
243
+ page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
244
+ page_content=' MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
245
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
246
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
247
+ page_content='48550/ARXIV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
248
+ page_content='1704.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
249
+ page_content='04861 [5] Yutao Huang, Xiaoqiang Ma, Xiaoyi Fan, Jiangchuan Liu, and Wei Gong.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
250
+ page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
251
+ page_content=' When deep learning meets edge computing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
252
+ page_content=' In 2017 IEEE 25th International Conference on Network Protocols (ICNP).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
253
+ page_content=' 1–2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
254
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
255
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
256
+ page_content='1109/ICNP.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
257
+ page_content='2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
258
+ page_content='8117585 [6] Anis Koubaa, Adel Ammar, Anas Kanhouch, and Yasser AlHabashi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
259
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
260
+ page_content=' Cloud Versus Edge Deployment Strategies of Real-Time Face Recognition Inference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
261
+ page_content=' IEEE Transactions on Network Science and Engineering 9, 1 (2022), 143–160.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
262
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
263
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
264
+ page_content='1109/TNSE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
265
+ page_content='2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
266
+ page_content=' 3055835 [7] Yange Li, Han Wei, Zheng Han, Jianling Huang, and Wei-Dong Wang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
267
+ page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
268
+ page_content=' Deep Learning-Based Safety Helmet Detection in Engineering Management Based on Convolutional Neural Networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
269
+ page_content=' Advances in Civil Engineering 2020 (09 2020), 1–10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
270
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
271
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
272
+ page_content='1155/2020/ 9703560 [8] Luyang Liu, Hongyu Li, and Marco Gruteser.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
273
+ page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
274
+ page_content=' Edge Assisted Real-Time Object Detection for Mobile Augmented Reality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
275
+ page_content=' In The 25th Annual International Conference on Mobile Computing and Legierski and Rachwał et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
276
+ page_content=' Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
277
+ page_content=' Unexpected worker behavior – staying in the detec- tion area for longer Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
278
+ page_content=' Unexpected worker behavior – putting the helmet on behind the detection line Networking (Los Cabos, Mexico) (MobiCom ’19).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
279
+ page_content=' Association for Computing Machinery, New York, NY, USA, Article 25, 16 pages.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
280
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
281
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
282
+ page_content='1145/3300061.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
283
+ page_content='3300116 [9] Peng Liu, Bozhao Qi, and Suman Banerjee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
284
+ page_content=' 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
285
+ page_content=' EdgeEye: An Edge Service Framework for Real-time Intelligent Video Analytics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
286
+ page_content=' 1–6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
287
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
288
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
289
+ page_content='1145/3213344.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
290
+ page_content='3213345 [10] Weiran Liu, Yi Hu, and Dawei Fan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
291
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
292
+ page_content=' Safety Helmet Wearing Recognition Based on Improved YOLOv5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
293
+ page_content=' In 2022 11th International Conference of Information and Communication Technology (ICTech)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
294
+ page_content=' 466–470.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
295
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
296
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
297
+ page_content='1109/ICTech55460.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
298
+ page_content='2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
299
+ page_content='00099 [11] Yao Nan, Qin Jian-Hua, Wang Zhen, and Wang Hong-Chang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
300
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
301
+ page_content=' Safety Helmet Detection Dynamic Model Based on the Critical Area Attention Mechanism.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
302
+ page_content=' In 2022 7th Asia Conference on Power and Electrical Engineering (ACPEE).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
303
+ page_content=' 1296–1303.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
304
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
305
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
306
+ page_content='1109/ ACPEE53904.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
307
+ page_content='2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
308
+ page_content='9783764 [12] Nicolai Wojke, Alex Bewley, and Dietrich Paulus.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
309
+ page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
310
+ page_content=' Simple Online and Realtime Tracking with a Deep Association Metric.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
311
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
312
+ page_content=' org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
313
+ page_content='48550/ARXIV.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
314
+ page_content='1703.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
315
+ page_content='07402 [13] Pei-Shao Wu, Chun-Yi Lin, Tang-Yu Cheng, and Wu-Sung Yao.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
316
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
317
+ page_content=' Analysis and design of industrial safety automatic identification sys- tem based on Tiny-YOLOv3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
318
+ page_content=' In 2021 IEEE International Future En- ergy Electronics Conference (IFEEC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
319
+ page_content=' 1–6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
320
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
321
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
322
+ page_content='1109/ IFEEC53238.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
323
+ page_content='2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
324
+ page_content='9661778 [14] JIANG Xinhua, XUE Heru, ZHANG Lina, and ZHOU Yanqing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
325
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
326
+ page_content=' A Study of Low-resolution Safety Helmet Image Recognition Combin- ing Statistical Features with Artificial Neural Network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
327
+ page_content=' In International Journal of Simulation: Systems, Science and Technology (IJSSST).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
328
+ page_content=' 1–6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
329
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
330
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
331
+ page_content='5013/IJSSST.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
332
+ page_content='a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
333
+ page_content='17.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
334
+ page_content='37.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
335
+ page_content='11 [15] Chang Xu, Jinyu Tian, and Zhiqiang Zeng.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
336
+ page_content=' 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
337
+ page_content=' Lightweight Fusion Channel Attention Convolutional Neural Network for Helmet Recogni- tion.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
338
+ page_content=' In WSPC Proceedings.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
339
+ page_content=' 1–8.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
340
+ page_content=' [16] Geng Zhang, Lei Lv, Li Dan, and Min Zhu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
341
+ page_content=' 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
342
+ page_content=' The Method for Recognizing Recognition Helmet Based On Color and Shape.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
343
+ page_content=' In 5th International Conference on Machinery, Materials and Computing Technology (ICMMCT).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
344
+ page_content=' 1–5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
345
+ page_content=' [17] Qingyang Zhang, Hui Sun, Xiaopei Wu, and Hong Zhong.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
346
+ page_content=' 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
347
+ page_content=' Edge video analytics for public safety: A review.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
348
+ page_content=' Proc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
349
+ page_content=' IEEE 107, 8 (2019), 1675–1696.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
350
+ page_content=' [18] Fangbo Zhou, Huailin Zhao, and Zhen Nie.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
351
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
352
+ page_content=' Safety Helmet Detection Based on YOLOv5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
353
+ page_content=' In 2021 IEEE International Conference on Power Electronics, Computer Applications (ICPECA).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
354
+ page_content=' 6–11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
355
+ page_content=' https: //doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
356
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
357
+ page_content='1109/ICPECA51329.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
358
+ page_content='2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
359
+ page_content='9362711 [19] Mudi Zhou, Zhuli Fang, Bin Zhao, and Pengfei Li.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
360
+ page_content=' 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
361
+ page_content=' Safety Helmet Wearing Detection and Recognition Based on YOLOv4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
362
+ page_content=' In 2021 3rd International Academic Exchange Conference on Science and Technol- ogy Innovation (IAECST).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
363
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
364
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
365
+ page_content='1109/IAECST54258.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
366
+ page_content='2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
367
+ page_content=' 9695790 [20] Xihao Zhou and Sye Loong Keoh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
368
+ page_content=' 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
369
+ page_content=' Deployment of Facial Recogni- tion Models at the Edge: A Feasibility Study.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
370
+ page_content=' In 2020 21st Asia-Pacific Network Operations and Management Symposium (APNOMS).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
371
+ page_content=' 214– 219.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
372
+ page_content=' https://doi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
373
+ page_content='org/10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
374
+ page_content='23919/APNOMS50412.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
375
+ page_content='2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
376
+ page_content='9236972 RC' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/D9AzT4oBgHgl3EQfif2Z/content/2301.01501v1.pdf'}
EdA0T4oBgHgl3EQfA_-G/content/tmp_files/2301.01970v1.pdf.txt ADDED
@@ -0,0 +1,2269 @@
1
+ CAT: LoCalization and IdentificAtion Cascade Detection Transformer
2
+ for Open-World Object Detection
3
+ Shuailei Ma 1* Yuefeng Wang1† Jiaqi Fan1 Ying Wei1‡
4
+ Thomas H. Li3 Hongli Liu2 Fanbing Lv2
5
+ 1Northeast University, 2Changsha Hisense Intelligent System Research Institute Co., Ltd.
6
+ 3Information Technology R&D Innovation Center of Peking University,
7
+ Abstract
8
+ Open-world object detection (OWOD), as a more gen-
9
+ eral and challenging goal, requires the model trained from
10
+ data on known objects to detect both known and unknown
11
+ objects and incrementally learn to identify these unknown
12
+ objects.
13
+ The existing works which employ standard de-
14
+ tection framework and fixed pseudo-labelling mechanism
15
+ (PLM) have the following problems: (𝑖) The inclusion of de-
16
+ tecting unknown objects substantially reduces the model’s
17
+ ability to detect known ones. (𝑖𝑖) The PLM does not ade-
18
+ quately utilize the priori knowledge of inputs. (𝑖𝑖𝑖) The fixed
19
+ selection manner of PLM cannot guarantee that the model
20
+ is trained in the right direction. We observe that humans
21
+ subconsciously prefer to focus on all foreground objects and
22
+ then identify each one in detail, rather than localize and
23
+ identify a single object simultaneously, for alleviating the
24
+ confusion. This motivates us to propose a novel solution
25
+ called CAT: LoCalization and IdentificAtion Cascade De-
26
+ tection Transformer which decouples the detection process
27
+ via the shared decoder in the cascade decoding way. In the
28
+ meanwhile, we propose the self-adaptive pseudo-labelling
29
+ mechanism which combines the model-driven with input-
30
+ driven PLM and self-adaptively generates robust pseudo-
31
+ labels for unknown objects, significantly improving the abil-
32
+ ity of CAT to retrieve unknown objects. Comprehensive ex-
33
+ periments on two benchmark datasets, 𝑖.𝑒., MS-COCO and
34
+ PASCAL VOC, show that our model outperforms the state-
35
+ of-the-art in terms of all metrics in the task of OWOD, in-
36
+ cremental object detection (IOD) and open-set detection.
37
+ 1. Introduction
38
+ Open-world object detection (OWOD) is a more prac-
39
+ tical detection problem in computer vision, making artifi-
40
+ *First author. Email: xiaomabufei@gmail.com
41
+ †Code url: https://github.com/xiaomabufei/CAT
42
+ ‡Corresponding author. Email: weiying@ise.neu.edu.cn
43
+ [Figure 1 graphic: a scene of objects labelled Bear, Frog, Flower, Squirrel, Cat, Bee and several Unknown]
53
+ Figure 1. When faced with new scenes in open world, humans sub-
54
+ consciously focus on all foreground objects and then identify them
55
+ in detail in order to alleviate the confusion between the known and
56
+ unknown objects and get a clear view. Motivated by this, our CAT
57
+ utilizes the shared decoder to decouple the localization and iden-
58
+ tification process in the cascade decoding way, where the former
59
+ decoding process is used for localization and the latter for identi-
60
+ fication.
61
+ cial intelligence (AI) smarter to face more difficulties in real
62
+ scenes. Within the OWOD paradigm, the model’s life-span
63
+ is pushed by iterative learning process. At each episode, the
64
+ model trained only by known objects needs to detect known
65
+ objects while simultaneously localizing unknown objects
66
+ and identifying them into the unknown class. Human an-
67
+ notators then label a few of these tagged unknown classes
68
+ of interest gradually. The model given these newly-added
69
+ annotations will continue to incrementally update its knowl-
70
+ edge without retraining from scratch.
71
+ Recently, the work [17] proposed an open-world ob-
72
+ ject detector, ORE, based on the two-stage Faster R-CNN
73
+ [33] pipeline. ORE utilizes an auto-labelling step to obtain
74
+ pseudo-unknowns for training model to detect unknown ob-
75
+ jects and learns an energy-based binary classifier to distin-
76
+ guish the unknown class from known classes. However,
77
+ its success largely relies on a held-out validation set which
78
+ arXiv:2301.01970v1 [cs.CV] 5 Jan 2023
81
+ is leveraged to estimate the distribution of unknown ob-
82
+ jects in the energy-based classifier. To alleviate the prob-
83
+ lems in ORE, OW-DETR [13] proposes to use the detection
84
+ transformer [3, 38] for OWOD in a justifiable way and di-
85
+ rectly leverages the framework of DDETR [38]. In addi-
86
+ tion, OW-DETR proposes an attention-driven PLM which
87
+ selects pseudo labels for unknown objects according to the
88
+ attention scores.
89
+ For the existing works, we find the following hindering
90
+ problems. (𝑖) Owing to the inclusion of detecting unknown
91
+ objects, the model’s ability to detect known objects substan-
92
+ tially drops. To alleviate the confusion between known and
93
+ unknown objects, humans prefer to dismantle the process of
94
+ open-world object detection rather than parallelly localize
95
+ and identify open-world objects like most standard detec-
96
+ tion models. (𝑖𝑖) To the best of our knowledge, in the exist-
97
+ ing OWOD PLM, models leverage the learning process for
98
+ known objects to guide the generation of pseudo labels for
99
+ unknown objects, without leveraging the prior conditions of
100
+ the inputs (𝑡𝑒𝑥𝑡𝑢𝑟𝑒,𝑙𝑖𝑔ℎ𝑡 𝑓 𝑙𝑜𝑤,𝑒𝑡𝑐). As a result, the model
101
+ cannot learn knowledge beyond the data annotation. (𝑖𝑖𝑖)
102
+ The fixed selection manner of PLM cannot guarantee that
103
+ the model learns to detect unknown objects in the right di-
104
+ rection, due to the uncertain quality of the pseudo labels.
105
+ The models may be worse for detecting unknown objects.
106
+ When faced with a new scene, humans prefer focusing
107
+ on all foreground objects and then analyse them in detail,
108
+ as shown in Figure.1. Motivated by this and the aforemen-
109
+ tioned observations, we propose a novel LoCalization and
110
+ IdentificAtion Cascade Detection Transformer. CAT com-
111
+ prises three dedicated components namely, self-adaptive
112
+ pseudo-labelling mechanism, shared transformer de-
113
+ coder and cascade decoupled decoding structure. The
114
+ self-adaptive PLM maintains the ability of CAT to ex-
115
+ plore the knowledge beyond the known objects and self-
116
+ adaptively adjusts the pseudo-label generation according to
117
+ the model training process. Via the cascade decoupled de-
118
+ coding structure, the shared transformer decoder decouples
119
+ the localization and identification process for alleviating the
120
+ influence of detecting unknown objects on the detection of
121
+ known objects, where the former decoding process is used
122
+ for localization and the latter for identification. In the mean-
123
+ while, we observe the structure substantially improves the
124
+ model’s ability for incremental object detection according
125
+ to the experiments. In addition, we explore the decoupled
126
+ structures for detection transformer. Our contributions can
127
+ be summarized fourfold:
128
+ • We propose a novel localization and identification cas-
129
+ cade detection transformer (CAT), which decouples
130
+ the localization and identification process of detection
131
+ and alleviates the influence of detecting unknown ob-
132
+ jects on the detection of known ones.
133
+ • We introduce a novel pseudo-labelling mechanism
134
+ which self-adaptively combines the model-driven and
135
+ input-driven pseudo-labelling during the training pro-
136
+ cess for generating robust pseudo-labels and exploring
137
+ knowledge beyond known objects.
138
+ • We explore the decoupled decoding methods of the de-
139
+ tection transformer, 𝑖.𝑒., the fully decoupled decoding
140
+ structure and the cascade decoupled decoding struc-
141
+ ture.
142
+ • Our extensive experiments on two popular bench-
143
+ marks demonstrate the effectiveness of the proposed
144
+ CAT. CAT outperforms the recently introduced ORE
145
+ and OW-DETR for OWOD, IOD and open-set detec-
146
+ tion. For OWOD, CAT achieves absolute gains ranging
147
+ from 11.8% to 18.3% in terms of unknown recall over
148
+ OW-DETR.
149
+ 2. Problem Formulation
150
+ At time 𝑡, let K𝑡 = {1,2,...,𝐶} denote the set of known
151
+ object classes and U𝑡 = {𝐶 + 1,...} denote the unknown
152
+ classes which might be encountered at the test time. The
153
+ known object categories K𝑡 are labeled in the dataset
154
+ D𝑡 = {J 𝑡,L𝑡} where J 𝑡 denotes the input images and
155
+ L𝑡 denotes the corresponding labels at time 𝑡. The train-
156
+ ing image set consists of 𝑀 images J 𝑡 = {𝑖1,𝑖2,...,𝑖𝑀 }
157
+ and corresponding labels L𝑡 = {ℓ1,ℓ2,...,ℓ𝑀 }. Each ℓ𝑖 =
158
+ {T1,T2,...,T𝑁 } denotes a set of 𝑁 object instances with
159
+ their class labels 𝑐𝑛 ⊂ K𝑡 and locations, 𝑥𝑛, 𝑦𝑛,𝑤𝑛, ℎ𝑛
160
+ denote the bounding box center coordinates, width and
161
+ height respectively. The Open-World Object Detection re-
162
+ moves the artificial assumptions and restrictions in tradi-
163
+ tional object detection and makes object detection tasks
164
+ more aligned with real life. It requires the trained model
165
+ M𝑡 not only to detect the previously encountered known
166
+ classes 𝐶 but also to identify an unseen class instance as
167
+ belonging to the unknown class. In addition, it requires the
168
+ object detector to be capable of incremental update for new
169
+ knowledge and this cycle continues over the detector’s lifes-
170
+ pan. In incremental updating phase, the unknown instances
171
+ identified by M𝑡 are annotated manually, and along with
172
+ their corresponding training examples, update D𝑡 to D𝑡+1
173
+ and K𝑡 to K𝑡+1 = {1,2,...,𝐶,...,𝐶 +n}, the model adds the
174
+ 𝑛 new classes to known classes and updates itself to M𝑡+1
175
+ without retraining from scratch on the whole dataset D𝑡+1.
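As a purely illustrative aside (not code from the paper), the label-space bookkeeping described in this formulation can be sketched as a known-class set that collapses everything else into a single unknown label and absorbs newly annotated classes at each task boundary:

```python
# Hypothetical helper mirroring the K^t / U^t bookkeeping described above.
class OWODLabelSpace:
    UNKNOWN = "unknown"

    def __init__(self, known_classes):
        self.known = list(known_classes)          # K^t = {1, ..., C}

    def map_label(self, class_name):
        # Anything not yet annotated is identified as the single unknown class.
        return class_name if class_name in self.known else self.UNKNOWN

    def incremental_update(self, newly_annotated):
        # Task t -> t+1: K^{t+1} = K^t ∪ {C+1, ..., C+n}; the detector itself is
        # updated incrementally on the new annotations, not retrained from scratch.
        self.known.extend(c for c in newly_annotated if c not in self.known)

space = OWODLabelSpace(["person", "car"])
print(space.map_label("dog"))          # -> "unknown"
space.incremental_update(["dog"])
print(space.map_label("dog"))          # -> "dog"
```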
176
+ 3. Proposed method
177
+ This section elaborates the proposed CAT in details. In
178
+ Sec.3.1, the overall architecture of CAT is described in de-
179
+ tail. A novel self-adaptive adjustment strategy for pseudo-
180
+ labelling is proposed in Sec.3.2. We explore to decouple
181
+ the decoding process of the detection transformer and pro-
182
+ pose the localization and identification cascade decoupled
185
+ Figure 2. Overall Architecture of proposed CAT framework. The proposed CAT consists of a multi-scale feature extractor, the shared trans-
186
+ former decoder, the regression prediction branch, and the self-adaptive pseudo-labelling. The multi-scale feature extractor comprises the
187
+ mainstream feature extraction backbone and a deformable transformer encoder, for extracting multi-scale features. The shared transformer
188
+ decoder is a deformable transformer decoder and decouples the localization and identification process in the cascade decoding way. The
189
+ regression prediction branch contains the bounding box regression branch 𝐹𝑟𝑒𝑔, novelty objectness branch 𝐹𝑜𝑏 𝑗, and novelty classification
190
+ branch 𝐹𝑐𝑙𝑠. While the novelty classification and objectness branches are single-layer feed-forward networks (FFN) and the regression
191
+ branch is a 3-layer FFN.
192
+ decoding structure in Sec.3.3. In Sec.3.4, we illustrate the
193
+ end-to-end training strategy of CAT.
194
+ 3.1. Overall Architecture
195
+ As shown in Figure.2, for a given image J ∈ R𝐻×𝑊 ×3,
196
+ CAT uses a hierarchical feature extraction backbone to
197
+ extract multi-scale features $Z_i \in \mathbb{R}^{\frac{H}{4 \times 2^i} \times \frac{W}{4 \times 2^i} \times 2^i C_s}$, $i = 1, 2, 3$.
202
+ The feature maps 𝑍𝑖 are projected from dimension 𝐶𝑠
203
+ to dimension 𝐶𝑑 by using 1×1 convolution and concate-
204
+ nated to 𝑁𝑠 vectors with 𝐶𝑑 dimensions after flattening
205
+ out. Afterwards, along with supplementary positional encod-
206
+ ing 𝑃𝑛 ∈ R𝑁𝑠×𝐶𝑑, the multi-scale features are sent into the
207
+ deformable transformer encoder to encode semantic fea-
208
+ tures. The encoded semantic features 𝑀 ∈ R𝑁𝑠×𝑐𝑑 are ac-
209
+ quired and sent into the shared decoder together with a
210
+ set of 𝑁 learnable location queries and positional embed-
211
+ dings 𝑃𝑚 ∈ R𝑁𝑠×𝐶𝑑. Aided by interleaved cross-attention
212
+ and self-attention modules, the shared decoder transforms
213
+ the location queries Q location ∈ R𝑁 ×𝐷 to a set of N loca-
214
+ tion query embeddings E location ∈ R𝑁 ×𝐷. The Elocation are
215
+ then input to the regression branch to locate N foreground
216
+ bounding boxes containing the known classes and unknown
217
+ classes. Meanwhile, the E location are used as class queries
218
+ and sent into the shared decoder together with the 𝑀 and
219
+ 𝑃𝑚 again. The shared decoder transforms the class queries
220
+ to 𝑁 class query embeddings Eclass that are corresponding to
221
+ the location query embeddings. The Eclass are then sent into
222
+ the objectness and novelty classification branch to predict
223
+ the objectness and category respectively. After selecting the
224
+ unique queries that best match the known instances by a bi-
225
+ partite matching loss, the remaining queries are utilized to
226
+ select the unknown category instances and generate pseudo
227
+ labels by self-adaptive pseudo-labelling mechanism.
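To make the two decoding passes concrete, the following is a minimal PyTorch sketch of the cascade decoupled decoding described above. It is an illustrative stand-in rather than the authors' implementation: a vanilla nn.TransformerDecoder replaces the deformable decoder, positional encodings and embeddings are omitted, and all sizes are arbitrary.

```python
import torch
import torch.nn as nn

class CascadeDecodingHead(nn.Module):
    """Sketch of the shared decoder used twice: localize first, then identify."""

    def __init__(self, d_model=256, n_queries=100, n_known=20):
        super().__init__()
        layer = nn.TransformerDecoderLayer(d_model, nhead=8, batch_first=True)
        self.shared_decoder = nn.TransformerDecoder(layer, num_layers=6)
        self.location_queries = nn.Embedding(n_queries, d_model)
        self.reg_branch = nn.Sequential(                  # F_reg: 3-layer FFN -> box
            nn.Linear(d_model, d_model), nn.ReLU(),
            nn.Linear(d_model, d_model), nn.ReLU(),
            nn.Linear(d_model, 4))
        self.obj_branch = nn.Linear(d_model, 1)            # F_obj: single-layer FFN
        self.cls_branch = nn.Linear(d_model, n_known + 1)  # F_cls: known classes + "unknown"

    def forward(self, memory):
        # memory: encoded multi-scale features, shape (B, N_s, d_model)
        b = memory.size(0)
        loc_q = self.location_queries.weight.unsqueeze(0).expand(b, -1, -1)
        # First decoding pass: localization of foreground objects.
        e_loc = self.shared_decoder(loc_q, memory)
        boxes = self.reg_branch(e_loc).sigmoid()
        # Second decoding pass: identification, reusing e_loc as the class queries.
        e_cls = self.shared_decoder(e_loc, memory)
        return boxes, self.obj_branch(e_cls), self.cls_branch(e_cls)

# toy usage
head = CascadeDecodingHead()
boxes, objectness, logits = head(torch.randn(2, 300, 256))
```

Reusing the location embeddings as the class queries is what ties the identification pass to the boxes produced by the first pass.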
228
+ 3.2. Self-Adaptive Pseudo-labelling
229
+ Pseudo labels play an important role in guiding mod-
230
+ els to detect unknown object instances, determining the up-
231
+ per learning limitation of the model. The existing meth-
232
+ ods [13,17] only use model-driven pseudo-labelling and do
233
+ not take full advantage of the inputs’ priori knowledge (light
234
+ flow, textures, 𝑒𝑡𝑐).
235
+ The model-driven pseudo-labelling
236
+ [13] makes the model’s learning get caught up in the knowl-
237
+ edge of known objects, for the reason that the only source
238
+ of knowledge for the model is known object instances.
239
+ In addition, their fixed selection manner cannot guarantee
240
+ the right learning direction for unknown objects. We pro-
241
+ pose to combine model-driven with input-driven pseudo-
242
+ labelling [31, 36, 39] for expanding the knowledge sources
243
+ of the model. In the meanwhile, the pseudo-labels selec-
244
+ tion scheme should not be fixed, but be adapted as train-
245
+ ing and able to adjust itself when facing the unexpected
246
+ problems. In this paper, a novel pseudo-labelling mech-
247
+ anism is proposed for self-adaptively combining model-
248
+ driven and input-driven pseudo-labelling according to the
249
+ situation faced by the model, where the attention-driven
250
+ pseudo-labelling [13] is used as the model-driven pseudo-
+ labelling and selective search [36] is selected as the input-
291
+ driven pseudo-labelling. In self-adaptive pseudo-labelling
292
+ mechanism, the model-driven pseudo-labelling generates
293
+ pseudo-labels’ candidate boxes 𝑃𝑚 and the corresponding
294
+ confidence 𝑠𝑜, and the input-driven pseudo-labelling gen-
295
+ erates pseudo-label candidate boxes 𝑃𝐼 . The object confi-
296
+ dence of generated pseudo labels is formulated as follows:
+ $S_i = \big(\mathrm{norm}(s_o)\big)^{\mathcal{W}_m} \cdot \Big( \max_{1 \le j \le |P^I|} \mathrm{IOU}\big(P^I_j, P^m_i\big) \Big)^{\mathcal{W}_I}, \qquad (1)$
309
+ where IOU(·) (Intersection-over-Union [34]) is the most
+ commonly used metric for comparing the similarity be-
+ tween two arbitrary shapes, and 𝑖 denotes the index of the
+ pseudo labels. W𝑚 and W𝐼 are the self-adaptive weights,
+ which are controlled by the Measurer, Sensor and
+ Adjuster, as formulated below:
315
+ W𝑡 = 𝐴𝑑𝑗𝑢𝑠𝑡𝑒𝑟(W𝑡−1, 𝑆𝑒𝑛𝑠𝑜𝑟(𝑀𝑒𝑎𝑠𝑢𝑟𝑒𝑟(𝐿𝑚))), (2)
316
+ where 𝐿𝑚 represents the loss memory which is stored and
317
+ updated in real time during model training. The formulation
318
+ is illustrated in Equation.3:
319
+ 𝐿𝑚 = DEQUE(𝑙𝑜𝑠𝑠𝑡−1,𝑙𝑜𝑠𝑠𝑡−2,··· ,𝑙𝑜𝑠𝑠𝑡−𝑛),
320
+ (3)
321
+ where 𝑡 is the current iteration. Considering the sensitivity
322
+ of the model and the uneven quality of the data, we leverage
323
+ 𝑀𝑒𝑎𝑠𝑢𝑟𝑒𝑟 to obtain the trend of the losses Δ𝑙 for replacing
324
+ the single loss. The formula is as follows:
+ Measurer(L_m) = ( \sum_{i=1}^{n} α_i · loss_{t−i} ) / ( \sum_{j=n+1}^{N} β_j · loss_{t−j} ),  n < N < T,   (4)
333
+ where 𝛼 and 𝛽 denote the weighted average weights and
+ \sum_{i=1}^{n} α_i = \sum_{j=n+1}^{N} β_j = (α_i − α_{i−1})/(α_{i+1} − α_i) = (β_j − β_{j−1})/(β_{j+1} − β_j) = 1. In the Sensor,
339
+ the variation of the weight Δ𝑤 is acquired as follows:
+ Sensor(Δl) = { π_nma · Sigmoid(Δl − 1),  Δl > 1;   −π_pma · Δl,  Δl ≤ 1 },   (5)
344
+ where 𝜋𝑝𝑚𝑎 and 𝜋𝑛𝑚𝑎 represent the positive and negative
+ momentum amplitude, respectively. In the Adjuster, we
+ use Equation.6 to update the self-adaptive weights in an in-
+ cremental way [5,14,17], for memory storage and enhanc-
+ ing the robustness (more explanations in Appendix A.1).
+ W^t_m = W^{t−1}_m + Δw × W^{t−1}_m,
+ W^t_I = W^{t−1}_I − Δw × W^{t−1}_I,
+ W^t_m, W^t_I = norm(W^t_m, W^t_I),   (6)
370
+ where 𝑛𝑜𝑟𝑚(·) is the normalization operation. The update
371
+ strategy for the weights during training is shown in Algo-
372
+ rithm.1.
373
+ Algorithm 1 COMPUTINGADAPTIVEWEIGHTS
+ Input: Loss Memory: L_m; Current Iteration: t; Positive Momentum Amplitude: π_pma;
+ Negative Momentum Amplitude: π_nma; T_start: Start iteration; T_b: Weight updating
+ cycle; Loss ← Compute using Equation.11
+ Output: self-adaptive weights W_m^t and W_I^t
+ 1: while train do
+ 2:   if t ≤ T_start then
+ 3:     Initialise W_m^0 ← 0.8 and W_I^0 ← 0.2
+ 4:     Initialise L_m using Equation.3
+ 5:   else
+ 6:     Update L_m using Equation.3
+ 7:     if t % T_b == 0 then
+ 8:       Compute Δl using L_m and Equation.4
+ 9:       Compute Δw using Δl and Equation.5
+ 10:      Update W_m^t and W_I^t using Equation.6
+ 11:     end if
+ 12:   end if
+ 13: end while
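+ To make the interplay of the three components concrete, the following is a minimal Python sketch of Algorithm 1 together with the pseudo-label scoring of Equation 1; it is not the authors' code. It assumes uniform α/β weights in the Measurer, sum-normalization for norm(·), a loss-memory length of 20, and illustrative values for T_start and the momentum amplitudes; all function names are ours.
+ import math
+ from collections import deque
+ 
+ def measurer(loss_memory, n):
+     # Trend of the losses (Equation 4): average of the n most recent
+     # losses over the average of the older ones; uniform alpha/beta
+     # weights are an assumption made here for simplicity.
+     losses = list(loss_memory)              # most recent first
+     recent, older = losses[:n], losses[n:]
+     return (sum(recent) / len(recent)) / (sum(older) / len(older))
+ 
+ def sensor(delta_l, pi_pma=0.33, pi_nma=0.5):
+     # Equation 5: positive/negative momentum amplitudes scale the change.
+     if delta_l > 1:
+         return pi_nma / (1.0 + math.exp(-(delta_l - 1)))   # pi_nma * Sigmoid(dl - 1)
+     return -pi_pma * delta_l
+ 
+ def adjuster(w_m, w_i, delta_w):
+     # Equation 6: incremental update followed by normalization;
+     # sum-normalization is assumed for norm(.).
+     w_m, w_i = w_m + delta_w * w_m, w_i - delta_w * w_i
+     s = w_m + w_i
+     return w_m / s, w_i / s
+ 
+ def pseudo_label_confidence(s_o_norm, max_iou, w_m, w_i):
+     # Equation 1: S_i = norm(s_o)^(W_m) * (max_j IOU(P^I_j, P^m_i))^(W_I).
+     return (s_o_norm ** w_m) * (max_iou ** w_i)
+ 
+ # training-loop skeleton mirroring Algorithm 1 (dummy losses, assumed sizes)
+ loss_memory = deque(maxlen=20)                      # L_m, Equation 3
+ w_m, w_i = 0.8, 0.2                                 # initial self-adaptive weights
+ T_start, T_b, n_recent = 100, 150, 5
+ for t, loss in enumerate(1.0 - 0.0005 * k for k in range(2000)):
+     loss_memory.appendleft(loss)
+     if t > T_start and t % T_b == 0 and len(loss_memory) == loss_memory.maxlen:
+         delta_w = sensor(measurer(loss_memory, n_recent))
+         w_m, w_i = adjuster(w_m, w_i, delta_w)
+ In a real training loop the dummy losses would be replaced by the joint loss of Equation 11, and the resulting W_m and W_I would feed Equation 1 when scoring pseudo-label candidates.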
403
+ 3.3. Exploration of Decoupled Decoding Structure
404
+ Detection transformer [2, 3, 7, 21, 27, 38] leverages the
405
+ object queries to detect object instances, where each ob-
406
+ ject query represents an object instance.
407
+ In the decod-
408
+ ing stage, the object queries are updated to query embed-
409
+ dings by connecting object queries with semantic informa-
410
+ tion from the encoded semantic features. The generated
411
+ query embeddings couple the location and category infor-
412
+ mation for both object localization and identification pro-
413
+ cess simultaneously. For open-world object detection, the
414
+ model is required to detect the known objects, localize the un-
+ known objects and identify them as the unknown class. For
+ the parallel decoding structure, we observe that the inclu-
+ sion of unknown object detection reduces the model's ability to
+ detect known objects.
419
+ Inspired by how humans subcon-
420
+ sciously confront new scenarios, we propose to decouple
421
+ the decoding process of DETR for mitigating the impact of
422
+ unknown object detection on detecting known objects. In
423
+ this paper, we explore two decoupled decoding ways, 𝑖.𝑒.,
424
+ the fully decoupled decoding structure and the cascade de-
425
+ coupled decoding structure.
426
+ 3.3.1
427
+ Fully Decoupled Decoding Structure
428
+ For decoupling the location and category information, an
429
+ intuitive way is to carry out the localization and identifi-
430
+ cation process independently. Motivated by this, the fully
431
+ decoupled decoding structure (FD) is proposed. In the fully
432
+ decoupled decoding structure, location and class queries are
433
+ two sets of mutually independent queries sent to the shared
434
+ decoder. This operation of FD is shown in Figure 3 (a),
435
437
+ Figure 3. (a) The fully decoupled decoding structure has two independent decoding processes for localization and identification. (b) In
438
+ the cascade decoupled decoding structure, the location embeddings are used as class queries for knowledge retention. (c) For the coupled
439
+ decoding structure, the same query is put into the decoder for localization and identification.
440
+ which is formulated as follows:
441
+ ELocation = F𝑠(F𝑒(∅(J), 𝑃𝑛), 𝑃𝑚, QLocation, R),   (7)
+ EClass = F𝑠(F𝑒(∅(J), 𝑃𝑛), 𝑃𝑚, QClass, R),   (8)
445
+ where F𝑠(·) denotes the shared decoder. F𝑒(·) is the en-
446
+ coder and ∅(·) is the backbone. 𝑃𝑛 and 𝑃𝑚 stand for the
447
+ positional encoding and embeddings, respectively. R repre-
448
+ sents the reference points and J denotes the input image.
449
+ Q Class stands for the class queries.
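+ A minimal PyTorch sketch of this structure is given below; it is not the paper's implementation. A vanilla nn.TransformerDecoder stands in for the deformable shared decoder, and positional embeddings and reference points are omitted for brevity.
+ import torch
+ import torch.nn as nn
+ 
+ class FullyDecoupledDecoding(nn.Module):
+     # Sketch of Equations 7-8: one shared decoder, two independent query sets.
+     def __init__(self, dim=256, num_queries=100, num_layers=6):
+         super().__init__()
+         layer = nn.TransformerDecoderLayer(d_model=dim, nhead=8, batch_first=True)
+         self.shared_decoder = nn.TransformerDecoder(layer, num_layers)   # F_s
+         self.location_queries = nn.Embedding(num_queries, dim)           # Q_Location
+         self.class_queries = nn.Embedding(num_queries, dim)              # Q_Class
+ 
+     def forward(self, memory):
+         # memory: encoded semantic features F_e(phi(I), P_n), shape (B, HW, dim).
+         b = memory.size(0)
+         q_loc = self.location_queries.weight.unsqueeze(0).expand(b, -1, -1)
+         q_cls = self.class_queries.weight.unsqueeze(0).expand(b, -1, -1)
+         e_location = self.shared_decoder(q_loc, memory)   # Equation 7
+         e_class = self.shared_decoder(q_cls, memory)      # Equation 8
+         return e_location, e_class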
450
+ 3.3.2
451
+ Cascade Decoupled Decoding Structure
452
+ Inspired by how people react to new scenarios, a cascade
453
+ decoupled decoding structure is proposed to decode the en-
454
+ coded features in a cascade way so that the localization pro-
455
+ cess is not restricted by the category information, while the
456
+ identification process can get help from the location knowl-
457
+ edge in the cascade structure. The operation of localization
458
+ and identification cascade decoding structure is expressed
459
+ as follows:
460
+ ELocation = F𝑠(F𝑒(∅(J), 𝑃𝑛), 𝑃𝑚, QLocation, R),   (9)
+ EClass = F𝑠(F𝑒(∅(J), 𝑃𝑛), 𝑃𝑚, ELocation, R).   (10)
464
+ As shown in Figure.3 (b), the location embeddings are used
465
+ as class queries to generate class embeddings.
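+ The cascade variant can be sketched in the same spirit (again ours, not the released code): the location embeddings simply replace the class queries in the second pass through the shared decoder.
+ import torch
+ import torch.nn as nn
+ 
+ def cascade_decoupled_decoding(shared_decoder: nn.TransformerDecoder,
+                                location_queries: torch.Tensor,
+                                memory: torch.Tensor):
+     # location_queries: (B, N, dim) learned location queries Q_Location.
+     # memory: (B, HW, dim) encoded semantic features.
+     e_location = shared_decoder(location_queries, memory)   # Equation 9
+     # The location embeddings act as the class queries, so identification
+     # can draw on the localization knowledge.
+     e_class = shared_decoder(e_location, memory)             # Equation 10
+     return e_location, e_class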
466
+ 3.4. Training and Inference
467
+ Our CAT is trained end-to-end using the following joint
468
+ loss formulation:
+ L = L_localization + L_identification + L_objectness,   (11)
+ where L_localization, L_identification and L_objectness de-
+ note the loss terms for foreground localization, novelty
473
+ identification and object scoring, respectively. When a set
474
+ of new categories are introduced at each episode, we em-
475
+ ploy an exemplar replay based finetuning to alleviate catas-
476
+ trophic forgetting of learned classes and then finetune the
477
+ model using a balanced set of exemplars stored for each
478
+ known class. The bounding box and category predic-
+ tions of the known and 𝑡𝑜𝑝-k unknown objects are used
+ simultaneously during evaluation.
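+ A small sketch of the joint objective and of the top-k unknown selection used at evaluation time is given below; Equation 11 shows no explicit weighting coefficients, so any balancing factors would be an implementation choice, and the function names are ours.
+ import torch
+ 
+ def cat_joint_loss(loss_localization, loss_identification, loss_objectness):
+     # Equation 11: sum of the three loss terms.
+     return loss_localization + loss_identification + loss_objectness
+ 
+ def select_topk_unknown(unknown_scores: torch.Tensor, k: int = 5):
+     # Keep the k highest-scoring unknown detections of an image
+     # (k = 5 pseudo-labels in the reported setting).
+     k = min(k, unknown_scores.numel())
+     scores, idx = torch.topk(unknown_scores, k)
+     return scores, idx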
481
+ 4. Experiments
482
+ 4.1. Datasets and Metrics
483
+ The experiments are implemented on two mainstream
484
+ splits of MS-COCO [23] and Pascal VOC [10] dataset.
485
+ We group the classes into a set of nonoverlapping tasks
+ {𝑇1, ..., 𝑇𝑡, ...}. A class introduced in task 𝑇𝑐 only appears in
+ tasks 𝑇𝑡 where 𝑡 ≥ 𝑐. In task 𝑇𝑡, classes encountered in {𝑇𝑐 : 𝑐 ≤ 𝑡}
+ and {𝑇𝑐 : 𝑐 > 𝑡} are considered as known and unknown
+ classes, respectively.
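+ As a concrete reading of this protocol, the toy sketch below partitions class names into known and unknown sets for a given task index; the class lists are made up for illustration.
+ def split_known_unknown(task_classes, current_task):
+     # task_classes: list of per-task class lists [T1, T2, ...];
+     # at task t, classes from tasks c <= t are known, the rest unknown.
+     known = [c for task in task_classes[:current_task] for c in task]
+     unknown = [c for task in task_classes[current_task:] for c in task]
+     return known, unknown
+ 
+ # toy example with a 4-task protocol
+ tasks = [["person", "car"], ["truck", "bench"], ["pizza"], ["laptop"]]
+ known, unknown = split_known_unknown(tasks, current_task=2)   # after Task 2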
493
+ OWOD SPLIT [17] splits the 80 classes of MS-COCO into
+ 4 tasks and selects the training set for each task from the MS-
+ COCO and Pascal VOC training set images. The Pascal VOC
+ test set and MS-COCO validation set are used for evalua-
497
+ tion. See more details in Appendix A.2.
498
+ MS-COCO SPLIT [13] mitigates data leakage across tasks
499
+ in [17] and is more challenging. The training and testing
500
+ data are selected from MS-COCO.
501
+ Metrics: Following the most commonly used evaluation
502
+ metric for object detection, we use mean average preci-
503
+ sion (mAP) to evaluate the known objects.
504
+ Inspired by
505
+ [1,9,13,17,25], U-Recall, Wilderness Impact (WI, see de-
+ tails in Appendix A.3) and Absolute Open-Set Error (A-
+ OSE) are used as the main metrics for unknown objects. U-
508
+ Recall measures the ability of the model to retrieve un-
509
+ [Figure 3 diagram: (a) Fully Decoupled Decoding, (b) Cascade Decoupled Decoding, (c) Coupled Decoding.]
+ Table 1. State-of-the-art comparison on OWOD split. The comparison is shown in terms of U-Recall, WI, A-OSE and known class mAP.
+ U-Recall measures the ability of the model to retrieve unknown object instances for the OWOD problem. Both WI and A-OSE implicitly
+ quantify the effectiveness of the model in handling unknown objects. For a fair comparison, we compare with the recently introduced
+ OW-DETR [13] and ORE [17] not employing EBUI (the results are reproduced on the same GPUs as our model). CAT improves all
+ metrics over the existing works across all tasks, demonstrating our model's effectiveness for the OWOD problem. U-Recall, WI
+ and A-OSE cannot be computed in Task 4 because all 80 classes are known and there are no unknown test annotations.
545
+ | Method | Task 1 U-Recall(↑) | Task 1 WI(↓) | Task 1 A-OSE(↓) | Task 1 mAP(↑) Current known | Task 2 U-Recall(↑) | Task 2 WI(↓) | Task 2 A-OSE(↓) | Task 2 mAP(↑) Previously known | Task 2 mAP(↑) Current known | Task 2 mAP(↑) Both | Task 3 U-Recall(↑) | Task 3 WI(↓) | Task 3 A-OSE(↓) | Task 3 mAP(↑) Previously known | Task 3 mAP(↑) Current known | Task 3 mAP(↑) Both | Task 4 mAP(↑) Previously known | Task 4 mAP(↑) Current known | Task 4 mAP(↑) Both |
+ | Faster-RCNN [33] | - | 0.0699 | 13396 | 56.4 | - | 0.0371 | 12291 | 3.7 | 26.7 | 15.2 | - | 0.0213 | 9174 | 2.5 | 15.2 | 6.7 | 0.8 | 14.5 | 4.2 |
+ | Faster-RCNN + Finetuning | Not applicable in Task 1 | | | | - | 0.0375 | 12497 | 51.0 | 25.0 | 38.0 | - | 0.0279 | 9622 | 38.2 | 13.6 | 30.0 | 29.7 | 13.0 | 25.6 |
+ | DDETR [38] | - | 0.0608 | 33270 | 60.3 | - | 0.0368 | 18115 | 4.5 | 31.3 | 17.9 | - | 0.0197 | 9392 | 3.3 | 22.5 | 8.5 | 2.5 | 16.4 | 6.0 |
+ | DDETR + Finetuning | Not applicable in Task 1 | | | | - | 0.0337 | 17834 | 54.5 | 34.4 | 44.8 | - | 0.0195 | 10095 | 40.0 | 17.8 | 33.3 | 32.5 | 20.0 | 29.4 |
+ | Cascade | - | 0.0476 | 42083 | 60.5 | - | 0.0308 | 21928 | 5.0 | 33.7 | 19.2 | - | 0.0189 | 12189 | 4.2 | 24.9 | 10.2 | 3.6 | 18.2 | 7.6 |
+ | Cascade + Finetuning | Not applicable in Task 1 | | | | - | 0.0296 | 20587 | 55.4 | 35.0 | 46.0 | - | 0.0184 | 12854 | 42.4 | 19.2 | 35.2 | 34.6 | 21.8 | 31.6 |
+ | ORE-EBUI [17] | 4.9 | 0.0621 | 10459 | 56.0 | 2.9 | 0.0282 | 10445 | 52.7 | 26.0 | 39.4 | 3.9 | 0.0211 | 7990 | 38.2 | 12.7 | 29.7 | 29.6 | 12.4 | 25.3 |
+ | OW-DETR [13] | 7.1 | 0.0590 | 10248 | 58.9 | 6.8 | 0.0279 | 8540 | 52.9 | 29.1 | 41.0 | 7.8 | 0.0191 | 6840 | 38.1 | 14.7 | 30.3 | 30.8 | 13.3 | 26.4 |
+ | Ours:CAT | 21.8 | 0.0581 | 7070 | 59.9 | 18.6 | 0.0263 | 5902 | 54.0 | 33.6 | 43.8 | 23.9 | 0.0177 | 5189 | 42.1 | 19.8 | 34.7 | 35.1 | 17.1 | 30.6 |
+ |  | (+14.7) | (-0.0009) | (-3178) | (+1.0) | (+11.8) | (-0.0016) | (-2638) | (+1.1) | (+4.5) | (+2.8) | (+16.1) | (-0.0014) | (-1651) | (+4.0) | (+5.1) | (+4.4) | (+4.3) | (+3.8) | (+4.2) |
782
+ Table 2. State-of-the-art comparison on MS-COCO split. The
783
+ comparison is shown in terms of U-Recall and mAP. Although
784
+ the MS-COCO split is more challenging, our model gets a more
785
+ significant improvement on this in comparison to ORE and OW-
786
+ DETR. The significant metric improvements demonstrate that our
787
+ CAT has the ability to retrieve new knowledge beyond the range
788
+ of closed set and would not be limited by category knowledge of
789
+ existing objects. See Sec.4.3 for more details.
790
+ | Task IDs ↓ | Metrics | ORE | OW-DETR | Ours:CAT |
+ | Task1 | U-Recall(↑) | 1.5 | 5.7 | 24.0 (+18.3) |
+ | Task1 | mAP(↑) Current known | 61.4 | 71.5 | 74.2 (+2.7) |
+ | Task2 | U-Recall(↑) | 3.9 | 6.2 | 23.0 (+16.8) |
+ | Task2 | mAP(↑) Previously known | 56.5 | 62.8 | 67.6 (+4.8) |
+ | Task2 | mAP(↑) Current known | 26.1 | 27.5 | 35.5 (+8.0) |
+ | Task2 | mAP(↑) Both | 40.6 | 43.8 | 50.7 (+6.9) |
+ | Task3 | U-Recall(↑) | 3.6 | 6.9 | 24.6 (+17.7) |
+ | Task3 | mAP(↑) Previously known | 38.7 | 45.2 | 51.2 (+6.0) |
+ | Task3 | mAP(↑) Current known | 23.7 | 24.9 | 32.6 (+7.7) |
+ | Task3 | mAP(↑) Both | 33.7 | 38.5 | 45.0 (+6.5) |
+ | Task4 | mAP(↑) Previously known | 33.6 | 38.2 | 45.4 (+7.2) |
+ | Task4 | mAP(↑) Current known | 26.3 | 28.1 | 35.1 (+7.0) |
+ | Task4 | mAP(↑) Both | 31.8 | 33.1 | 42.8 (+9.7) |
855
+ known object instances for the OWOD problem. Both WI and
+ A-OSE implicitly quantify the effectiveness of the model in
857
+ handling unknown objects.
858
+ 4.2. Implementation Details
859
+ The multi-scale feature extractor consists of a Resnet-
860
+ 50 [16] pretrained on ImageNet [8] in a self-supervised [4]
861
+ manner and a deformable transformer encoder whose num-
+ ber of layers is set to 6. For the shared decoder, we use a
+ deformable transformer decoder whose number of layers
+ is also set to 6. We set the number of queries 𝑀 = 100, the
+ dimension of the embeddings 𝐷 = 256 and the number of
+ pseudo-labels 𝑘 = 5. During inference, the 𝑡𝑜𝑝-50 highest scor-
+ ing detections are used for evaluation per image. More
+ details are described in Appendix A.4.
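+ For reference, the hyperparameters stated above can be collected into a small configuration object; the field names below are ours and the backbone string is only a mnemonic for the self-supervised ResNet-50 pretraining.
+ from dataclasses import dataclass
+ 
+ @dataclass
+ class CATConfig:
+     backbone: str = "resnet50_dino_pretrained"   # self-supervised ImageNet pretraining
+     num_encoder_layers: int = 6
+     num_decoder_layers: int = 6
+     num_queries: int = 100          # M
+     embed_dim: int = 256            # D
+     num_pseudo_labels: int = 5      # k
+     top_detections_per_image: int = 50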
869
+ 4.3. Comparison With State-of-the-art Methods
870
+ For a fair comparison, we compare CAT with ORE [17]
871
+ without the energy-based unknown identifier (EBUI) that
872
+ relies on held-out validation data with weak unknown object
873
+ supervision and OW-DETR [13] to demonstrate the effec-
874
+ tiveness of our method for OWOD problem. We present the
875
+ comparison in terms of known class mAP, unknown class
876
+ recall, WI, and A-OSE, where U-Recall, WI and A-OSE
877
+ cannot be computed in Task 4 due to the absence of un-
878
+ known test annotations, for the reason that all 80 classes are
879
+ known. Furthermore, we demonstrate the effectiveness of
880
+ our model for incremental object detection in comparison
881
+ to [13,17,30,35].
882
+ OWOD SPLIT: The results compared with the state-of-
883
+ the-art methods on OWOD split for OWOD problem are
884
+ shown in Table.1. The performance of the proposed standard
+ cascade detection transformer is also reported and com-
+ pared with Faster R-CNN [33] and the standard Deformable
+ DETR [38] frameworks, to demonstrate the power of the
+ localization identification cascade structure.
889
+ These three
890
+ can only identify known objects, and so U-Recall cannot
891
+ be computed for them. Benefiting from the self-adaptive
892
+ pseudo-labelling, the ability of CAT to detect unknown ob-
893
+ jects goes substantially beyond the existing models. Com-
894
+ pared with OW-DETR’s U-Recall of 7.1, 6.8 and 7.8 on
895
+ Task 1, 2 and 3, our CAT achieves 21.8, 18.6 and 23.9 in the
896
898
+ corresponding tasks, achieving significant absolute gains up
899
+ to 16.1%. In terms of WI and A-OSE, CAT also exceeds
900
+ them in all tasks. The ability to detect known objects and al-
+ leviate catastrophic forgetting of previous knowledge is also
+ improved, with absolute gains of up to 4.4%. This demonstrates
+ the effectiveness of the cascade decoding structure.
905
+ In addition, we report qualitative results in Figure.4, along
906
+ with failure case analysis. See more detailed qualitative re-
907
+ sults in Appendix B.2.
908
+ MS-COCO SPLIT: We report the results on MS-COCO
909
+ split in Table.2.
910
+ MS-COCO split mitigates data leakage
911
+ across tasks and assigns more data to each task, and CAT
+ receives a more significant boost compared with the OWOD
913
+ split. Compared with OW-DETR’s U-Recall of 5.7, 6.2 and
914
+ 6.9 on Task 1, 2 and 3, our CAT achieves 24.0, 23.0 and
915
+ 24.6 in the corresponding tasks, achieving significant abso-
916
+ lute gains up to 18.3%. Furthermore, the performance on
917
+ detecting known objects achieves significant absolute gains
918
+ up to 9.7%. This demonstrates that our CAT has a more
919
+ powerful ability to retrieve new knowledge and detect the
920
+ known objects when faced with more difficult tasks.
921
+ Figure 4. Predictions from CAT after being trained on Task 1.
922
+ The results show that the model not only detects other categories
923
+ in the total category that have not yet been learned, such as ‘key-
924
+ board’, ‘kite’ and ‘dining table’, but also accurately detects cate-
925
+ gories outside the total category, such as ‘calendar’, ‘table lamp’
926
+ and ‘rubbish bins’. The approach misclassifies two of the ‘bird’
927
+ as ‘aeroplane’ and ‘unknown’, showing the limitation of CAT. See
928
+ more detailed qualitative results and analysis in Appendix B.2.
929
+ Incremental Object Detection: To intuitively present our
930
+ CAT’s ability for detecting object instances, we compare it
931
+ to [13,17,30,35] on the incremental object detection (IOD)
932
+ task. We evaluate the experiments on three standard set-
933
+ tings, where a group of classes (10, 5 and last class) are in-
934
+ troduced incrementally to a detector trained on the remain-
935
+ ing classes (10, 15 and 19), based on PASCAL VOC 2007
936
+ dataset [10]. As the results in Table.3 show, CAT outper-
+ forms the existing methods by a large margin on all three
+ settings, indicating the power of the localization and identifica-
+ tion cascade detection transformer for IOD.
940
+ Table 3. State-of-the-art comparison for incremental object detec-
941
+ tion for three different settings on PASCAL VOC dataset. The
942
+ comparison is shown in terms of overall mAP. Our CAT achieves
+ significantly better performance than existing works on all
+ three settings. See more details in Sec.4.3 and Appendix B.1.
945
+ | Method | 10+10 settings | 15+5 settings | 19+1 settings |
+ | ILOD [35] | 63.2 | 65.8 | 68.2 |
+ | Faster ILOD [30] | 62.1 | 67.9 | 68.5 |
+ | ORE [17] | 64.5 | 68.5 | 68.8 |
+ | OW-DETR [13] | 65.7 | 69.4 | 70.2 |
+ | Ours: CAT | 67.7 (+2.0) | 72.2 (+2.8) | 73.8 (+3.6) |
969
+ 4.4. Ablation Study
970
+ We conduct abundant ablative experiments to verify the
971
+ effectiveness of CAT’s components on the OWOD split
972
+ [17].
973
+ Cascade Decoupled Decoding Structure: We compare
974
+ between OW-DETR, fully decoupled decoding structure
975
+ and CAT in Table.4. The results illustrate that the decoupled
976
+ decoding structure improves the performance of detecting
977
+ known objects and does mitigate the influence of unknown
978
+ objects on the detection of known objects to some extent.
979
+ Because it reduces the difficulty of parameter learning and
980
+ mitigates the risk of confusion for known and unknown ob-
981
+ jects by disassembling the localization and identification
982
+ process of detection. Compared with the fully decoupled
983
+ decoding structure, the cascade decoupled decoding struc-
984
+ ture is able to allow the identification process to draw on
985
+ location information while the localization process is not
986
+ limited by category knowledge and outperforms it.
987
+ Self-Adaptive Pseudo-labelling: As shown in Figure.5 (a)
988
+ and (b), we performed a number of ablation experiments
989
+ on Task 1 for different update cycles, positive and nega-
990
+ tive momentum amplitudes. The results demonstrate that
991
+ the self-adaptive pseudo-labelling makes the training pro-
992
+ cess of CAT robust, as we analyzed earlier. Especially for
993
+ the pink line, even if there are unexpected situations in the
994
+ training process, CAT can still self-adjust and develop in
995
+ a good direction. In addition, we compare the attention-
996
+ driven (AD) and self-adaptive (SA) pseudo-labelling mech-
997
+ anism in Table.5 and Figure.5 (c). The results demonstrate
998
+ that our self-adaptive pseudo-labelling mechanism signifi-
999
+ cantly improves the model’s ability to retrieve unknown ob-
1000
+ jects. During training, CAT requires double decoding pro-
1001
+ cesses so that it is affected by generated pseudo-labels twice
1002
+ as often as OW-DETR. Thus, for the high quality pseudo-
1003
+ [Figure 4 image: qualitative detections from CAT with class labels and confidence scores.]
+ Figure 5. (a) and (b) illustrate performance comparison between different update cycles, positive and negative momentum amplitude on
1026
+ A-OSE and U-Recall. The cycle is set to 150 and 300, the positive momentum amplitude to 25%, 33% and 50%, and the negative
+ momentum amplitude to 50%. The lighter coloured lines are the real data and the corresponding darker coloured lines
+ are the data after smoothing. (c) shows the performance comparison between AD and SA. See details in Sec.4.4.
1029
+ Table 4. Performance comparison between different decoupled de-
1030
+ coding structures and OW-DETR. ‘FD’ refers to the fully decou-
1031
+ pled decoding structure. See more details in Sec.4.4.
1032
+ | Task IDs ↓ | mAP(↑) | OW-DETR | FD | CAT |
+ | Task1 | Current known | 59.3 | 57.9 | 59.9 |
+ | Task2 | Previously known | 53.0 | 49.5 | 54.0 |
+ | Task2 | Current known | 29.4 | 29.4 | 33.6 |
+ | Task2 | Both | 41.3 | 39.4 | 43.8 |
+ | Task3 | Previously known | 38.1 | 41.2 | 42.1 |
+ | Task3 | Current known | 15.0 | 18.5 | 19.8 |
+ | Task3 | Both | 30.5 | 33.5 | 34.7 |
+ | Task4 | Previously known | 30.6 | 33.3 | 35.1 |
+ | Task4 | Current known | 14.0 | 15.8 | 17.1 |
+ | Task4 | Both | 26.8 | 28.9 | 30.6 |
1085
+ labels, CAT could learn better to detect unknown objects
1086
+ than OW-DETR. For the low quality pseudo-labels, CAT
1087
+ would also be affected to a greater extent. The results in Ta-
1088
+ ble.5 further support this observation and demonstrate the robust-
1089
+ ness of our pseudo-labelling mechanism to generate pseudo
1090
+ labels.
1091
+ Open-set Detection Comparison: To further demonstrate
1092
+ CAT’s ability to handle unknown instances in open-set data,
1093
+ we follow the same evaluation protocol as [13, 17, 26] and
1094
+ report the performance in Table.6. CAT achieves promising
1095
+ performance in comparison to the existing methods.
1096
+ 5. Relation to Prior Works
1097
+ Standard object detection [3,6,12,15,22,24,
+ 29,32,33,38,40] has been studied for several years, and numer-
+ ous works have investigated this problem and pushed the field
+ to new heights. However, the strong assumption that the
+ label space of object categories encountered during the
+ life-span of the model is the same as during its training
+ means that these methods cannot meet real-world needs. The
1104
+ success of [11,18–20,28,33] demonstrates the feasibility of
1105
+ Table 5. Performance comparison between the AD and SA pseudo-labelling
+ mechanisms. The results demonstrate that SA substantially en-
+ hances the model's ability to retrieve unknown objects.
1109
+ | Method | Task IDs ↓ | AD | SA | U-Recall | WI | A-OSE |
+ | OW-DETR | Task1 | ✓ |  | 7.1 | 0.0590 | 10248 |
+ | OW-DETR | Task1 |  | ✓ | 19.8 | 0.0578 | 8360 |
+ | OW-DETR | Task2 | ✓ |  | 6.8 | 0.0279 | 8540 |
+ | OW-DETR | Task2 |  | ✓ | 16.8 | 0.0268 | 6452 |
+ | OW-DETR | Task3 | ✓ |  | 7.8 | 0.0191 | 6840 |
+ | OW-DETR | Task3 |  | ✓ | 21.8 | 0.0175 | 5310 |
+ | CAT | Task1 | ✓ |  | 5.4 | 0.0533 | 41474 |
+ | CAT | Task1 |  | ✓ | 21.8 | 0.0581 | 7070 |
+ | CAT | Task2 | ✓ |  | 4.9 | 0.0271 | 20410 |
+ | CAT | Task2 |  | ✓ | 18.6 | 0.0263 | 5902 |
+ | CAT | Task3 | ✓ |  | 6.0 | 0.0186 | 11078 |
+ | CAT | Task3 |  | ✓ | 23.9 | 0.0177 | 5189 |
1184
+ Table 6. Performance comparison on open-set object detection
1185
+ task. Our CAT achieves significant performance in comparison to
1186
+ existing works. See more details in Sec.4.4.
1187
+ | Evaluated on → | VOC | WR1 |
+ | Standard Faster R-CNN [35] | 81.8 | 77.1 |
+ | Standard RetinaNet | 79.2 | 73.8 |
+ | Dropout Sampling [26] | 78.1 | 71.1 |
+ | ORE [17] | 81.3 | 78.2 |
+ | OW-DETR [13] | 82.1 | 78.6 |
+ | Ours: CAT | 83.2 (+1.1) | 79.5 (+0.9) |
1208
+ foreground localization based on the position and appear-
1209
+ ance of objects. ORE [17] and OW-DETR [13] leverage
1210
+ the models of standard object detection and pseudo labels
1211
+ to detect objects in open world. In this paper, we propose a
1212
+ novel transformer [37] based framework, CAT, for OWOD.
1213
+ CAT decouples the localization and identification process
1214
+ and connects them in a cascade approach. In CAT, the fore-
1215
+ ground localization process is not limited by the category
1216
+ [Figure 5 plots: A-OSE and U-Recall curves during training; the legend compares Tp=150/300 with Tpma=25%/33%/50% and Tnma=50%, and CAT / OW-DETR with Self_Adaptive versus Attention_Driven pseudo-labelling.]
+ of known objects, whereas the process of foreground iden-
1267
+ tification can use information from the localization process.
1268
+ Along with self-adaptive pseudo-labelling, CAT can gain
1269
+ information beyond the data annotation and maintain a sta-
1270
+ ble learning process according to self-regulation.
1271
+ 6. Conclusions
1272
+ In this paper, we analyze the drawbacks of the paral-
1273
+ lel decoding structure for open-world object detection and
1274
+ explore the decoupled decoding structures of the detection
1275
+ transformer. Motivated by the subconscious reactions of
1276
+ humans when facing new scenes, we propose a novel lo-
1277
+ calization and identification cascade detection transformer
1278
+ (CAT), which decouples the localization and identification
1279
+ process via the cascade decoding structure. The cascade
1280
+ decoding structure alleviates the influence of detecting un-
1281
+ known objects on the detection of known objects.
1282
+ With
1283
+ the self-adaptive pseudo-labelling mechanism, CAT gains
1284
+ knowledge beyond the data annotation, generates pseudo
1285
+ labels with robustness and maintains a stable training pro-
1286
+ cess via self-adjustment. The extensive experiments on two
1287
+ popular benchmarks, 𝑖.𝑒., PASCAL VOC and MS COCO
1288
+ demonstrate that CAT consistently outperforms the existing
1289
+ works for all task settings on all splits and achieves state-of-
1290
+ the-art performance in the incremental object detection and
1291
+ open-set detection.
1292
+ Acknowledgment
1293
+ This work is supported by National Natural Science
+ Foundation of China (grant No.61871106 and No.61370152),
+ Key R&D projects of Liaoning Province, China (grant
+ No.2020JH2/10100029), and the Open Project Program Foun-
+ dation of the Key Laboratory of Opto-Electronics Infor-
+ mation Processing, Chinese Academy of Sciences (OEIP-
+ O-202002).
1300
+ A. Additional Experiments Material
1301
+ A.1. Theory For Self-Adaptive Pseudo-labelling
1302
+ For 0 < 𝑤2 < 𝑤1 < 1, we find the potential relationship
1303
+ as follows:
+ x^{w1} > x^{w2}, if x > 1;   x^{w1} < x^{w2}, if x < 1.   (12)
+ Thus, for the product x^{w1} · y^{w2} with w1 > w2, x is weighted more
+ heavily if x > 1 and y > 1, and y is weighted more heavily if x < 1 and y < 1.
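+ This relationship is easy to verify numerically; the snippet below is only a sanity check of Equation 12 with arbitrary exponents.
+ w1, w2 = 0.8, 0.2
+ for x in (2.0, 0.5):
+     print(x, x ** w1, x ** w2)   # x^0.8 > x^0.2 for x > 1, reversed for x < 1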
1309
+ For the self-adaptive pseudo-labelling, we first normal-
1310
+ ize 𝑠𝑜 to the range 0 to 1. Considering that the model it-
1311
+ self has little knowledge in the early stages of model train-
1312
+ ing, the model-driven pseudo-labelling should weight less
1313
+ than the input-driven pseudo-labelling. As the training time
+ of the model increases, the knowledge base of the model
1315
+ grows and the weight of the model-driven pseudo-labelling
1316
+ gets bigger. Combining this with the patterns above, we set
1317
+ W𝑚0 to 0.8, W𝐼0 to 0.2 and update them as follows:
+ W^t_m = W^{t−1}_m + Δw × W^{t−1}_m,
+ W^t_I = W^{t−1}_I − Δw × W^{t−1}_I,
+ W^t_m, W^t_I = norm(W^t_m, W^t_I).   (13)
1339
+ A.2. Additional Illustration For Data Split
1340
+ As shown in Table.7, the OWOD split proposed in ORE
1341
+ groups all VOC classes and data as 𝑇𝑎𝑠𝑘 1. The remaining
1342
+ 60 classes of MS-COCO are grouped into three successive
1343
+ tasks (𝑇𝑎𝑠𝑘 2, 3, 4) with semantic drifts. However, it leads to
+ data leakage across tasks since different classes which be-
+ long to the same super-category are introduced in different tasks.
+ The MS-COCO split proposed in OW-DETR is a stricter
+ split, where all the classes of a super-category are intro-
+ duced together in a single task.
1349
+ Table 7. The table shows task composition in the OWOD and MS-
1350
+ COCO split for Open-world evaluation protocol. The semantics of
1351
+ each task and the number of images and instances(objects) across
1352
+ splits are shown.
1353
+ OWOD split
+ | Task ID | Task 1 | Task 2 | Task 3 | Task 4 |
+ | Semantic split | VOC Classes | Outdoor, Accessories, Appliances, Truck | Sports, Food | Electronic, Indoor, Kitchen, Furniture |
+ | # training images | 16551 | 45520 | 39402 | 40260 |
+ | # test images | 4952 | 1914 | 1642 | 1738 |
+ | # train instances | 47223 | 113741 | 114452 | 138996 |
+ | # test instances | 14976 | 4966 | 4826 | 6039 |
+ MS-COCO split
+ | Task ID | Task 1 | Task 2 | Task 3 | Task 4 |
+ | Semantic split | Animals, Person, Vehicles | Appliances, Accessories, Outdoor, Furniture | Sports, Food | Electronic, Indoor, Kitchen |
+ | # training images | 89490 | 55870 | 39402 | 38903 |
+ | # test images | 3793 | 2351 | 1642 | 1691 |
+ | # train instances | 421243 | 163512 | 114452 | 160794 |
+ | # test instances | 17786 | 7159 | 4826 | 7010 |
1418
+ A.3. WI, A-OSE and U-Recall Metrics
1419
+ In this paper, we mainly illustrate the state-of-the-art
1420
+ comparison in terms of wilderness impact (WI), absolute
1421
+ open-set error (A-OSE), unknown recall (U-Recall) and
1422
+ mean average precision (mAP). WI measures the model’s
1423
+ confusion in predicting an unknown instance as a known
1424
+ class. The calculation formula is as follows:
1425
+ WI = 𝑃K / 𝑃K∪U − 1,   (14)
1430
+ where 𝑃K is the precision on known classes and 𝑃K∪U
+ is the precision on known and unknown classes. A-OSE
+ denotes the total number of unknown instances detected as
1433
+ known classes. Both WI and A-OSE indicate the degree of
1434
+ confusion in predicting the known classes in the presence
1435
1437
+ of unknown instances. Furthermore, U-Recall directly mea-
1438
+ sures the model’s ability to retrieve the unknown instances.
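+ The three unknown-object metrics can be sketched as follows; this is our simplified reading, and it assumes the matching of predictions to ground truth has already been done elsewhere.
+ def wilderness_impact(precision_known_only, precision_with_unknown):
+     # Equation 14: WI = P_K / P_{K∪U} - 1.
+     return precision_known_only / precision_with_unknown - 1.0
+ 
+ def unknown_recall(num_unknown_retrieved, num_unknown_gt):
+     # U-Recall: fraction of ground-truth unknown instances that are detected.
+     return num_unknown_retrieved / max(num_unknown_gt, 1)
+ 
+ def absolute_open_set_error(pred_labels, gt_is_unknown):
+     # A-OSE: unknown instances whose prediction carries a known-class label.
+     return sum(1 for lbl, unk in zip(pred_labels, gt_is_unknown)
+                if unk and lbl != "unknown")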
1439
+ A.4. Additional Implementation Details
1440
+ For selective search, we use the selective_search
+ function in the Selectivesearch library, and the scale, sigma and
+ min size parameters are set to 500, 0.9 and 200, respec-
+ tively. In addition, we eliminate candidate boxes with fewer
1444
+ than 2000 pixel points. The multi-scale feature maps ex-
1445
+ tracted from the backbone are projected to feature maps
1446
+ with 256-channels using 1 × 1 convolution filters and used
1447
+ as multi-scale input to deformable transformer encoder. The
1448
+ PyTorch library and eight NVIDIA RTX 3090 GPUs are
1449
+ used to train our CAT framework with a batch size of 3 im-
1450
+ ages per GPU. In each task, the CAT framework is trained
1451
+ for 50 epochs and finetuned for 20 epochs during the in-
1452
+ cremental learning step. We train our CAT using the Adam
1453
+ optimizer with a base learning rate of 2 × 10−4, 𝛽1 = 0.9,
1454
+ 𝛽2 = 0.999, and weight decay of 10−4. For finetuning dur-
1455
+ ing incremental step, the learning rate is reduced by a factor
1456
+ of 10 and trained using a set of 50 stored exemplars per
1457
+ known class.
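+ A sketch of the input-driven proposal step with the parameters above is shown below; it assumes the common selectivesearch Python package interface (regions returned as dicts with 'rect' and 'size' keys) and is not the authors' exact pipeline.
+ import numpy as np
+ import selectivesearch   # pip install selectivesearch
+ 
+ def input_driven_proposals(image: np.ndarray):
+     # Input-driven pseudo-label candidate boxes P_I via selective search,
+     # with scale=500, sigma=0.9, min_size=200 as reported above.
+     _, regions = selectivesearch.selective_search(
+         image, scale=500, sigma=0.9, min_size=200)
+     boxes = []
+     for r in regions:
+         if r["size"] < 2000:          # drop candidates with fewer than 2000 pixels
+             continue
+         x, y, w, h = r["rect"]
+         boxes.append((x, y, x + w, y + h))
+     return boxes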
1458
+ B. Additional Results
1459
+ B.1. Incremental Object Detection
1460
+ Table.8 shows a detailed comparison of CAT with exist-
1461
+ ing approaches on PASCAL VOC. Evaluation is performed
1462
+ on three standard settings, where a group of classes (10, 5
1463
+ and last class) are introduced incrementally to a detector
1464
+ trained on the remaining classes (10,15 and 19). Our CAT
1465
+ performs favorably against existing approaches on all three
1466
+ settings, illustrating the power of localization identification
1467
+ cascade detection transformer for incremental object de-
1468
+ tection.
1469
+ B.2. Additional Qualitative Results
1470
+ Figure.6 illustrates the visualization results comparison
1471
+ between OW-DETR and our CAT. We use OW-DETR and
1472
+ CAT which are both trained on Task 1, the known classes
1473
+ are ‘aeroplane’, ‘bicycle’, ‘bird’, ‘boat’, ‘bottle’, ‘bus’,
1474
+ ‘car’, ‘cat’, ‘chair’, ‘cow’, ‘diningtable’, ‘dog’, ‘horse’,
1475
+ ‘motorbike’, ‘person’, ‘pottedplant’, ‘sheep’, ‘sofa’, ‘train’
1476
+ and ‘tvmonitor’. The results show that our CAT substan-
1477
+ tially outperforms OW-DETR in terms of the ability to ex-
1478
+ plore unknown objects and the accuracy of detection due to
1479
+ the clever cascade decoupled decoding structure and self-
1480
+ adaptive pseudo-labelling. As shown in the first row, OW-
1481
+ DETR identifies the background and known objects as un-
1482
+ knowns and the real unknown object (carton) as the back-
1483
+ ground, and our model accurately identifies the carton as
1484
+ the unknown object. In the second row, OW-DETR iden-
1485
+ tifies the two calendars as the chair and the background,
1486
+ respectively, and the keyboard as the background, and our
1487
+ CAT accurately identifies them as unknown objects. The
1488
+ third row shows that OW-DETR fails to detect the true
1489
+ unknown object (frame) and identifies two known objects
1490
+ (sofa) as one. Our model accurately identifies the frame as
1491
+ an unknown object and also accurately identifies the two
1492
+ sofas.
1493
+ Figure.7 describes the visualization results comparison
1494
+ between CAT and Oracle. We visualize the detection results
1495
+ of our model for known and unknown objects, as well as the
1496
+ ground truth on the tasks corresponding to the weights, in-
1497
+ cluding the labels of known and unknown categories, where
1498
+ the objects of unknown categories are the objects of other
1499
+ categories that have not yet appeared in the total categories
1500
+ of the dataset.
1501
+ Our model can accurately detect known
1502
+ objects and unknown objects outside the total class of the
1503
+ dataset, such as the electric plug and sound switch in the
1504
+ first row, the camera in the second row and the kitten toy
1505
+ in the third row. It is also worth noting that although our
1506
+ model detects the audio, it does not identify it as an un-
1507
+ known object, but as a remote, showing the limitations of
1508
+ our model.
1509
+ Figure.8 exhibits the visualization performance on in-
1510
+ cremental object detection. We visualize the detection re-
1511
+ sults of the weights corresponding to different tasks for the
1512
+ same scenario. The results show that our CAT can identify
1513
+ unknown kinds of objects as the unknown class and accu-
1514
+ rately identify their classes after incrementally learning the
1515
+ unknown classes, such as sports ball and tennis racket in
1516
+ the first row, surfboard in the second row and traffic light in
1517
+ the third row.
1518
+ C. Societal Impact and Limitations
1519
+ Open-world object detection makes artificial intelligence
1520
+ smarter to face more problems in real life. It takes object de-
1521
+ tection to a cognitive level, as the model requires more than
1522
+ simply remembering the objects learned, it requires deeper
1523
+ thinking about the scene.
1524
+ Although our results demonstrate significant improve-
1525
+ ments over ORE and OW-DETR in terms of WI, A-OSE,
1526
+ U-Recall and mAP, the performances are still on the lower
1527
+ side due to the challenging nature of the open-world de-
1528
+ tection problem.
1529
+ In this paper, we are mainly commit-
1530
+ ted to enhance the model’s ability to explore unknown
1531
+ classes. However, the confidence level of our model for
1532
+ the detection of unknown objects still needs to be im-
1533
+ proved, and this is what we will strive for in the fu-
1534
+ ture.
1535
+ References
1536
+ [1] Ankan Bansal, Karan Sikka, Gaurav Sharma, Rama Chel-
1537
+ lappa, and Ajay Divakaran. Zero-shot object detection. In
1538
+ 10
1539
+
1540
+ Figure 6. Visualization results comparison between OW-DETR and our CAT. We use OW-DETR and CAT which are both trained on Task
1541
+ 1, the known classes are ‘aeroplane’, ‘bicycle’, ‘bird’, ‘boat’, ‘bottle’, ‘bus’, ‘car’, ‘cat’, ‘chair’, ‘cow’, ‘diningtable’, ‘dog’, ‘horse’,
1542
+ ‘motorbike’, ‘person’, ‘pottedplant’, ‘sheep’, ‘sofa’, ‘train’ and ‘tvmonitor’. The results show that our CAT substantially outperforms
1543
+ OW-DETR in terms of the ability to explore unknown objects and the accuracy of detection due to the clever cascade decoupled decoding
1544
+ structure and self-adaptive pseudo-labelling mechanism. As shown in the first row, OW-DETR identifies the background and known objects
1545
+ as unknowns and the real unknown object (carton) as the background, and our model accurately identifies the carton as the unknown
1546
+ object. In the second row, OW-DETR identifies the two calendars as the chair and the background, respectively, and the keyboard as the
1547
+ background, and our CAT accurately identifies them as unknown objects. The third row shows that OW-DETR not only does not detect the
1548
+ true unknown object (frame), but also identifies two known objects (sofa) as one. Our model accurately identifies the frame as an unknown
1549
+ object and also accurately identifies the two sofas.
1550
+ [Figure 6 images: OW-DETR versus Ours:CAT qualitative detections.]
+ Table 8. The detailed comparison of CAT with existing approaches on PASCAL VOC. Evaluation is performed on three standard settings,
1573
+ where a group of classes (10, 5 and last class) are introduced incrementally to a detector trained on the remaining classes (10,15 and 19).
1574
+ Our CAT performs favorably against existing approaches on all three settings, illustrating the power of localization identification cascade
1575
+ detection transformer for incremental object detection.
1576
+ 10 + 10 setting
1577
+ aero
1578
+ cycle
1579
+ bird
1580
+ boat
1581
+ bottle
1582
+ bus
1583
+ car
1584
+ cat
1585
+ chair
1586
+ cow
1587
+ table
1588
+ dog
1589
+ horse
1590
+ bike
1591
+ person
1592
+ plant
1593
+ sheep
1594
+ sofa
1595
+ train
1596
+ tv
1597
+ mAP
1598
+ ILOD
1599
+ 69.9
1600
+ 70.4
1601
+ 69.4
1602
+ 54.3
1603
+ 48
1604
+ 68.7
1605
+ 78.9
1606
+ 68.4
1607
+ 45.5
1608
+ 58.1
1609
+ 59.7
1610
+ 72.7
1611
+ 73.5
1612
+ 73.2
1613
+ 66.3
1614
+ 29.5
1615
+ 63.4
1616
+ 61.6
1617
+ 69.3
1618
+ 62.2
1619
+ 63.2
1620
+ Faster ILOD
1621
+ 72.8
1622
+ 75.7
1623
+ 71.2
1624
+ 60.5
1625
+ 61.7
1626
+ 70.4
1627
+ 83.3
1628
+ 76.6
1629
+ 53.1
1630
+ 72.3
1631
+ 36.7
1632
+ 70.9
1633
+ 66.8
1634
+ 67.6
1635
+ 66.1
1636
+ 24.7
1637
+ 63.1
1638
+ 48.1
1639
+ 57.1
1640
+ 43.6
1641
+ 62.1
1642
+ ORE - (CC + EBUI)
1643
+ 53.3
1644
+ 69.2
1645
+ 62.4
1646
+ 51.8
1647
+ 52.9
1648
+ 73.6
1649
+ 83.7
1650
+ 71.7
1651
+ 42.8
1652
+ 66.8
1653
+ 46.8
1654
+ 59.9
1655
+ 65.5
1656
+ 66.1
1657
+ 68.6
1658
+ 29.8
1659
+ 55.1
1660
+ 51.6
1661
+ 65.3
1662
+ 51.5
1663
+ 59.4
1664
+ ORE - EBUI
1665
+ 63.5
1666
+ 70.9
1667
+ 58.9
1668
+ 42.9
1669
+ 34.1
1670
+ 76.2
1671
+ 80.7
1672
+ 76.3
1673
+ 34.1
1674
+ 66.1
1675
+ 56.1
1676
+ 70.4
1677
+ 80.2
1678
+ 72.3
1679
+ 81.8
1680
+ 42.7
1681
+ 71.6
1682
+ 68.1
1683
+ 77
1684
+ 67.7
1685
+ 64.5
1686
+ OW - DETR
1687
+ 75.4
1688
+ 63.9
1689
+ 57.9
1690
+ 50.0
1691
+ 52.0
1692
+ 70.9
1693
+ 79.5
1694
+ 72.4
1695
+ 44.3
1696
+ 57.9
1697
+ 59.7
1698
+ 73.5
1699
+ 77.7
1700
+ 75.2
1701
+ 76.2
1702
+ 44.9
1703
+ 68.8
1704
+ 65.4
1705
+ 79.3
1706
+ 69.0
1707
+ 65.7
1708
+ Ours: CAT
1709
+ 76.5
1710
+ 75.7
1711
+ 67.0
1712
+ 51.0
1713
+ 62.4
1714
+ 73.2
1715
+ 82.3
1716
+ 83.7
1717
+ 42.7
1718
+ 64.4
1719
+ 56.8
1720
+ 74.1
1721
+ 75.8
1722
+ 79.2
1723
+ 78.1
1724
+ 39.9
1725
+ 65.1
1726
+ 59.6
1727
+ 78.4
1728
+ 67.4
1729
+ 67.7
1730
+ 15 + 5 setting
1731
+ aero
1732
+ cycle
1733
+ bird
1734
+ boat
1735
+ bottle
1736
+ bus
1737
+ car
1738
+ cat
1739
+ chair
1740
+ cow
1741
+ table
1742
+ dog
1743
+ horse
1744
+ bike
1745
+ person
1746
+ plant
1747
+ sheep
1748
+ sofa
1749
+ train
1750
+ tv
1751
+ mAP
1752
+ ILOD
1753
+ 70.5
1754
+ 79.2
1755
+ 68.8
1756
+ 59.1
1757
+ 53.2
1758
+ 75.4
1759
+ 79.4
1760
+ 78.8
1761
+ 46.6
1762
+ 59.4
1763
+ 59
1764
+ 75.8
1765
+ 71.8
1766
+ 78.6
1767
+ 69.6
1768
+ 33.7
1769
+ 61.5
1770
+ 63.1
1771
+ 71.7
1772
+ 62.2
1773
+ 65.8
1774
+ Faster ILOD
1775
+ 66.5
1776
+ 78.1
1777
+ 71.8
1778
+ 54.6
1779
+ 61.4
1780
+ 68.4
1781
+ 82.6
1782
+ 82.7
1783
+ 52.1
1784
+ 74.3
1785
+ 63.1
1786
+ 78.6
1787
+ 80.5
1788
+ 78.4
1789
+ 80.4
1790
+ 36.7
1791
+ 61.7
1792
+ 59.3
1793
+ 67.9
1794
+ 59.1
1795
+ 67.9
1796
+ ORE - (CC + EBUI)
1797
+ 65.1
1798
+ 74.6
1799
+ 57.9
1800
+ 39.5
1801
+ 36.7
1802
+ 75.1
1803
+ 80
1804
+ 73.3
1805
+ 37.1
1806
+ 69.8
1807
+ 48.8
1808
+ 69
1809
+ 77.5
1810
+ 72.8
1811
+ 76.5
1812
+ 34.4
1813
+ 62.6
1814
+ 56.5
1815
+ 80.3
1816
+ 65.7
1817
+ 62.6
1818
+ ORE - EBUI
1819
+ 75.4
1820
+ 81
1821
+ 67.1
1822
+ 51.9
1823
+ 55.7
1824
+ 77.2
1825
+ 85.6
1826
+ 81.7
1827
+ 46.1
1828
+ 76.2
1829
+ 55.4
1830
+ 76.7
1831
+ 86.2
1832
+ 78.5
1833
+ 82.1
1834
+ 32.8
1835
+ 63.6
1836
+ 54.7
1837
+ 77.7
1838
+ 64.6
1839
+ 68.5
1840
+ OW - DETR
1841
+ 78.0
1842
+ 80.7
1843
+ 79.4
1844
+ 70.4
1845
+ 58.8
1846
+ 65.1
1847
+ 84.0
1848
+ 86.2
1849
+ 56.5
1850
+ 76.7
1851
+ 62.4
1852
+ 84.8
1853
+ 85.0
1854
+ 81.8
1855
+ 81.0
1856
+ 34.3
1857
+ 48.2
1858
+ 57.9
1859
+ 62.0
1860
+ 57.0
1861
+ 69.4
1862
+ Ours: CAT
1863
+ 75.3
1864
+ 81.0
1865
+ 84.4
1866
+ 64.5
1867
+ 56.6
1868
+ 74.4
1869
+ 84.1
1870
+ 86.6
1871
+ 53.0
1872
+ 70.1
1873
+ 72.4
1874
+ 83.4
1875
+ 85.5
1876
+ 81.6
1877
+ 81.0
1878
+ 32.0
1879
+ 58.6
1880
+ 60.7
1881
+ 81.6
1882
+ 63.5
1883
+ 72.2
1884
+ 19 + 1 setting
1885
+ aero
1886
+ cycle
1887
+ bird
1888
+ boat
1889
+ bottle
1890
+ bus
1891
+ car
1892
+ cat
1893
+ chair
1894
+ cow
1895
+ table
1896
+ dog
1897
+ horse
1898
+ bike
1899
+ person
1900
+ plant
1901
+ sheep
1902
+ sofa
1903
+ train
1904
+ tv
1905
+ mAP
1906
+ ILOD
1907
+ 69.4
1908
+ 79.3
1909
+ 69.5
1910
+ 57.4
1911
+ 45.4
1912
+ 78.4
1913
+ 79.1
1914
+ 80.5
1915
+ 45.7
1916
+ 76.3
1917
+ 64.8
1918
+ 77.2
1919
+ 80.8
1920
+ 77.5
1921
+ 70.1
1922
+ 42.3
1923
+ 67.5
1924
+ 64.4
1925
+ 76.7
1926
+ 62.7
1927
+ 68.2
1928
+ Faster ILOD
1929
+ 64.2
1930
+ 74.7
1931
+ 73.2
1932
+ 55.5
1933
+ 53.7
1934
+ 70.8
1935
+ 82.9
1936
+ 82.6
1937
+ 51.6
1938
+ 79.7
1939
+ 58.7
1940
+ 78.8
1941
+ 81.8
1942
+ 75.3
1943
+ 77.4
1944
+ 43.1
1945
+ 73.8
1946
+ 61.7
1947
+ 69.8
1948
+ 61.1
1949
+ 68.5
1950
+ ORE - (CC + EBUI)
1951
+ 60.7
1952
+ 78.6
1953
+ 61.8
1954
+ 45
1955
+ 43.2
1956
+ 75.1
1957
+ 82.5
1958
+ 75.5
1959
+ 42.4
1960
+ 75.1
1961
+ 56.7
1962
+ 72.9
1963
+ 80.8
1964
+ 75.4
1965
+ 77.7
1966
+ 37.8
1967
+ 72.3
1968
+ 64.5
1969
+ 70.7
1970
+ 49.9
1971
+ 64.9
1972
+ ORE - EBUI
1973
+ 67.3
1974
+ 76.8
1975
+ 60
1976
+ 48.4
1977
+ 58.8
1978
+ 81.1
1979
+ 86.5
1980
+ 75.8
1981
+ 41.5
1982
+ 79.6
1983
+ 54.6
1984
+ 72.8
1985
+ 85.9
1986
+ 81.7
1987
+ 82.4
1988
+ 44.8
1989
+ 75.8
1990
+ 68.2
1991
+ 75.7
1992
+ 60.1
1993
+ 68.8
1994
+ OW - DETR
1995
+ 82.2
1996
+ 80.7
1997
+ 73.9
1998
+ 56.0
1999
+ 58.6
2000
+ 72.1
2001
+ 82.4
2002
+ 79.6
2003
+ 48.0
2004
+ 72.8
2005
+ 64.2
2006
+ 83.3
2007
+ 83.1
2008
+ 82.3
2009
+ 78.6
2010
+ 42.1
2011
+ 65.5
2012
+ 55.4
2013
+ 82.9
2014
+ 60.1
2015
+ 70.2
2016
+ Ours: CAT
2017
+ 86.0
2018
+ 85.8
2019
+ 78.8
2020
+ 65.3
2021
+ 61.3
2022
+ 71.4
2023
+ 84.8
2024
+ 84.8
2025
+ 52.9
2026
+ 78.4
2027
+ 71.6
2028
+ 82.7
2029
+ 83.8
2030
+ 81.2
2031
+ 80.7
2032
+ 43.7
2033
+ 75.9
2034
+ 58.5
2035
+ 85.2
2036
+ 61.1
2037
+ 73.8
2038
+ Proceedings of the European Conference on Computer Vi-
2039
+ sion (ECCV), pages 384–400, 2018. 5
2040
+ [2] Josh Beal, Eric Kim, Eric Tzeng, Dong Huk Park, Andrew
2041
+ Zhai, and Dmitry Kislyuk. Toward transformer-based object
2042
+ detection. arXiv preprint arXiv:2012.09958, 2020. 4
2043
+ [3] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas
2044
+ Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-
2045
+ end object detection with transformers. In European confer-
2046
+ ence on computer vision, pages 213–229. Springer, 2020. 2,
2047
+ 4, 8
2048
+ [4] Mathilde Caron, Hugo Touvron, Ishan Misra, Herv´e J´egou,
2049
+ Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerg-
2050
+ ing properties in self-supervised vision transformers.
2051
+ In
2052
+ Proceedings of the IEEE/CVF International Conference on
2053
+ Computer Vision, pages 9650–9660, 2021. 6
2054
+ [5] Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He.
2055
+ Improved baselines with momentum contrastive learning.
2056
+ arXiv preprint arXiv:2003.04297, 2020. 4
2057
+ [6] Xingyu Chen, Junzhi Yu, Shihan Kong, Zhengxing Wu,
2058
+ and Li Wen. Joint anchor-feature refinement for real-time
2059
+ accurate object detection in images and videos.
2060
+ IEEE
2061
+ Transactions on Circuits and Systems for Video Technology,
2062
+ 31(2):594–607, 2020. 8
2063
+ [7] Xiyang Dai, Yinpeng Chen, Jianwei Yang, Pengchuan
2064
+ Zhang, Lu Yuan, and Lei Zhang. Dynamic detr: End-to-
2065
+ end object detection with dynamic attention. In Proceedings
2066
+ of the IEEE/CVF International Conference on Computer Vi-
2067
+ sion, pages 2988–2997, 2021. 4
2068
+ [8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li,
2069
+ and Li Fei-Fei. Imagenet: A large-scale hierarchical image
2070
+ database. In 2009 IEEE conference on computer vision and
2071
+ pattern recognition, pages 248–255. Ieee, 2009. 6
2072
+ [9] Akshay Dhamija, Manuel Gunther, Jonathan Ventura, and
2073
+ Terrance Boult. The overlooked elephant of object detection:
2074
+ Open set. In Proceedings of the IEEE/CVF Winter Confer-
2075
+ ence on Applications of Computer Vision, pages 1021–1030,
2076
+ 2020. 5
2077
+ [10] Mark Everingham, Luc Van Gool, Christopher KI Williams,
2078
+ John Winn, and Andrew Zisserman. The pascal visual object
2079
+ classes (voc) challenge. International journal of computer
2080
+ vision, 88(2):303–338, 2010. 5, 7
2081
+ [11] Spyros Gidaris and Nikos Komodakis. Attend refine repeat:
2082
+ Active box proposal generation via in-out localization. arXiv
2083
+ preprint arXiv:1606.04446, 2016. 8
2084
+ [12] Ross Girshick. Fast r-cnn. In Proceedings of the IEEE inter-
2085
+ national conference on computer vision, pages 1440–1448,
2086
+ 2015. 8
2087
+ [13] Akshita Gupta, Sanath Narayan, KJ Joseph, Salman Khan,
2088
+ Fahad Shahbaz Khan, and Mubarak Shah. Ow-detr: Open-
2089
+ world detection transformer. In CVPR, 2022. 2, 3, 5, 6, 7,
2090
+ 8
2091
+ [14] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross
2092
+ Girshick. Momentum contrast for unsupervised visual rep-
2093
+ resentation learning. In Proceedings of the IEEE/CVF con-
2094
+ ference on computer vision and pattern recognition, pages
2095
+ 9729–9738, 2020. 4
2096
+ [15] Kaiming He, Georgia Gkioxari, Piotr Doll´ar, and Ross Gir-
2097
+ shick. Mask r-cnn. In Proceedings of the IEEE international
2098
+ conference on computer vision, pages 2961–2969, 2017. 8
2099
+ 12
2100
+
2101
+ Figure 7. Visualization results comparison between CAT and Oracle. We visualize the detection results of our model for known and
2102
+ unknown objects, as well as the ground truth on the tasks corresponding to the weights, including the labels of known categories and the
2103
+ labels of unknown categories, where the objects of unknown categories are the objects of other categories that have not yet appeared in the
2104
+ total categories of the dataset. Our model can accurately detect known objects and unknown objects outside the total class of the dataset,
2105
+ such as the electric plug and sound switch in the first row, the camera in the second row and the kitten toy in the third row. It is also worth
2106
+ noting that although our model detects the audio, it does not identify it as an unknown object, but as a remote, showing the limitations of
2107
+ our model.
2108
+ [Figure 7 images: Oracle versus Ours:CAT detections.]
+ Figure 8. Visualization performance on incremental object detection. We visualize the detection results of the weights corresponding to
2136
+ different tasks for the same scenario. The results show that our CAT can identify unknown kinds of objects as the unknown class and
2137
+ accurately identify their classes after incrementally learning the unknown classes, such as sports ball and tennis racket in the first row,
2138
+ surfboard in the second row and traffic light in the third row.
2139
+ [16] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun.
2140
+ Deep residual learning for image recognition. In Proceed-
2141
+ [Figure 8 images: detections before versus after incrementally learning the new classes.]
+ ings of the IEEE conference on computer vision and pattern
2165
+ recognition, pages 770–778, 2016. 6
2166
+ [17] K J Joseph, Salman Khan, Fahad Shahbaz Khan, and Vi-
2167
+ neeth N Balasubramanian. Towards open world object de-
2168
+ tection. In 2021 IEEE/CVF Conference on Computer Vision
2169
+ and Pattern Recognition (CVPR), pages 5826–5836, 2021.
2170
+ 1, 3, 4, 5, 6, 7, 8
2171
+ [18] Bingyi Kang, Zhuang Liu, Xin Wang, Fisher Yu, Jiashi Feng,
2172
+ and Trevor Darrell.
2173
+ Few-shot object detection via feature
2174
+ reweighting. In Proceedings of the IEEE/CVF International
2175
+ Conference on Computer Vision, pages 8420–8429, 2019. 8
2176
+ [19] Dahun Kim, Tsung-Yi Lin, Anelia Angelova, In So Kweon,
2177
+ and Weicheng Kuo. Learning open-world object proposals
2178
+ without learning to classify. IEEE Robotics and Automation
2179
+ Letters, 7(2):5453–5460, 2022. 8
2180
+ [20] Hongyang Li, Yu Liu, Wanli Ouyang, and Xiaogang Wang.
2181
+ Zoom out-and-in network with map attention decision for re-
2182
+ gion proposal and object detection. International Journal of
2183
+ Computer Vision, 127(3):225–238, 2019. 8
2184
+ [21] Yanghao Li, Hanzi Mao, Ross Girshick, and Kaiming He.
2185
+ Exploring plain vision transformer backbones for object de-
2186
+ tection. arXiv preprint arXiv:2203.16527, 2022. 4
2187
+ [22] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and
2188
+ Piotr Doll´ar. Focal loss for dense object detection. In Pro-
2189
+ ceedings of the IEEE international conference on computer
2190
+ vision, pages 2980–2988, 2017. 8
2191
+ [23] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays,
2192
+ Pietro Perona, Deva Ramanan, Piotr Doll´ar, and C Lawrence
2193
+ Zitnick. Microsoft coco: Common objects in context. In
2194
+ European conference on computer vision, pages 740–755.
2195
+ Springer, 2014. 5
2196
+ [24] Yue Lu, Xingyu Chen, Zhengxing Wu, and Junzhi Yu. De-
2197
+ coupled metric network for single-stage few-shot object de-
2198
+ tection. IEEE Transactions on Cybernetics, 2022. 8
2199
+ [25] Dimity Miller, Lachlan Nicholson, Feras Dayoub, and Niko
2200
+ S¨underhauf. Dropout sampling for robust object detection
2201
+ in open-set conditions. In 2018 IEEE International Confer-
2202
+ ence on Robotics and Automation (ICRA), pages 3243–3249.
2203
+ IEEE, 2018. 5
2204
+ [26] Dimity Miller, Lachlan Nicholson, Feras Dayoub, and Niko
2205
+ S¨underhauf. Dropout sampling for robust object detection
2206
+ in open-set conditions. In 2018 IEEE International Confer-
2207
+ ence on Robotics and Automation (ICRA), pages 3243–3249.
2208
+ IEEE, 2018. 8
2209
+ [27] Ishan Misra, Rohit Girdhar, and Armand Joulin. An end-to-
2210
+ end transformer model for 3d object detection. In Proceed-
2211
+ ings of the IEEE/CVF International Conference on Com-
2212
+ puter Vision, pages 2906–2917, 2021. 4
2213
+ [28] Pedro O O Pinheiro, Ronan Collobert, and Piotr Doll´ar.
2214
+ Learning to segment object candidates. Advances in neural
2215
+ information processing systems, 28, 2015. 8
2216
+ [29] Yanwei Pang, Tiancai Wang, Rao Muhammad Anwer, Fa-
2217
+ had Shahbaz Khan, and Ling Shao. Efficient featurized im-
2218
+ age pyramid network for single shot detector. In Proceedings
2219
+ of the IEEE/CVF Conference on Computer Vision and Pat-
2220
+ tern Recognition, pages 7336–7344, 2019. 8
2221
+ [30] Can Peng, Kun Zhao, and Brian C Lovell. Faster ilod: In-
2222
+ cremental learning for object detectors based on faster rcnn.
2223
+ Pattern recognition letters, 140:109–115, 2020. 6, 7
2224
+ [31] Jordi Pont-Tuset, Pablo Arbel´aez, Jonathan T. Barron, Fer-
2225
+ ran Marques, and Jitendra Malik. Multiscale combinatorial
2226
+ grouping for image segmentation and object proposal gener-
2227
+ ation. IEEE Transactions on Pattern Analysis and Machine
2228
+ Intelligence, 39(1):128–140, 2017. 3
2229
+ [32] Joseph Redmon, Santosh Divvala, Ross Girshick, and Ali
2230
+ Farhadi. You only look once: Unified, real-time object de-
2231
+ tection. In Proceedings of the IEEE conference on computer
2232
+ vision and pattern recognition, pages 779–788, 2016. 8
2233
+ [33] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun.
2234
+ Faster r-cnn: Towards real-time object detection with region
2235
+ proposal networks. Advances in neural information process-
2236
+ ing systems, 28, 2015. 1, 6, 8
2237
+ [34] Adrian Rosebrock. Intersection over union (IoU) for object detection. Retrieved from PyImageSearch,
+ https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection, 2016. 4
2243
+ [35] Konstantin Shmelkov, Cordelia Schmid, and Karteek Ala-
2244
+ hari. Incremental learning of object detectors without catas-
2245
+ trophic forgetting. In Proceedings of the IEEE international
2246
+ conference on computer vision, pages 3400–3409, 2017. 6,
2247
+ 7, 8
2248
+ [36] Jasper RR Uijlings, Koen EA Van De Sande, Theo Gev-
2249
+ ers, and Arnold WM Smeulders. Selective search for ob-
2250
+ ject recognition. International journal of computer vision,
2251
+ 104(2):154–171, 2013. 3, 4
2252
+ [37] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszko-
2253
+ reit, Llion Jones, Aidan N Gomez, Łukasz Kaiser, and Illia
2254
+ Polosukhin. Attention is all you need. Advances in neural
2255
+ information processing systems, 30, 2017. 8
2256
+ [38] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang
2257
+ Wang, and Jifeng Dai. Deformable detr: Deformable trans-
2258
+ formers for end-to-end object detection.
2259
+ arXiv preprint
2260
+ arXiv:2010.04159, 2020. 2, 4, 6, 8
2261
+ [39] C Lawrence Zitnick and Piotr Doll´ar. Edge boxes: Locat-
2262
+ ing object proposals from edges. In European conference on
2263
+ computer vision, pages 391–405. Springer, 2014. 3
2264
+ [40] Zhengxia Zou, Zhenwei Shi, Yuhong Guo, and Jieping Ye.
2265
+ Object detection in 20 years: A survey.
2266
+ arXiv preprint
2267
+ arXiv:1905.05055, 2019. 8
2268
+ 15
2269
+
EdA0T4oBgHgl3EQfA_-G/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
GtE4T4oBgHgl3EQfgQ2M/content/tmp_files/2301.05115v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
GtE4T4oBgHgl3EQfgQ2M/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
H9AyT4oBgHgl3EQfTPcY/content/tmp_files/2301.00100v1.pdf.txt ADDED
@@ -0,0 +1,1251 @@
1
+ arXiv:2301.00100v1 [math.AP] 31 Dec 2022
2
+ COBORDISM INVARIANCE OF THE INDEX FOR
3
+ REALIZATIONS OF ELLIPTIC OPERATORS REVISITED
4
+ THOMAS KRAINER
5
+ Abstract. We revisit an argument due to Lesch [11, 12] for proving the cobor-
6
+ dism invariance of the index of Dirac operators on even-dimensional closed
7
+ manifolds and combine this with recent work by the author [10] to show van-
8
+ ishing results for the spectral flow for families of selfadjoint Fredholm realiza-
9
+ tions of elliptic operators in case the family is induced on the boundary by an
10
+ elliptic operator on a compact space. This work is motivated by studying the
11
+ behavior of the index of realizations of elliptic operators under cobordisms of
12
+ stratified manifolds.
13
+ 1. Introduction
14
+ One of the original proofs of the Atiyah-Singer Index Theorem is based on showing
15
+ that the index of Dirac type operators is invariant under cobordisms, see Palais
16
+ [17]. This proof is analytic in nature and rooted in the classical theory of elliptic
17
+ boundary value problems. Other proof strategies for the index theorem such as the
18
+ heat equation proof have generally been favored because these proofs require less
19
+ sophisticated analytic techniques than the original cobordism proof.
20
+ Higson [9] gave a proof of the cobordism invariance of the index by attaching an
21
+ infinite half-cylinder to the boundary and extending the operator from the manifold
22
+ with boundary to the manifold with cylindrical end. The Dirac type operator on
23
+ the resulting odd-dimensional complete manifold is essentially selfadjoint, and the
24
+ analytic arguments involved in Higson’s proof are considerably simpler compared
25
+ to the original proof. Lesch [11], on the other hand, gave a proof by attaching a
26
+ (generalized) cone to the boundary and extended the operator from the manifold
27
+ with boundary to a cone operator; while conic manifolds are incomplete and thus
28
+ dealing with domains of realizations of the resulting conic Dirac type operator is
29
+ needed, Lesch’s approach is still much simpler from a functional analytic point of
30
+ view than the original proof because the maximal and minimal domains of L2-
31
+ based realizations in the conic case differ only by a finite-dimensional space – the
32
+ price to pay is the more intricate analysis to deal with the singularity which at
33
+ this juncture has been introduced artificially. Several other analytic proofs of the
34
+ cobordism invariance of the index [3, 16], a K-theory proof [5], and generalizations
35
+ [4, 8, 14, 19] have since been found.
36
+ This note is motivated by recent advances in elliptic theory on stratified man-
37
+ ifolds with incomplete iterated wedge metrics [1, 2, 6, 7, 15, 18] and gives an ap-
38
+ plication of the spectral flow formula for indicial operators obtained in our recent
39
+ paper [10]. Stratified cobordisms and the cobordism invariance of the index for the
40
+ 2020 Mathematics Subject Classification. Primary: 58J20; Secondary: 58J05, 58J32, 58J30.
41
+ Key words and phrases. Manifolds with singularities, index theory, cobordism.
42
46
+ signature operator have been considered in [1, 2], where especially in [2] the opera-
47
+ tor is no longer essentially selfadjoint and suitable boundary conditions associated
48
+ with the singular strata are considered; stratified cobordism and the invariance of
49
+ the index are used in an essential way to establish the properties of the signature
50
+ of a Cheeger space considered in that paper.
51
+ From our point of view Lesch’s proof [11, 12] of the cobordism invariance of
52
+ the index is very natural in the context of elliptic theory on stratified manifolds
53
+ because, unlike in the classical smooth case, singular analysis and dealing with
54
+ boundary conditions associated with singular strata already are essential features
55
+ of the investigations here.
56
+ In this note we will revisit and extend Lesch’s proof from the Dirac case to more
57
+ general operators of any order, and what amounts to the vanishing of the index in
58
+ the Dirac case (for null-cobordisms) will accordingly generalize to the vanishing of
59
+ the spectral flow for indicial families. Our recent paper [10] on indicial operators,
60
+ which are abstract functional analytic model operators associated to generalized
61
+ conical singularities, is the basis for this. We will only be concerned with null-
62
+ cobordisms and proving vanishing results here; more general notions of cobordisms
63
+ and cobordism invariance follow upon reduction to this case. Without detailing the
64
+ precise assumptions, the argument proceeds as follows:
65
+ Let (M, g) be a Riemannian manifold, and let U = U(Y ) ⊂ M be an open subset
66
+ that is isometric to (0, ε) × Y with product metric dx2 + gY for some ε > 0, where
67
+ (Y, gY ) is another Riemannian manifold. The reader ought to think of both M and
68
+ Y as the open interior of compact stratified manifolds M and Y equipped with
69
+ incomplete iterated wedge metrics, where Y is a boundary hypersurface of M, and
70
+ U(Y ) is a collar neighborhood. Let E → M be a Hermitian vector bundle such
71
+ that E
72
+ ��
73
+ U(Y ) ∼= π∗
74
+ Y E isometrically, where E → Y is a Hermitian vector bundle, and
75
+ πY : (0, ε) × Y → Y is the canonical projection. Let
76
+ A : C∞
77
+ c (M; E ) → C∞
78
+ c (M; E )
79
+ be an elliptic differential operator of order µ ≥ 1 that is symmetric with respect to
80
+ the inner product induced by the Riemannian and Hermitian metrics, and suppose
81
+ that A is in U(Y ) of the form
82
+ A ∼= A∧ = x^{-1} Σ_{j=0}^{µ} aj(y, Dy)(xDx)^j : C∞_c((0, ε) × Y ; π_Y^∗E) → C∞_c((0, ε) × Y ; π_Y^∗E),
91
+ where aj(y, Dy) ∈ Diffµ−j(Y ; E). Let
92
+ p(σ) = Σ_{j=0}^{µ} aj(y, Dy) σ^j : C∞_c(Y ; E) → C∞_c(Y ; E), σ ∈ C,
99
+ be the indicial family. Now suppose that
100
+ Amin : Dmin(A) ⊂ L2(M; E ) → L2(M; E )
101
+ (1.1)
102
+ is some closed symmetric extension of A : C∞
103
+ c (M; E ) ⊂ L2(M; E ) → L2(M; E ),
104
+ and let Amax : Dmax(A) ⊂ L2(M; E ) → L2(M; E ) be the adjoint – we point out
105
+ here that Amin is not necessarily the minimal extension of A from C∞
106
+ c (M; E ), and
107
+
108
110
+ therefore Amax is not the largest L2-based closed extension either, i.e. we only have
111
+ Dmin(A) ⊃ {u ∈ L2(M; E ); ∃ uk ∈ C∞_c(M; E ), uk → u in L2(M; E ), and (Auk)_k ⊂ L2(M; E ) Cauchy},
+ Dmax(A) ⊂ {u ∈ L2(M; E ); ∃ v ∈ L2(M; E ) : ⟨Aφ, u⟩_{L2(M;E)} = ⟨φ, v⟩_{L2(M;E)} ∀ φ ∈ C∞_c(M; E )},
117
+ and these inclusions are generally proper. The reader ought to think of the operator
118
+ A as an elliptic iterated incomplete wedge operator on M, and the domain Dmin(A)
119
+ as determined by previously chosen boundary conditions for A associated with
120
+ singular strata of M away from the boundary hypersurface Y ⊂ M.
121
+ One of the main points now is that under suitable localization and compatibility
122
+ assumptions these extensions of A should localize to U(Y ) and be fully captured
123
+ by the extensions of the indicial operator
124
+ A∧ : C∞_c(R+; E1) ⊂ L2(R+ × Y ; π_Y^∗E) → L2(R+ × Y ; π_Y^∗E).
+ (1.2)
+ Here
+ Hµ_comp(Y ; E) ⊂ E1 ⊂ Hµ_loc(Y ; E)
133
+ is the common domain for the indicial family p(σ) : E1 ⊂ E0 → E0, σ ∈ C,
134
+ where E0 = L2(Y ; E), giving rise to a holomorphic family of unbounded Fredholm
135
+ operators that are selfadjoint for σ ∈ R.
136
+ The reader ought to think of E1 as
137
+ determined by certain lateral boundary conditions associated with the singular
138
+ strata of Y , obtained via restriction to U(Y ) by the previously determined boundary
139
+ conditions on M for A that gave rise to Dmin(A); the localization and compatibility
140
+ assumptions are such that the boundary conditions previously chosen for A on M
141
+ should be selfadjoint away from the boundary hypersurface Y . The upshot of all
142
+ of this is that we obtain a unitary equivalence
143
+
144
+ ( Dmax(A)/Dmin(A), [·, ·]A ) ∼= ( Dmax(A∧)/Dmin(A∧), [·, ·]A∧ )
+ of finite-dimensional indefinite inner product spaces by passing to representatives
+ supported in U(Y ) ∼= (0, ε) × Y , thus allowing transitioning between M and R+ × Y ; here
+ [·, ·]A : Dmax(A) × Dmax(A) → C,   [u, v]A = (1/i)( ⟨Amax u, v⟩_L2 − ⟨u, Amax v⟩_L2 )
+ is the adjoint pairing, and likewise for [·, ·]A∧, while Dmin(A∧) is the domain of the
+ closure A∧,min of (1.2), and Dmax(A∧) is the domain of the adjoint A∧,max = A∧,min^∗.
161
+ In particular, we have
162
+ sgn( Dmax(A)/Dmin(A), [·, ·]A ) = sgn( Dmax(A∧)/Dmin(A∧), [·, ·]A∧ )
+ for the signatures of these spaces. On the one hand, using the spectral flow formula
+ from [10], we have
+ sgn( Dmax(A∧)/Dmin(A∧), [·, ·]A∧ ) = SF[ p(σ) : E1 ⊂ E0 → E0, −∞ < σ < ∞ ],
+ while on the other hand sgn( Dmax(A)/Dmin(A), [·, ·]A ) = 0 if (1.1) is Fredholm or
+ the embedding Dmax(A) ֒→ L2(M; E ) is compact, which combined leads to the
+ desired conclusion that
+ SF[ p(σ) : E1 ⊂ E0 → E0, −∞ < σ < ∞ ] = 0.
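+ Schematically, the argument therefore chains the following identities (made precise in Sections 2-4); the display below only summarizes the steps above and is not an additional result:
+ SF[ p(σ) ] = sgn( Dmax(A∧)/Dmin(A∧), [·, ·]A∧ ) = sgn( Dmax(A)/Dmin(A), [·, ·]A ) = n+ − n− = 0,
+ where n± are the deficiency indices of Amin, which are finite and equal whenever Amin is Fredholm or Dmax(A) ֒→ L2(M; E ) embeds compactly.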
185
+
186
188
+ In the Dirac case the spectral flow of the indicial family is easily seen to equal
189
+ the Fredholm index of the operator D : D(D) ⊂ L2(Y ; E−) → L2(Y ; E+) on the
190
+ even-dimensional boundary Y , thus recovering cobordism invariance of the index
191
+ in this context.
192
+ The structure of this paper is as follows: In Section 2 we briefly review what is
193
+ needed from extension theory of symmetric operators, in particular the criteria that
194
+ ensure that
195
+
196
+ Dmax(A)/Dmin(A), [·, ·]A
197
+
198
+ is finite-dimensional with signature zero. In
199
+ Section 3 we review results from our paper [10] on indicial operators in the form in
200
+ which they are needed here; we also address in this section how indicial operators of
201
+ first order that model the Dirac case fit into this framework in order to obtain the
202
+ desired conclusions about the cobordism invariance of the index when specializing to
203
+ such operators. In Section 4 we fill in the details of the outline above and prove the
204
+ null-cobordism theorem (Theorem 4.2). Finally, in Appendix A, we discuss the null-
205
+ cobordism theorem for smooth manifolds; assumptions appear much weaker here
206
+ on the geometry and the participating objects because the analytic tools available
207
+ in this case are rich enough to create the preconditions needed to apply the null-
208
+ cobordism theorem rather than having to assume them from the outset.
209
+ With
210
+ the ongoing further development of singular analysis on stratified manifolds we
211
+ anticipate similar reductions and simplifications for such cases in the future as well.
212
+ 2. Preliminaries from extension theory
213
+ Let H be a separable complex Hilbert space, and suppose Amin : Dmin ⊂ H → H
214
+ is closed, densely defined, and symmetric. Let Amax := A∗
215
+ min : Dmax ⊂ H → H be
216
+ the adjoint. We equip Dmax with the graph inner product
217
+ ⟨u, v⟩Amax = ⟨u, v⟩ + ⟨Amaxu, Amaxv⟩
218
+ and associated graph norm. Then Dmin ⊂ (Dmax, ∥ · ∥Amax) is a closed subspace,
223
+ and
224
+ Dmax = Dmin ⊕ ker(Amax + i) ⊕ ker(Amax − i)
225
+ by von Neumann’s formulas. The dimensions
226
+ n± = dim ker(Amax ∓ λi) ∈ N0 ∪ {∞},
227
+ λ > 0,
228
+ are the deficiency indices of the operator Amin and independent of λ > 0. The
229
+ operators
230
+ Amin ± iλ : Dmin ⊂ H → H,
231
+ λ > 0,
232
+ are injective and have closed range, and we have n± < ∞ if and only if Amin ± iλ
233
+ is Fredholm, in which case n± = − ind(Amin ± iλ). The adjoint pairing
234
+ [·, ·]A : Dmax × Dmax → C,   [u, v]A = (1/i)( ⟨Amax u, v⟩ − ⟨u, Amax v⟩ )
240
+ descends to a nondegenerate Hermitian sesquilinear form (indefinite inner product)
241
+ [·, ·] : Dmax/Dmin × Dmax/Dmin → C.
242
+ If dim Dmax/Dmin < ∞, i.e. if Amin has finite deficiency indices, the signature of
243
+ the adjoint pairing is given by
244
+ sgn( Dmax/Dmin, [·, ·] ) = n+ − n−.
249
+ The following criteria are standard and useful for verification that n+ = n− < ∞.
250
+
251
253
+ Proposition 2.1. Suppose Amin : Dmin ⊂ H → H is Fredholm. Then Amin has
254
+ finite and equal deficiency indices, and therefore
255
+ sgn( Dmax/Dmin, [·, ·] ) = 0.
260
+ Proof. Because Amin : Dmin ⊂ H → H is Fredholm there exists ε > 0 such that
261
+ Amin + iλ : Dmin ⊂ H → H is Fredholm for −ε < λ < ε, and consequently both
262
+ n± < ∞ and Amin + iλ is Fredholm for all λ ∈ R. Now
263
+ R ∋ λ �→ Amin + iλ : Dmin ⊂ H → H
264
+ is a continuous Fredholm function and therefore has constant index. Thus
265
+ n+ = − ind(Amin + i) = − ind(Amin − i) = n−.
266
+
267
+ Proposition 2.2. If the embedding
268
+
269
+ (Dmax, ∥ · ∥Amax) ֒→ H is compact then Amin
272
+ has finite and equal deficiency indices.
273
+ Proof. The norms ∥·∥Amax and ∥·∥H are equivalent on ker(Amax±i), and the identity
274
+ map
275
+
276
+ (ker(Amax ± i), ∥ · ∥Amax) → (ker(Amax ± i), ∥ · ∥H)
282
+ is compact by assumption.
283
+ Thus dim ker(Amax ± i) < ∞. Now Amin ± i : Dmin ⊂ H → H are both Fredholm,
284
+ and because Dmin ֒→ H is compact we have ind(Amin − i) = ind(Amin + i). The
285
+ proposition is proved.
286
+
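+ A standard one-dimensional illustration of these notions (not taken from [10] or from the present paper) is Amin = closure of A = −i d/dx on C∞_c(J) in H = L2(J):
+ for J = (0, ∞): ker(Amax − i) = C e^{−x}, ker(Amax + i) = {0}, so n+ = 1, n− = 0 and sgn( Dmax/Dmin, [·, ·] ) = 1;
+ for J = (0, 1): ker(Amax ∓ i) = C e^{∓x}, so n+ = n− = 1 and sgn( Dmax/Dmin, [·, ·] ) = 0,
+ consistent with Proposition 2.2, since on the bounded interval Dmax = H^1(0, 1) embeds compactly into L2(0, 1), while on the half-line neither criterion of this section applies.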
287
+ 3. Indicial operators
288
+ We consider indicial operators of the form
289
+ A∧ = x^{-1} Σ_{j=0}^{µ} aj (xDx)^j : C∞_c(R+; E1) ⊂ L2(R+; E0) → L2(R+; E0),
+ (3.1)
296
+ where µ ∈ N and E0 and E1 are separable complex Hilbert spaces such that E1 ֒→
297
+ E0 is continuous and dense, and the operators aj : E1 → E0 are continuous for
298
+ j = 0, . . . , µ. Let
299
+ p(σ) = Σ_{j=0}^{µ} aj σ^j : E1 → E0,   σ ∈ C
+ (3.2)
306
+ be the indicial family associated with A∧. We make the following assumptions:
307
+ (i) p(σ) : E1 ⊂ E0 → E0 is closed, densely defined, and Fredholm for σ ∈ C, and
308
+ the map C ∋ σ �→ p(σ) ∈ L (E1, E0) is holomorphic.
309
+ (ii) We have p(σ)∗ = p(σ) : E1 ⊂ E0 → E0 as unbounded operators in E0.
310
+ (iii) For (λ, σ) ∈ R2 and |λ, σ| ≥ R ≫ 0 sufficiently large p(σ) + iλ : E1 → E0 is
311
+ invertible with
312
+ sup_{|λ,σ|≥R} [ (1 + λ^2 + σ^{2µ})^{1/2} ∥(p(σ) + iλ)^{-1}∥_{L(E0)} + ∥(p(σ) + iλ)^{-1}∥_{L(E0,E1)} ] < ∞,
+ and for every k ∈ {1, . . ., µ} we have
+ sup_{|λ,σ|≥R} (1 + λ^2 + σ^{2µ})^{k/(2µ)} ∥ ∂_σ^k p(σ) (p(σ) + iλ)^{-1} ∥_{L(E0)} < ∞.
339
+ In [10] we systematically studied operators of the kind (3.1) under such assumptions.
340
+ We summarize some of the findings below:
341
+
342
344
+ (1) The operator (3.1) is symmetric and densely defined in L2(R+; E0). Let
345
+ A∧,min be its closure, and A∧,max = A∗
346
+ ∧,min be the adjoint. Then
347
+ dim Dmax(A∧)/Dmin(A∧) < ∞,
348
+ i.e., A∧ has finite deficiency indices.
349
+ (2) The boundary spectrum
350
+ specb(p) = {σ ∈ C; p(σ) : E1 → E0 is not invertible} ⊂ C
351
+ is discrete, and every strip |ℑ(σ)| ≤ K, K > 0, contains only finitely many
352
+ elements of specb(p). The elements of the boundary spectrum are generally
353
+ referred to as indicial roots.
354
+ (3) Fix an arbitrary cut-off function ω ∈ C∞
355
+ c (R+) with ω ≡ 1 near x = 0. For
356
+ each indicial root σ0 ∈ specb(p) let
357
+ Eσ0(p) = { u = ω Σ_{j=0}^{k} ej log^j(x) x^{iσ0} ; k ∈ N0 and ej ∈ E1, and p(σ)(Mu)(σ) is holomorphic at σ = σ0 },
+ (3.3)
368
+ where (Mu)(σ) = ∫_0^∞ x^{-iσ} u(x) dx/x
377
+ is the Mellin transform of u. This space is finite-dimensional for every σ0,
378
+ and we have
379
+ Dmax(A∧) = Dmin(A∧) ⊕ ⊕_{σ0 ∈ specb(p), −1/2 < ℑ(σ0) < 1/2} Eσ0(p).
386
+ (3.4)
387
+ (4) We have
388
+ x^{1/2} H(R+; E1) ∩ L2(R+; E0) ֒→ Dmin(A∧),
+ and Dmin(A∧) = x^{1/2} H(R+; E1) ∩ L2(R+; E0) if and only if p(σ) : E1 → E0
+ is invertible for all ℑ(σ) = −1/2.
396
+ The space H (R+; E1) is the completion of C∞
397
+ c (R+; E1) with respect to
398
+ the norm ∥u∥_H^2 = ∫_R ∥p(σ + iγ0)(Mu)(σ)∥_{E0}^2 dσ,
405
+ where γ0 ∈ R is arbitrary such that p(σ + iγ0) : E1 → E0 is invertible for
406
+ all σ ∈ R. We have
407
+ H(R+; E1) ֒→ Hµ_b(R+; E0) ∩ L2_b(R+; E1),
+ and in typical situations these spaces are equal; this is the case, for instance, if
+ sup_{σ∈R} ∥p(σ + iγ0)(⟨σ⟩^µ + iΛ)^{-1}∥_{L(E0)} < ∞,
415
+ (3.5)
416
+ where Λ : E1 ⊂ E0 → E0 is selfadjoint (e.g. for Λ = p(0)).
417
+ (5) While not discussed in [10] it is not hard to see that, under the added
418
+ assumption that the embedding E1 ֒→ E0 is compact, multiplication by a
419
+
420
422
+ cut-off function ω ∈ C∞
423
+ c (R+) with ω ≡ 1 near x = 0 induces a compact
424
+ operator ω : xαH (R+; E1) → L2
425
+ b(R+; E0) for every α > 01.
426
+ Consequently, if additionally p(σ) : E1 → E0 is invertible for all ℑ(σ) =
427
+ − 1
428
+ 2, we obtain a compact map ω : Dmax(A∧) → L2(R+; E0), and a bounded
429
+ map 1 − ω : Dmax(A∧) → Dmin(A∧). The latter is based on the identity
430
+ Dmin(A∧) = x
431
+ 1
432
+ 2 H (R+; E1) ∩ L2(R+; E0) and localization properties of the
433
+ space H (R+; E1) (see [10, Proposition 7.6]).
434
+ (6) The adjoint pairing
435
+ [·, ·]A∧ : Dmax(A∧) × Dmax(A∧) → C,   [u, v]A∧ = (1/i)( ⟨A∧,max u, v⟩_{L2(R+;E0)} − ⟨u, A∧,max v⟩_{L2(R+;E0)} )
441
+ induces a nondegenerate Hermitian sesquilinear form
442
+ [·, ·] : Dmax(A∧)/Dmin(A∧) × Dmax(A∧)/Dmin(A∧) → C,
443
+ and its signature is given by the spectral flow of the indicial family (3.2)
444
+ along the real line:
445
+ sgn( Dmax(A∧)/Dmin(A∧), [·, ·] ) = SF[ p(σ) : E1 ⊂ E0 → E0, −∞ < σ < ∞ ].
450
+ (3.6)
451
+ Note that p(σ) : E1 → E0 is invertible for |σ| ≥ T ≫ 0 large enough,
452
+ σ ∈ R, and the spectral flow in (3.6) then refers to p(σ) on the interval
453
+ −T ≤ σ ≤ T . Only crossings of real indicial roots contribute terms to the
454
+ spectral flow.
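+ For orientation, with the usual sign convention for the spectral flow (this is the standard convention, not a statement specific to [10]): if every real indicial root σ0 is a regular crossing, i.e. the eigenvalue branches λj(σ) of p(σ) that vanish at σ0 are simple and cross zero transversally, then
+ SF[ p(σ), −∞ < σ < ∞ ] = Σ_{σ0 ∈ specb(p) ∩ R} Σ_{j : λj(σ0) = 0} sign( λj′(σ0) ),
+ so each eigenvalue branch crossing zero upwards contributes +1 and each branch crossing downwards contributes −1.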
455
+ The focus in this paper is on the signature of the adjoint pairing, and by (3.6)
456
+ only real indicial roots are relevant. In order to obtain simple expressions for the
457
+ minimal domain and the maximal domain (3.4) of A∧ it is sometimes convenient
458
+ to introduce a scaling parameter t > 0 to remove any small non-real indicial roots
459
+ from the strip |ℑ(σ)| ≤ 1
460
+ 2. This leads to
461
+ A∧,t = x^{-1} Σ_{j=0}^{µ} aj t^j (xDx)^j : C∞_c(R+; E1) ⊂ L2(R+; E0) → L2(R+; E0)
467
+ with indicial family
468
+ pt(σ) = p(tσ) : E1 ⊂ E0 → E0,
469
+ σ ∈ C,
470
+ and the standing assumptions on p(σ) imply that the analogous properties are also
471
+ satisfied for pt(σ), and all estimates are locally uniform with respect to t > 0. In
472
+ particular, the spectral flow
473
+ SF[ pt(σ) : E1 ⊂ E0 → E0, −∞ < σ < ∞ ]
474
+ is independent of t > 0 by homotopy invariance, and thus
475
+ sgn( Dmax(A∧,t)/Dmin(A∧,t), [·, ·] )
478
+
479
+ 1The function a0(x, σ) = xαω(x)p(σ + iγ0)−1 is a Mellin symbol taking values in the com-
480
+ pact operators E0 → E0, and we have sup{⟨log(x)⟩j⟨σ⟩µ+k∥(xDx)l∂k
481
+ σa0(x, σ)∥L (E0); (x, σ) ∈
482
+ R+ × R} < ∞ for all j, k, l ∈ N0.
483
+ Thus the Mellin pseudodifferential operator opM(a0) :
484
+ L2
485
+ b(R+; E0) → L2
486
+ b(R+; E0) is compact, which implies compactness of the multiplication opera-
487
+ tor ω : xαH (R+; E1) → L2
488
+ b(R+; E0) as asserted.
489
+
490
492
+ is independent of t > 0. For 0 < t ≤ t0 small enough, pt(σ) : E1 ⊂ E0 → E0 is
493
+ invertible for all 0 < |ℑ(σ)| ≤ 1
494
+ 2. We then have
495
+ Dmin(A∧,t) = x^{1/2} H(R+; E1) ∩ L2(R+; E0),
498
+ where the definition of H (R+; E1) is accordingly based on pt(σ), and
499
+ Dmax(A∧,t) = Dmin(A∧,t) ⊕ ⊕_{σ0 ∈ specb(pt) ∩ R} Eσ0(pt).
503
+ If (3.5) holds for p(σ) it is true for all pt(σ), and in this case the space
504
+ H(R+; E1) = Hµ_b(R+; E0) ∩ L2_b(R+; E1)
507
+ is independent of t > 0; thus the minimal domain
508
+ Dmin(A∧) = x^{1/2} Hµ_b(R+; E0) ∩ x^{1/2} L2_b(R+; E1) ∩ L2(R+; E0)
515
+ is independent of 0 < t ≤ t0.
516
+ Operators of first order. Let D : D(D) ⊂ H1 → H2 be closed and densely
517
+ defined, and let D∗ : D(D∗) ⊂ H2 → H1 be the adjoint. Write
518
+ E0 = H1 ⊕ H2 and E1 = D(D) ⊕ D(D∗) ֒→ E0.
527
+ We assume that D (and therefore also D∗) is Fredholm, and that the embeddings
528
+ for both domains D(D) ֒→ H1 and D(D∗) ֒→ H2 are compact. Consider then
529
+ D∧ = x^{-1} ( [ 1 0 ; 0 −1 ](xDx) + [ 0 D∗ ; D 0 ] ) : C∞_c(R+; E1) ⊂ L2(R+; E0) → L2(R+; E0)
543
+ with indicial family
544
+ D(σ) = [ σ D∗ ; D −σ ] : E1 ⊂ E0 → E0,   σ ∈ C.
553
+ Now D(σ) satisfies the assumptions previously stated for indicial families with
554
+ µ = 1, including (3.5) with Λ = D(0); see Lemma 3.8 for the required estimates.
555
+ Therefore the conclusions summarized above hold for D∧, and by Lemma 3.9 we
556
+ have
557
+ sgn( Dmax(D∧)/Dmin(D∧), [·, ·] ) = ind[D : D(D) ⊂ H1 → H2].
562
+ (3.7)
563
+ The only real indicial root is σ0 = 0, and after possibly introducing a sufficiently
564
+ small scaling parameter t > 0 and replacing D∧ by
565
+ D∧,t = x^{-1} ( t [ 1 0 ; 0 −1 ](xDx) + [ 0 D∗ ; D 0 ] )
581
+ we have
582
+ Dmin(D∧,t) = x^{1/2} H1_b(R+; E0) ∩ x^{1/2} L2_b(R+; E1) ∩ L2(R+; E0),
589
+ Dmax(D∧,t) = Dmin(D∧,t) ⊕ E0(Dt).
590
+ In this case E0(Dt) = E0(D) is also independent of t > 0, and we have
591
+ E0(D) = { u = ω [ k ; k∗ ] ; k ∈ ker(D), k∗ ∈ ker(D∗) }.
600
+
601
603
+ This follows from (3.3) in view of
604
+ D(σ)^{-1} = σ [ 1 0 ; 0 −1 ][D(0)^2 + σ^2]^{-1} + D(0)[D(0)^2 + σ^2]^{-1} = [ ΠD 0 ; 0 −ΠD∗ ] (1/σ) + holomorphic
619
+ near σ = 0, where ΠD : H1 → ker(D) and ΠD∗ : H2 → ker(D∗) are the orthogonal
620
+ projections onto the kernels of D and D∗, respectively. For sufficiently small t > 0
621
+ a brief calculation shows that the adjoint pairing is given by
622
+ [ ω [ k1 ; k1∗ ], ω [ k2 ; k2∗ ] ]_{D∧,t} = t ( ⟨k1, k2⟩_{H1} − ⟨k1∗, k2∗⟩_{H2} )
641
+ for kj ∈ ker(D) and k∗
642
+ j ∈ ker(D∗), j = 1, 2, which provides a direct justification for
643
+ (3.7) for D∧,t (for small t > 0) that does not rely on the spectral flow.
644
+ Lemma 3.8. For (λ, σ) ∈ R2 write z = σ + iλ ∈ C and consider
645
+ D(z) = D(σ) + iλ = [ z D∗ ; D −z̄ ] : E1 ⊂ E0 → E0.
652
+ Then D(z) is invertible for all z ∈ C \ {0}, and
653
+ sup
654
+ |z|≥1
655
+ {|z| · ∥D(z)−1∥L (E0) + ∥D(z)−1∥L (E0,E1)} < ∞.
656
+ Proof. We have D(z)∗ = D(z̄), and
+ D(z)∗D(z) = D(z)D(z)∗ = [ |z|^2 + D∗D 0 ; 0 |z|^2 + DD∗ ] = D(0)^2 + |z|^2.
664
+ This operator is invertible for z ∈ C \ {0}, and consequently D(z) is invertible with
665
+ D(z)−1 = D(z)∗[D(z)D(z)∗]−1 = [zΠ1−zΠ2][D(0)2+|z|2]−1+D(0)[D(0)2+|z|2]−1,
666
+ where Πj : E0 → Hj ⊂ E0 is the orthogonal projection, j = 1, 2.
667
+ In view of
668
+ D(0)[zΠ1 − zΠ2] = [zΠ2 − zΠ1]D(0) we have
669
+ D(0)D(z)−1 = [zΠ2 − zΠ1]D(0)[D(0)2 + |z|2]−1 + D(0)2[D(0)2 + |z|2]−1.
670
+ The Spectral Theorem implies
671
+ sup
672
+ |z|≥1
673
+ {∥D(0)2[D(0)2+|z|2]−1∥+∥zD(0)[D(0)2+|z|2]−1∥+∥z2[D(0)2+|z|2]−1∥} < ∞,
674
+ where ∥ · ∥ = ∥ · ∥L (E0). The lemma now follows.
675
+
676
+ Lemma 3.9. We have
677
+ ind[D : D(D) ⊂ H1 → H2] = SF[ D(σ) : E1 ⊂ E0 → E0, σ ∈ R ].
682
+ Proof. Let K = ker(D(0)) = ker(D) ⊕ ker(D∗). Then
683
+ D(σ) = [ DK(σ) 0 ; 0 DK⊥(σ) ] : K ⊕ (K⊥ ∩ E1) → K ⊕ K⊥,   σ ∈ R.
699
+
700
702
+ Now DK(σ) : K → K, σ ̸= 0, has eigenvalues σ, −σ of multiplicities dim ker(D) and
703
+ dim ker(D∗), respectively, and DK⊥(σ) is invertible for all σ ∈ R. Thus
704
+ ind D = dim ker(D) − dim ker(D∗) = SF[ DK(σ) : K → K, σ ∈ R ] = SF[ D(σ) : E1 ⊂ E0 → E0, σ ∈ R ].
712
+
713
+ .
714
+
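+ As a sanity check of Lemma 3.9 one can experiment with finite-dimensional truncations. The following Python sketch is illustrative only: the toy matrix D, the endpoint value T, and the crude eigenvalue count below are ad hoc choices and are not part of this paper or of [10]. It counts the net number of eigenvalues of D(σ) = [ σ D∗ ; D −σ ] crossing zero and compares it with ind D = dim ker(D) − dim ker(D∗).
+ import numpy as np
+ 
+ def spectral_flow(family, T=5.0):
+     """Spectral flow of a Hermitian matrix family sigma -> family(sigma) on [-T, T].
+     If the family is invertible at both endpoints, this equals the change in the
+     number of positive eigenvalues between sigma = -T and sigma = +T."""
+     n_pos = lambda s: int(np.sum(np.linalg.eigvalsh(family(s)) > 0.0))
+     return n_pos(T) - n_pos(-T)
+ 
+ # Toy data: D maps C^2 -> C^1 with D = [1, 0], so dim ker(D) = 1, dim ker(D*) = 0, ind D = 1.
+ D = np.array([[1.0, 0.0]])          # shape (1, 2)
+ Dstar = D.conj().T                  # shape (2, 1)
+ 
+ def indicial_family(sigma):
+     # D(sigma) = [[sigma*I_2, D*], [D, -sigma*I_1]] acting on C^2 (+) C^1
+     top = np.hstack([sigma * np.eye(2), Dstar])
+     bottom = np.hstack([D, -sigma * np.eye(1)])
+     return np.vstack([top, bottom])
+ 
+ ind_D = (D.shape[1] - np.linalg.matrix_rank(D)) - (D.shape[0] - np.linalg.matrix_rank(Dstar))
+ print(spectral_flow(indicial_family), ind_D)   # both print 1, matching Lemma 3.9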
715
+ 4. The null-cobordism theorem
716
+ We now revisit the setting discussed in the introduction to prove the null-cobordism
717
+ theorem. We make the following product type assumptions on the geometry and
718
+ the operator:
719
+ Let (M, g) be a Riemannian manifold, and let U = U(Y ) ⊂ M be an open subset
720
+ that is isometric to (0, ε) × Y with product metric dx2 + gY for some ε > 0, where
721
+ (Y, gY ) is another Riemannian manifold. Let E → M be a Hermitian vector bundle
722
+ such that E
723
+ ��
724
+ U(Y ) ∼= π∗
725
+ Y E isometrically, where E → Y is a Hermitian vector bundle,
726
+ and πY : (0, ε) × Y → Y is the canonical projection. Let
727
+ A : C∞
728
+ c (M; E ) → C∞
729
+ c (M; E )
730
+ be an elliptic differential operator of order µ ≥ 1 that is symmetric with respect to
731
+ the inner product induced by the Riemannian and Hermitian metrics, and suppose
732
+ that A is in U(Y ) of the form
733
+ A ∼= A∧ = x^{-1} Σ_{j=0}^{µ} aj(y, Dy)(xDx)^j : C∞_c((0, ε) × Y ; π_Y^∗E) → C∞_c((0, ε) × Y ; π_Y^∗E),
742
+ where aj(y, Dy) ∈ Diffµ−j(Y ; E). Let
743
+ p(σ) = Σ_{j=0}^{µ} aj(y, Dy) σ^j : C∞_c(Y ; E) → C∞_c(Y ; E), σ ∈ C,
750
+ be the indicial family. We assume that p(σ) : E1 ⊂ E0 → E0 satisfies the assump-
751
+ tions stated in Section 3 with E0 = L2(Y ; E) and some domain
752
+
753
+ Hµ_comp(Y ; E) ⊂ E1 ⊂ Hµ_loc(Y ; E).
755
+ We also assume that the embedding E1 ֒→ E0 is compact, and that p(σ) : E1 → E0
756
+ is invertible for 0 < |ℑ(σ)| ≤ 1
757
+ 2; as explained in Section 3, the latter can generally
758
+ be achieved by introducing a scaling parameter (which for geometric operators
759
+ typically corresponds to scaling the metric). The closed extensions of the indicial
760
+ operator
761
+ A∧ : C∞_c(R+; E1) ⊂ L2(R+ × Y ; π_Y^∗E) → L2(R+ × Y ; π_Y^∗E)
765
+ are then described as explained in Section 3. Let
766
+ Amin : Dmin(A) ⊂ L2(M; E ) → L2(M; E )
767
+ be a closed symmetric extension of A : C∞
768
+ c (M; E ) ⊂ L2(M; E ) → L2(M; E ), and
769
+ let Amax : Dmax(A) ⊂ L2(M; E ) → L2(M; E ) be the adjoint; as discussed in the
770
+ introduction, Amin is generally not the minimal extension of A from C∞
771
+ c (M; E ),
772
+
773
775
+ and thus Amax is not the largest L2-based closed extension. By elliptic regularity
776
+ we have
777
+
778
+ Hµ_comp(M; E ) ⊂ Dmin(A) ⊂ Dmax(A) ⊂ Hµ_loc(M; E ).
780
+ By a cut-off function we mean any function ω ∈ C∞
781
+ c ([0, ε)) such that ω ≡ 1 near
782
+ x = 0, and we consider ω a function on M supported in U(Y ).
783
+ We make the
784
+ following localization and compatibility assumptions between A and A∧:
785
+ • For every cut-off function ω, multiplication by 1 − ω gives a continuous
786
+ operator Dmax(A) → Dmin(A). We also assume that 1 − ω : Dmin(A) →
787
+ L2(M; E ) is compact.
788
+ • For every cut-off function ω, multiplication by ω gives continuous operators
789
+ Dmin(A) → Dmin(A∧) and Dmin(A∧) → Dmin(A).
790
+ To make sense of the mappings above note that
791
+ M ⊃ U(Y ) ∼= (0, ε) × Y ⊂ R+ × Y,
792
+ which allows transitioning both ways between functions on M supported in U(Y )
793
+ and functions on R+ × Y supported in (0, ε) × Y . We will use these transitions
794
+ freely in what follows.
795
+ Proposition 4.1. Let ω ∈ C∞
796
+ c ([0, ε)) be any cut-off function. The map
797
+ Dmax(A)/Dmin(A) ∋ u + Dmin(A) �−→ ωu + Dmin(A∧) ∈ Dmax(A∧)/Dmin(A∧)
798
+ is well-defined, and induces a unitary equivalence between the indefinite inner prod-
799
+ uct spaces
800
+
801
+ ( Dmax(A)/Dmin(A), [·, ·]A ) ∼= ( Dmax(A∧)/Dmin(A∧), [·, ·]A∧ ).
807
+ Proof. We first prove that multiplication by ω gives a well-defined map
808
+ Dmax(A) ∋ u �→ ωu ∈ Dmax(A∧).
809
+ Note that with u also ωu ∈ Dmax(A) by our localization assumption. Now pick
810
+ another cut-off function ˜ω ∈ C∞
811
+ c ([0, ε)) such that ˜ω ≡ 1 in a neighborhood of
812
+ supp(ω). Let φ ∈ Dmin(A∧) be arbitrary, and write φ = ˜ωφ + (1 − ˜ω)φ. Since
813
+ Dmin(A∧) = x^{1/2} H(R+; E1) ∩ L2(R+; E0)
816
+ as a consequence of our assumptions we have that both ˜ωφ, (1 − ˜ω)φ ∈ Dmin(A∧),
817
+ see Section 3. We also have ˜ωφ ∈ Dmin(A) by our localization and compatibility as-
818
+ sumption with respect to the minimal domains. Using the locality of the differential
819
+ operators A∧ and A we get
820
+ ⟨A∧φ, ωu⟩ = ⟨A∧(˜ωφ), ωu⟩ = ⟨A(˜ωφ), ωu⟩ = ⟨˜ωφ, Amax(ωu)⟩
821
+ = ⟨φ, ˜ωAmax(ωu)⟩ = ⟨φ, Amax(ωu)⟩.
822
+ As this is valid for all φ ∈ Dmin(A∧) we see that ωu ∈ Dmax(A∧) with A∧,max(ωu)
823
+ given as the restriction of Amax(ωu) to U(Y ) and extended trivially to R+ × Y . As
824
+ for u ∈ Dmin(A) we also have ωu ∈ Dmin(A∧) by assumption, we thus obtain that
825
+ the map
826
+ Dmax(A)/Dmin(A) ∋ u + Dmin(A) �−→ ωu + Dmin(A∧) ∈ Dmax(A∧)/Dmin(A∧)
827
+ is well-defined.
828
+ Conversely, multiplication by ω likewise gives a well-defined map
829
+ Dmax(A∧) ∋ u �→ ωu ∈ Dmax(A).
830
+
831
833
+ Note that if u ∈ Dmax(A∧) then ωu ∈ Dmax(A∧) and (1 − ω)u ∈ Dmin(A∧) by
834
+ Section 3.
835
+ Now let ˜ω ∈ C∞
836
+ c ([0, ε)) be such that ˜ω ≡ 1 in a neighborhood of
837
+ supp(ω). Let φ ∈ Dmin(A) be arbitrary, and write φ = ˜ωφ + (1 − ˜ω)φ; by the
838
+ localization and compatibility assumptions both terms are in Dmin(A), and we also
839
+ have ˜ωφ ∈ Dmin(A∧). We get
840
+ ⟨Aφ, ωu⟩ = ⟨A(˜ωφ), ωu⟩ = ⟨A∧(˜ωφ), ωu⟩ = ⟨˜ωφ, A∧,max(ωu)⟩
841
+ = ⟨φ, ˜ωA∧,max(ωu)⟩ = ⟨φ, A∧,max(ωu)⟩.
842
+ This shows that ωu ∈ Dmax(A) with Amax(ωu) given by A∧,max(ωu) in U(Y ) and
843
+ extended trivially to M. We thus obtain a map
844
+ Dmax(A∧)/Dmin(A∧) ∋ u + Dmin(A∧) �−→ ωu + Dmin(A) ∈ Dmax(A)/Dmin(A),
845
+ and both maps are inverses of each other.
846
+ Finally, as for both A and A∧ each class in Dmax/Dmin has a representative
847
+ supported in U(Y ), and by the standing product type assumptions both adjoint
848
+ pairings agree on those representatives, the proposition follows.
849
+
850
+ Theorem 4.2 (Null-Cobordism Theorem). Under the stated product type, local-
851
+ ization, and compatibility assumptions we have
852
+ SF[ p(σ) : E1 ⊂ E0 → E0, −∞ < σ < ∞ ] = 0.
853
+ If moreover
854
+ p(σ) =
855
+
856
+ σ
857
+ D∗
858
+ D
859
+ −σ
860
+
861
+ :
862
+ D(D)
863
+
864
+ D(D∗)
865
+ ⊂ L2
866
+
867
+ Y ;
868
+ E−
869
+
870
+ E+
871
+
872
+ → L2
873
+
874
+ Y ;
875
+ E−
876
+
877
+ E+
878
+
879
+ with an elliptic Fredholm operator of first order
880
+ D : D(D) ⊂ L2(Y ; E−) → L2(Y ; E+),
881
+ then ind[D : D(D) ⊂ L2(Y ; E−) → L2(Y ; E+)] = 0.
882
+ Proof. By Proposition 4.1 we have a unitary equivalence between the indefinite
883
+ inner product spaces
884
+
885
+ Dmax(A)/Dmin(A), [·, ·]A
886
+ � ∼=
887
+
888
+ Dmax(A∧)/Dmin(A∧), [·, ·]A∧
889
+
890
+ .
891
+ Because
892
+ sgn
893
+
894
+ Dmax(A∧)/Dmin(A∧), [·, ·]A∧
895
+
896
+ = SF[ p(σ) : E1 ⊂ E0 → E0, −∞ < σ < ∞ ]
897
+ by (3.6) it suffices to show that
898
+ sgn
899
+
900
+ Dmax(A)/Dmin(A), [·, ·]A
901
+
902
+ = 0,
903
+ and by Proposition 2.2 this will be the case if the embedding Dmax(A) ֒→ L2(M; E )
904
+ is compact. Because A has finite deficiency indices we only need to prove that
905
+ Dmin(A) ֒→ L2(M; E ) is compact. Now let ω, ˜ω ∈ C∞
906
+ c ([0, ε)) be cut-off functions
907
+ such that ˜ω ≡ 1 in a neighborhood of supp(ω). By assumption the multiplication
908
+ operator
909
+ 1 − ω : Dmin(A) → L2(M; E )
910
+ is compact, and
911
+ ˜ω : Dmin(A) → Dmin(A∧)
912
+ is continuous. Now
913
+ Dmin(A∧) = x
914
+ 1
915
+ 2 H (R+; E1) ∩ L2(R+; E0),
916
+
917
+ COBORDISM INVARIANCE OF THE INDEX REVISITED
918
+ 13
919
+ and because E1 ֒→ E0 is compact, multiplication by ω is a compact operator
920
+ ω : Dmin(A∧) → L2(R+; E0),
921
+ see Section 3. Consequently, using the product type assumptions, the composition
922
+ ω = ω˜ω : Dmin(A) → L2(M; E )
923
+ is compact, which shows that the embedding ι = ω+(1−ω) : Dmin(A) → L2(M; E )
924
+ is compact. Finally, the vanishing of the index in the special case of operators of
925
+ first order follows from (3.7).
926
+
927
+ Appendix A. The null-cobordism theorem for closed manifolds
928
+ In this appendix we discuss a version of the null-cobordism Theorem 4.2 for closed
929
+ manifolds. Most of the previous assumptions no longer explicitly appear in this
930
+ version, e.g., we do not assume product type geometry, and there isn’t an operator
931
+ A on M at the outset, but symbolic assumptions instead. As mentioned in the
932
+ introduction this is due to the richness of analytic tools available for this situation
933
+ that allows to create the preconditions needed to apply Theorem 4.2 instead of
934
+ having to assume them from the outset.
935
+ Let Y be a closed, compact Riemannian manifold and E → Y be a Hermitian
936
+ vector bundle, and consider a family
937
+ p(σ) = Σ_{j=0}^{µ} aj(y, Dy) σ^j : C∞(Y ; E) → C∞(Y ; E), σ ∈ R,
942
+ (A.1)
943
+ where aj(y, Dy) ∈ Diffµ−j(Y ; E), and µ ≥ 1.
944
+ We assume that the parameter-
945
+ dependent principal symbol
946
+ σσ(p)(y, η; σ) =
947
+ µ
948
+
949
+ j=0
950
+ σσ(aj)(y, η)σj : Ey → Ey
951
+ (A.2)
952
+ is invertible on
953
+
954
+ T ∗Y × R
955
+
956
+ \ 0, and that p(σ) = p(σ)∗ is (formally) selfadjoint. By
957
+ elliptic and analytic Fredholm theory,
958
+ R ∋ σ �→ p(σ) : Hµ(Y ; E) ⊂ L2(Y ; E) → L2(Y ; E)
959
+ is a family of selfadjoint unbounded Fredholm operators acting in L2(Y ; E) that
960
+ is invertible for all σ ∈ R except at finitely many points, and it makes sense to
961
+ consider the spectral flow
962
+ SF[p(σ)] := SF[p(σ) : Hµ(Y ; E) ⊂ L2(Y ; E) → L2(Y ; E), −∞ < σ < ∞] ∈ Z
963
+ associated with p(σ).
964
+ Lemma A.3. The spectral flow is an invariant of the principal symbol (A.2) in
965
+ the sense that if pj(σ), j = 1, 2, are two elliptic selfadjoint families of order µ ≥ 1
966
+ of the form (A.1) with σσ(p1)(y, η; σ) = σσ(p2)(y, η; σ) then SF[p1(σ)] = SF[p2(σ)].
967
+ Proof. Let R > 0 be such that
968
+ p1(σ) + s[p2(σ) − p1(σ)] : Hµ(Y ; E) ⊂ L2(Y ; E) → L2(Y ; E)
969
+ is invertible for |σ| ≥ R > 0 and all 0 ≤ s ≤ 1. Consequently, this family is a ho-
970
+ motopy of selfadjoint Fredholm functions on [−R, R], invertible at both endpoints,
971
+ and by the homotopy invariance of the spectral flow for such families we see that
972
+ SF[p1(σ)] = SF[p2(σ)].
973
+
974
+
975
+ 14
976
+ THOMAS KRAINER
977
+ Suppose there exists a compact Riemannian manifold M with ∂M = Y . Utilizing
978
+ the geodesic flow from the boundary in the direction of the inner normal vector field
979
+ shows that there exists ε > 0 and a collar neighborhood map U(Y ) ∼= [0, ε) × Y
980
+ near the boundary such that the metric in U(Y ) takes the form dx2 + gY (x) with
981
+ a smooth family of metrics gY (x) on Y , 0 ≤ x < ε, and such that gY (0) = gY is
982
+ the given metric on Y . Moreover, by choosing ε > 0 small enough, there exists a
983
+ defining function for ∂M on M that in U(Y ) is represented by projection onto the
984
+ coordinate in [0, ε). We’ll also denote this global defining function by x : M → R+.
985
+ In particular,
986
+ T ∗M
987
+ ��
988
+ Y = T ∗Y ⊕ span{dx
989
+ ��
990
+ Y }
991
+ subject to these choices, and we can split variables (y, η; σ) ∈ T ∗M
992
+ ��
993
+ Y accordingly.
994
+ Theorem A.4 (Null-Cobordism Theorem). Let M be a compact Riemannian man-
995
+ ifold M with ∂M = Y , and let E → M be a Hermitian vector bundle with E
996
+ ��
997
+ Y = E.
998
+ Let T ∗M
999
+ ��
1000
+ Y ∼= T ∗Y × R subject to the choices described above, and suppose there ex-
1001
+ ists a symmetric, elliptic, differential principal symbol a ∈ C∞(T ∗M \0; End(π∗E ))
1002
+ of order µ such that
1003
+ a(y, η; σ) = σσ(p)(y, η; σ) for (y, η; σ) ∈
1004
+
1005
+ T ∗M \ 0
1006
+ ���
1007
+ Y ,
1008
+ where π : T ∗M → M is the canonical projection. Then SF[p(σ)] = 0.
1009
+ With the family p(σ) from (A.1) we associate the indicial operator
1010
+ A∧ = x^{-1} Σ_{j=0}^{µ} aj(y, Dy)(xDx)^j : C∞_c(R+ × Y ; E) ⊂ L2(R+ × Y ; E) → L2(R+ × Y ; E).
1016
+ (A.5)
1017
+ Here we also write E for its pull-back to R+ ×Y with respect to the projection onto
1018
+ Y , and equip R+ × Y with the product metric dx2 + gY . Then A∧ is symmetric
1019
+ and densely defined. Let Dmin(A∧) be the domain of the closure, and Dmax(A∧)
1020
+ be the domain of the adjoint.
1021
+ Proof of Theorem A.4. In the previously fixed collar neighborhood U(Y ) ∼= [0, ε)×
1022
+ Y we utilize standard deformations of the Riemannian metric on M, the Hermitian
1023
+ metric on E , and the principal symbol a to reduce to a product type structure near
1024
+ the boundary, as follows:
1025
+ Pick an isomorphism E
1026
+ ��
1027
+ U(Y ) ∼= π∗
1028
+ Y E that is the identity over Y , where πY :
1029
+ [0, ε) × Y → Y is the projection map. With respect to the pull-back of the given
1030
+ Hermitian metric on E to π∗
1031
+ Y E, the metric on E
1032
+ ��
1033
+ U(Y ) under this isomorphism is
1034
+ then represented by h(x, y) ∈ C∞([0, ε) × Y ; End(π∗
1035
+ Y E)) such that h = h∗ > 0 and
1036
+ h(0, y) = Id. Choose C∞-functions φ, ψ : [0, ε) → R with
1037
+ φ ≡ 0 on 0 ≤ x ≤ ε/3, 0 < φ < 2ε/3 on ε/3 < x < 2ε/3, and φ ≡ x on 2ε/3 ≤ x < ε;
+ ψ ≡ x on 0 ≤ x ≤ ε/3, ψ > 0 on ε/3 < x < 2ε/3, and ψ ≡ 1 on 2ε/3 ≤ x < ε.
1048
+ We then deform the Riemannian metric on U(Y ) and Hermitian metric on E
1049
+ ��
1050
+ U(Y )
1051
+ to
1052
+ ˜g = dx2 + gY (φ(x)) and ˜h(x, y) = h(φ(x), y) ∈ C∞([0, ε) × Y ; End(π∗
1053
+ Y E)),
1054
+ respectively, which both connect seamlessly with the Riemannian metric on M
1055
+ outside U(Y ), and the Hermitian metric on E . We also change the principal symbol
1056
+
1057
+ COBORDISM INVARIANCE OF THE INDEX REVISITED
1058
+ 15
1059
+ in
1060
+ ◦U(Y ) to
1061
+ ˜a(x, y, η; σ) = ψ(x)−1a(φ(x), y, η; ψ(x)σ) : Ey → Ey
1062
+ (A.6)
1063
+ for (x, y, η; σ) ∈ T ∗�
1064
+ (0, ε)×Y
1065
+
1066
+ \0 with the obvious identifications of variables, which
1067
+ again connects seamlessly outside the collar neighborhood. The new homogeneous
1068
+ principal symbol ˜a ∈ C∞(T ∗
1069
+ ◦M \ 0, End(π∗E )) is symmetric with respect to the
1070
+ new metric on E , and elliptic over
1071
+
1072
+ M. In
1073
+
1074
+ U(Y ) we have
1075
+ ˜a(x, y, η; σ) = x−1 σσ(p)(y, η; xσ) : Ey → Ey for 0 < x < ε
1076
+ 3
1077
+ by construction, which aligns with the principal symbol of A∧ from (A.5). Let
1078
+ now A ∈ Diffµ(
1079
+ ◦M; E ) be symmetric C∞
1080
+ c (
1081
+ ◦M; E ) → C∞
1082
+ c (
1083
+ ◦M; E ) with respect to the
1084
+ L2-inner product associated with the modified metrics on M and E , respectively,
1085
+ such that the principal symbol σσ(A) = ˜a on T ∗
1086
+ ◦M \ 0, and such that in
1087
+ ◦U(Y ) we
1088
+ have A = A∧ on C∞
1089
+ c ((0, ε
1090
+ 4) × Y ; E). Then
1091
+ A = x−1P : C∞
1092
+ c (
1093
+
1094
+ M; E ) ⊂ L2(M; E ) = x− 1
1095
+ 2 L2
1096
+ b(M; E ) → x− 1
1097
+ 2 L2
1098
+ b(M; E )
1099
+ is symmetric, and P ∈ Diffµ
1100
+ b (M; E ) is b-elliptic (see [13]). Moreover, by construction
1101
+ p(σ) is the indicial family of the operator P.
1102
+ By analytic Fredholm theory p(σ) : Hµ(Y ; E) → L2(Y ; E) is invertible for σ ∈ C
1103
+ except for the discrete set specb(p). In the sequel it will be convenient to assume
1104
+ that specb(p) ∩ {σ ∈ C; 0 < |ℑ(σ)| ≤
1105
+ 1
1106
+ 2} = ∅. As explained in Section 3, this
1107
+ can be achieved by replacing p(σ) by p(tσ) for sufficiently small t > 0 if necessary,
1108
+ which does not impact the spectral flow. Moreover, the assumptions of the theorem
1109
+ pertaining to the principal symbol of p(σ) also hold for p(tσ); to see this pick a
1110
+ C∞-function χ : [0, ε) → R with
1111
+ χ ≡ t on 0 ≤ x ≤ ε
1112
+ 3, χ > 0 on ε
1113
+ 3 < x < 2ε
1114
+ 3 , and χ ≡ 1 on 2ε
1115
+ 3 ≤ x < ε,
1116
+ and alter the principal symbol (A.6) in
1117
+ ◦U(Y ) to
1118
+ ˜a(x, y, η; σ) = ψ(x)−1a(φ(x), y, η; ψ(x)χ(x)σ) : Ey → Ey
1119
+ for (x, y, η; σ) ∈ T ∗�
1120
+ (0, ε) × Y
1121
+
1122
+ \ 0. We may thus proceed without loss of generality
1123
+ under the assumption that specb(p) ∩ {σ ∈ C; 0 < |ℑ(σ)| ≤ 1
1124
+ 2} = ∅. In view of
1125
+ Section 3 for A∧ and by invoking elliptic regularity for A we then get
1126
+ Dmin(A∧) = x
1127
+ 1
1128
+ 2 Hµ
1129
+ b (R+; L2(Y ; E)) ∩ x
1130
+ 1
1131
+ 2 L2
1132
+ b(R+; Hµ(Y ; E)) ∩ L2(R+ × Y ; E),
1133
+ Dmin(A) = x
1134
+ 1
1135
+ 2 Hµ
1136
+ b (M; E ),
1137
+ and
1138
+ Dmax(A∧) = Dmin(A∧) ⊕
1139
+
1140
+ σ0∈specb(p)∩R
1141
+ Eσ0(p),
1142
+ Dmax(A) = Dmin(A) ⊕
1143
+
1144
+ σ0∈specb(p)∩R
1145
+ Eσ0(p),
1146
+ where Eσ0(p) is defined as in (3.3) based on a cut-off function ω ∈ C∞
1147
+ c ([0, ε
1148
+ 4)) with
1149
+ ω ≡ 1 near x = 0 so that elements in Eσ0(p) can interchangeably be regarded both
1150
+ as sections of E on R+ × Y , as well as sections of E on M supported near the
1151
+ boundary. In particular, this implies that
1152
+
1153
+ Dmax(A)/Dmin(A), [·, ·]A
1154
+ � ∼=
1155
+
1156
+ Dmax(A∧)/Dmin(A∧), [·, ·]A∧
1157
+
1158
+
1159
+ 16
1160
+ THOMAS KRAINER
1161
+ because [u, v]A∧ = [u, v]A for u, v ∈
1162
+
1163
+ σ0∈specb(p)∩R
1164
+ Eσ0(p) by construction. Finally, it
1165
+ remains to note that Dmax ֒→ x− 1
1166
+ 4 Hµ
1167
+ b (M; E ), and the embedding x− 1
1168
+ 4 Hµ
1169
+ b (M; E ) ֒→
1170
+ x− 1
1171
+ 2 L2
1172
+ b(M; E ) = L2(M; E ) is compact.
1173
+
1174
+ Theorem A.4 and Lemma 3.9 imply:
1175
+ Corollary A.7 (Cobordism Invariance of the Index). Suppose that E = E− ⊕ E+
1176
+ is an orthogonal direct sum, and that the family (A.1) is of the form
1177
+ D(σ) = [ σ D∗ ; D −σ ] : C∞(Y ; E− ⊕ E+) → C∞(Y ; E− ⊕ E+),   σ ∈ R,
1198
+ where D : C∞(Y ; E−) → C∞(Y ; E+) is an elliptic differential operator of first
1199
+ order, and D∗ : C∞(Y ; E+) → C∞(Y ; E−) is its (formal) adjoint. Then
1200
+ SF[D(σ)] = ind D = dim ker(D) − dim ker(D∗).
1201
+ In particular, if the assumptions of Theorem A.4 hold, then ind(D) = 0.
1202
+ References
1203
+ [1] P. Albin, E. Leichtnam, R. Mazzeo, and P. Piazza, The signature package on Witt spaces,
1204
+ Ann. Sci. ´Ec. Norm. Sup´er. (4) 45 (2012), no. 2, 241–310.
1205
+ [2]
1206
+ , Hodge theory on Cheeger spaces, J. Reine Angew. Math. 744 (2018), 29–102.
1207
+ [3] M. Braverman, New proof of the cobordism invariance of the index, Proc. Amer. Math. Soc.
1208
+ 130 (2002), no. 4, 1095–1101.
1209
+ [4] M. Braverman and P. Shi, Cobordism invariance of the index of Callias-type operators,
1210
+ Comm. Partial Differential Equations 41 (2016), no. 8, 1183–1203.
1211
+ [5] C. Carvalho, A K-theory proof of the cobordism invariance of the index, K-Theory 36 (2005),
1212
+ no. 1-2, 1–31.
1213
+ [6] L. Hartmann, M. Lesch, and B. Vertman, On the domain of Dirac and Laplace type operators
1214
+ on stratified spaces, J. Spectr. Theory 8 (2018), no. 4, 1295–1348.
1215
+ [7]
1216
+ , Resolvent trace asymptotics on stratified spaces, Pure Appl. Anal. 3 (2021), no. 1,
1217
+ 75–108.
1218
+ [8] M. Hilsum, Bordism invariance in KK-theory, Math. Scand. 107 (2010), no. 1, 73–89.
1219
+ [9] N. Higson, A note on the cobordism invariance of the index, Topology 30 (1991), no. 3,
1220
+ 439–443.
1221
+ [10] T. Krainer, Extensions of symmetric operators that are invariant under scaling and applica-
1222
+ tions to indicial operators, New York J. Math. 28 (2022), 705–772.
1223
+ [11] M. Lesch, Deficiency indices for symmetric Dirac operators on manifolds with conic singu-
1224
+ larities, Topology 32 (1993), no. 3, 611–623.
1225
+ [12]
1226
+ , Operators of Fuchs Type, Conical Singularities, and Asymptotic Methods, Teubner-
1227
+ Texte zur Math. vol 136, B.G. Teubner, Stuttgart, Leipzig, 1997.
1228
+ [13] R. Melrose, The Atiyah-Patodi-Singer index theorem, Research Notes in Mathematics,
1229
+ A K Peters, Ltd., Wellesley, MA, 1993.
1230
+ [14] S. Moroianu, Cusp geometry and the cobordism invariance of the index, Adv. Math. 194
1231
+ (2005), no. 2, 504–519.
1232
+ [15] V.E. Nazaikinskii, A.Yu. Savin, B.-W. Schulze, and B.Yu. Sternin, Elliptic theory on singular
1233
+ manifolds, Differential and Integral Equations and Their Applications, vol. 7, Chapman &
1234
+ Hall/CRC, Boca Raton, FL, 2006.
1235
+ [16] L. Nicolaescu, On the cobordism invariance of the index of Dirac operators, Proc. Amer.
1236
+ Math. Soc. 125 (1997), no. 9, 2797–2801.
1237
+ [17] R. Palais, Seminar on the Atiyah-Singer index theorem, Annals of Mathematics Studies,
1238
+ No. 57, Princeton University Press, Princeton, NJ, 1965.
1239
+ [18] B.-W. Schulze,
1240
+ Pseudo-differential calculus on manifolds with geometric singularities,
1241
+ Pseudo-differential operators:
1242
+ Partial differential equations and time-frequency analysis,
1243
+ pp. 37–83, Fields Inst. Commun., vol. 52, Amer. Math. Soc., Providence, RI, 2007.
1244
+
1245
+ COBORDISM INVARIANCE OF THE INDEX REVISITED
1246
+ 17
1247
+ [19] C. Wulff, Bordism invariance of the coarse index, Proc. Amer. Math. Soc. 140 (2012), no. 8,
1248
+ 2693–2697.
1249
+ Penn State Altoona, 3000 Ivyside Park, Altoona, PA 16601-3760
1250
+ Email address: krainer@psu.edu
1251
+
H9AyT4oBgHgl3EQfTPcY/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
H9AyT4oBgHgl3EQfrvmy/content/2301.00567v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52bc1e2fdaf2ad98b392a60cfa0937d85958f7c16ac9fb41c1b8909c7c97339b
3
+ size 481103
I9FJT4oBgHgl3EQfGCzl/content/tmp_files/2301.11446v1.pdf.txt ADDED
@@ -0,0 +1,877 @@
1
+ ON GRANULARITY OF PROSODIC REPRESENTATIONS IN EXPRESSIVE
2
+ TEXT-TO-SPEECH
3
+ Mikolaj Babianski, Kamil Pokora, Raahil Shah, Rafal Sienkiewicz, Daniel Korzekwa, Viacheslav Klimkov
4
+ Amazon Text-to-Speech Research
5
+ {babiansk, kamipoko, vklimkov}@amazon.com
6
+ ABSTRACT
7
+ In expressive speech synthesis it is widely adopted to use
8
+ latent prosody representations to deal with variability of the
9
+ data during training.
10
+ Same text may correspond to vari-
11
+ ous acoustic realizations, which is known as a one-to-many
12
+ mapping problem in text-to-speech.
13
+ Utterance, word, or
14
+ phoneme-level representations are extracted from target sig-
15
+ nal in an auto-encoding setup, to complement phonetic input
16
+ and simplify that mapping. This paper compares prosodic
17
+ embeddings at different levels of granularity and examines
18
+ their prediction from text. We show that utterance-level em-
19
+ beddings have insufficient capacity and phoneme-level tend
20
+ to introduce instabilities when predicted from text. Word-
21
+ level representations impose balance between capacity and
22
+ predictability. As a result, we close the gap in naturalness by
23
+ 90% between synthetic speech and recordings on LibriTTS
24
+ dataset, without sacrificing intelligibility.
25
+ Index Terms— speech synthesis, TTS, prosody, Text-to-
26
+ Speech, representation learning
27
+ 1. INTRODUCTION
28
+ Neural Text-to-Speech (NTTS) [1] is characterized by syn-
29
+ thesizing speech waveform solely with deep neural networks.
30
+ This paradigm greatly enhanced naturalness and flexibility of
31
+ speech synthesis. It enables new applications such as expres-
32
+ sive [2, 3] and low-resource [4] speech generation, speaker
33
+ identity [5] and prosody transplantation [6, 7]. This paper
34
+ focuses on expressive speech synthesis, i.e.
35
+ generation of
36
+ speech that originally contains great degree of variation in
37
+ terms of intonation and inflections.
38
+ This variation is not described by phoneme sequence, typ-
39
+ ically used as input to NTTS. Thus, the statistical model has
40
+ to perform a one-to-many mapping, where the same input
41
+ text can correspond to different acoustic realizations. Vanilla
42
+ modelling approaches suffer from averaging and fail to repro-
43
+ duce the original variability of the training data.
44
+ To avoid averaging, it is common to use additional input
45
+ that describes variability in the data. Initially, it was proposed
46
+ to extract a single latent representation of the target speech in
47
+ an auto-encoder manner for the whole utterance [8]. Target
48
+ speech is not available during inference, so either the cen-
49
+ troid representation is used [9] or it is separately predicted
50
+ from text [10, 11].
51
+ A single representation for the whole
52
+ utterance can’t store temporal information effectively, thus,
53
+ it was proposed to use more fine-grained representations at
54
+ the phoneme-level for the task of prosody transplantation [6,
55
+ 12]. This idea was further expanded to text-to-speech, where
56
+ word-level [13, 14] and phoneme-level [15, 16, 17] represen-
57
+ tations were utilized. At the fine-grained level, prosody can
58
+ be represented with pre-extracted features such as pitch, en-
59
+ ergy, spectral tilt, but learnt representations can convey more
60
+ information and represent more abstract aspects of prosody
61
+ such as emotions. Therefore, in the rest of the paper we focus
62
+ on learnt representations.
63
+ This paper provides a systematic comparison of prosodic
64
+ representations at different levels of granularity. We compare
65
+ performance of utterance, word, and phoneme-level prosody
66
+ embeddings in terms of a) capacity: what if we have a perfect
67
+ prosody predictor; b) predictability: how sensitive is the ap-
68
+ proach to inaccurate prosody predictions. Main contributions
69
+ of this study are:
70
+ • We systematically compare prosody embeddings at dif-
71
+ ferent levels of granularity.
72
+ • A solution to intelligibility issues in the case of phoneme-
73
+ level prosody reference is proposed.
74
+ • We show the trade-off between capacity and pre-
75
+ dictability of prosody embeddings, advocating the use
76
+ of word-level representations.
77
+ • We examine data quantity and input features needed for
78
+ robust prosody prediction from text.
79
+ The rest of the paper is organized as follows: Section 2
80
+ describes the text-to-speech framework used; Section 3 elab-
81
+ orates on prosody embedding prediction from text; Section 4
82
+ compares prosody embeddings at different levels of granular-
83
+ ity in objective and subjective evaluations; Section 5 presents
84
+ ablation studies on prosody embedding prediction; Section 6
85
+ concludes the paper.
86
+ 978-1-6654-7189-3/22/$31.00 ©2023 IEEE
87
+ arXiv:2301.11446v1 [eess.AS] 26 Jan 2023
88
+
89
+ (a)
90
+ (b)
91
+ Fig. 1. Schematic diagram of the TTS model during a) training and b) inference. The dashed arrow denotes sampling from
92
+ parametric distribution. Components in red are of prosody embeddings granularity (utterance/word/phoneme). Green, dashed
93
+ lines denote loss functions.
94
+ 2. ACOUSTIC MODEL
95
+ The backbone of our acoustic model architecture (Figure 1) is
96
+ similar to the explicit duration TTS model presented in Shah
97
+ et al. [4]. It follows the encoder-decoder paradigm, where the
98
+ input phoneme sequence x is encoded by a phoneme encoder
99
+ presented in the Tacotron2 [1] paper. We concatenate the en-
100
+ coded phoneme sequence with both speaker s and prosody z
101
+ embeddings upsampled by repetition [15, 16] to the phoneme-
102
+ level. Speaker embeddings are represented as corresponding
103
+ entries in the embedding look-up table. Prosody embeddings
104
+ are obtained via compression of the mel-spectrogram y with
105
+ the use of variational prosody reference encoder described in
106
+ Section 2.1. During inference, the encoded sequence is up-
107
+ sampled accordingly to alignments produced by the duration
108
+ model, described in Section 2.2. The upsampled sequence is
109
+ then passed to the decoder to map the disentangled linguistic
110
+ features, speaker and prosodic contents into acoustic param-
111
+ eters represented as mel-spectrograms. In this work, we use
112
+ the non-autoregressive decoder presented in Shah et al. [4].
113
+ 2.1. Variational Prosody Reference Encoder
114
+ To alleviate the one-to-many problem of TTS we use the vari-
115
+ ational prosody reference encoder [16]. We aim to learn the
116
+ latent representation of the information, which cannot be de-
117
+ rived from the other input streams - phoneme sequence and
118
+ speaker embedding. For clarity of the architecture presenta-
119
+ tion, here we describe only one level of granularity - word-
120
+ level. Modification of the model architecture to adjust for dif-
121
+ ferent prosody embedding granularities is described in Sec-
122
+ tion 2.3. The variational reference encoder (Figure 1a) takes
123
+ target mel-spectrogram frames as input and converts them
124
+ into a sequence of n latent vectors z, which corresponds to
125
+ the number of words in the utterance. We refer to this repre-
126
+ sentation as word-level prosody embeddings.
127
+ The encoder comprises a stack of six residual gated con-
128
+ volution blocks [18]. Each residual gated convolution block
129
+ is composed of a 1D-convolution with a kernel size of 15 and
130
+ a hidden dimension of 512, followed by a tanh filter and a
131
+ sigmoid activation gate which are element-wise multiplied
132
+ and then added to a residual connection. The convolution
133
+ stack is followed by a BiLSTM layer with a hidden dimension
134
+ of 128. We use a dropout of 0.1 in convolutional and BiLSTM
135
+ layers. The BiLSTM layer output is firstly aggregated to the
136
+ word-level by taking a middle frame of each word. Then,
137
+ after a dense projection we obtain a sequence of Gaussian
138
+ distribution parameters µ and σ, which we use to sample a
139
+ sequence of prosody embeddings corresponding to words z
140
+ of dimension 8. Finally, we upsample the word-level prosody
141
+ embeddings by repetition to the phoneme-level and concate-
142
+ nate them with the phoneme encoder output (Figure 1a).
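To make the reference-encoder description above concrete, here is a minimal PyTorch sketch of the same ingredients (gated residual convolutions, a BiLSTM, middle-frame aggregation per word, and a reparameterised Gaussian sample). It is not the authors' implementation; the tensor shapes and the word_mid_frames index input are our assumptions.

# Minimal sketch (not the paper's code): word-level variational reference encoder.
import torch
import torch.nn as nn

class GatedConvBlock(nn.Module):
    def __init__(self, channels=512, kernel_size=15, dropout=0.1):
        super().__init__()
        pad = kernel_size // 2
        self.filter = nn.Conv1d(channels, channels, kernel_size, padding=pad)
        self.gate = nn.Conv1d(channels, channels, kernel_size, padding=pad)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):                                    # x: [B, C, T]
        y = torch.tanh(self.filter(x)) * torch.sigmoid(self.gate(x))
        return x + self.dropout(y)                           # residual connection

class WordLevelReferenceEncoder(nn.Module):
    def __init__(self, n_mels=80, hidden=512, lstm_hidden=128, z_dim=8):
        super().__init__()
        self.pre = nn.Conv1d(n_mels, hidden, kernel_size=1)
        self.blocks = nn.ModuleList([GatedConvBlock(hidden) for _ in range(6)])
        self.blstm = nn.LSTM(hidden, lstm_hidden, batch_first=True, bidirectional=True)
        self.proj = nn.Linear(2 * lstm_hidden, 2 * z_dim)    # -> mu and log-sigma

    def forward(self, mel, word_mid_frames):
        # mel: [B, T, n_mels]; word_mid_frames: [B, n_words] middle-frame indices (long)
        h = self.pre(mel.transpose(1, 2))                    # [B, hidden, T]
        for block in self.blocks:
            h = block(h)
        h, _ = self.blstm(h.transpose(1, 2))                 # [B, T, 2*lstm_hidden]
        idx = word_mid_frames.unsqueeze(-1).expand(-1, -1, h.size(-1))
        word_h = torch.gather(h, 1, idx)                     # middle frame of each word
        mu, log_sigma = self.proj(word_h).chunk(2, dim=-1)   # [B, n_words, z_dim] each
        z = mu + log_sigma.exp() * torch.randn_like(mu)      # reparameterised sample
        return z, mu, log_sigma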
143
+ As we do not have access to target mel-spectrograms at
144
+ inference time (Figure 1b), a separate model is introduced to
145
+ predict prosody embeddings z from text. The architecture for
146
+ this model is described in Section 3.
147
+ 2.2. Duration Model
148
+ Neural TTS requires learning the alignment between two dif-
149
+ ferent length sequences, which are the text, represented by
150
+ phonemes, and speech, represented by acoustic parameters
151
+ i.e. mel-spectrogram frames. There are two major approaches
152
+ to obtain the alignment: attention-based [1] and explicit-
153
+ duration-based [4, 19, 16, 20]. Attention-based components
154
+ typically used for this task have known instabilities, which
155
+ manifest in synthesised speech as mumbling, early cut-offs,
156
+ word repetition and word skipping [21, 22, 23]. Following
157
+ recent research in the field inspired by traditional parametric
158
+ speech synthesis techniques [24, 25], these issues are mit-
159
+ igated by explicitly modelling the durations of phonemes
160
+ [4, 19, 16, 20], an approach we adopt in this work.
+ [Figure 1 diagram labels omitted: training path: mel-spectrogram y -> 6 x gated conv -> BiLSTM -> aggregation -> dense projection -> (mu, sigma) with a DKL term against N(0,1) -> sampled prosody embeddings z; a duration model trained with an L2 loss against oracle durations; phoneme sequence x and speaker embedding s are encoded, concatenated with upsampled z, and decoded into the predicted mel-spectrogram. At inference the reference encoder is replaced by the prosody embeddings predictor driven by BERT embeddings b and x.]
+ We use forced alignment from a Gaussian Mixture Model
211
+ (GMM) based external aligner in the Kaldi Speech Recog-
212
+ nition Toolkit [26] to produce ground truth duration for
213
+ each phoneme, represented as the integer number of mel-
214
+ spectrogram frames it corresponds to. To predict these du-
215
+ rations, we train a duration model component following the
216
+ architecture detailed in Shah et al. [4], with the addition of
217
+ speaker and prosody embeddings conditioning. The duration
218
+ model is trained jointly with the acoustic model by minimiz-
219
+ ing L2 loss function in the logarithmic domain between pre-
220
+ dicted and ground truth phoneme durations. During training,
221
+ teacher forcing is used, i.e. the acoustic model uses ground
222
+ truth duration values to upsample a phoneme’s encoding to
223
+ the respective number of mel-spectrogram frames.
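As shown in the sketch below, duration-based upsampling with teacher forcing simply repeats each phoneme encoding by its oracle frame count; the tensors and names are illustrative, not the paper's code.

# Minimal sketch of duration-based upsampling by repetition.
import torch

def upsample_by_duration(phoneme_enc, durations):
    # phoneme_enc: [n_phonemes, dim]; durations: integer frame counts per phoneme
    return torch.repeat_interleave(phoneme_enc, durations, dim=0)   # [sum(durations), dim]

enc = torch.randn(3, 4)                  # 3 phonemes, 4-dim encodings
dur = torch.tensor([2, 1, 3])            # oracle durations from the forced aligner
frames = upsample_by_duration(enc, dur)  # shape [6, 4], passed on to the decoder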
224
+ 2.3. Prosody Embeddings Granularity
225
+ The model described above uses word-level prosody embed-
226
+ dings, which means that there is one embedding correspond-
227
+ ing to each word in the input text. In this work we also explore
228
+ two other levels of prosody modelling granularity: phoneme-
229
+ level (one embedding per each phoneme) and utterance-level
230
+ (single embedding for the whole utterance). For the phoneme-
231
+ level prosody modelling we change the reference encoder to
232
+ output one embedding per each phoneme and reduce prosody
233
+ embedding dimension from 8 to 3, which we found optimal
234
+ in terms of stability, for this level of granularity. In the case
235
+ of utterance-level prosody modelling we use a stride of 2 in
236
+ the residual gated convolution blocks of the reference encoder
237
+ in order to gradually downsample the time resolution [27].
238
+ Then, we project the first and last state of the BiLSTM layer
239
+ into two vectors of dimension 64, which represent mean µ
240
+ and standard deviation σ of the posterior distribution. Finally
241
+ we sample a single 64-dimensional prosody embedding z cor-
242
+ responding to the whole utterance.
243
+ 2.4. Training Procedure
244
+ To train the acoustic and duration models we use Adam opti-
245
+ miser with β1 = 0.9 and β2 = 0.98. We use a linear warm-up
246
+ of the learning rate from 0.1 to 1 for the first 10k steps, fol-
247
+ lowed by an exponential decay from 10k steps to 100k steps
248
+ with a minimum value of 10−5. Acoustic and duration models
249
+ are trained jointly for 500K steps with a batch size equal to 32
250
+ and are optimized with respect to the following loss function:
251
+ L_total = L1_melspectrogram + L2_logduration + γ · DKL    (1)
252
+ where L1_melspectrogram is the L1-distance between pre-
253
+ dicted and oracle mel-spectrograms and L2_logduration is the
254
+ L2-distance between predicted and ground truth durations
255
+ calculated in the logarithmic domain.
256
+ DKL is the Kull-
257
+ back–Leibler divergence between outputs of the variational
258
+ prosody reference encoder and N(0, 1). We find the optimal
259
+ value of γ to be 10−3 for the phoneme-level and 10−5 for
260
+ the utterance and word-level prosody modelling. We present
261
+ ablation of the γ parameter in Section 4.4.
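As a concrete rendering of (1), the following is an illustrative sketch of the joint objective with our own variable names; the closed-form KL term assumes diagonal Gaussian posteriors against N(0, 1), and the +1 inside the duration logarithm is an assumption to guard against zero-length phonemes.

# Illustrative sketch of the training objective in Eq. (1); not the authors' code.
import torch
import torch.nn.functional as F

def total_loss(mel_pred, mel_target, log_dur_pred, dur_target, mu, log_sigma, gamma=1e-5):
    l1_mel = F.l1_loss(mel_pred, mel_target)                                # L1 on mel-spectrograms
    l2_dur = F.mse_loss(log_dur_pred, torch.log(dur_target.float() + 1.0))  # L2 in the log domain
    # KL( N(mu, sigma^2) || N(0, 1) ), averaged over all prosody embeddings
    d_kl = 0.5 * (mu.pow(2) + (2.0 * log_sigma).exp() - 2.0 * log_sigma - 1.0).mean()
    return l1_mel + l2_dur + gamma * d_kl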
262
+ 3. PROSODY EMBEDDINGS PREDICTOR
263
+ At inference time we do not have access to target mel-
264
+ spectrograms, therefore, we use a separate model to pre-
265
+ dict prosodic representations z from text (Figure 1b). The
266
+ prosody embeddings predictor model (Figure 2) has three in-
267
+ put streams: phoneme sequence, contextual word embeddings
268
+ extracted with a pre-trained BERT model [28] and speaker
269
+ embedding. Phoneme sequence and contextual word embed-
270
+ dings are encoded by separate Tacotron2-based encoders. We
271
+ upsample the encoded BERT embeddings from the word-
272
+ level to the phoneme-level and the speaker embedding to the
273
+ phoneme-level, before concatenating them with the encoded
274
+ phoneme representations.
275
+ Next, we pass the concatenated
276
+ sequence to another Tacotron2-based encoder block. After
277
+ that, in the case of word-level embeddings prediction, the
278
+ encoded phoneme-level representation is aggregated to the
279
+ word-level by taking the middle frame of each word.
280
+ Fi-
281
+ nally, we use an autoregressive decoder to predict prosody
282
+ embeddings. The autoregressive decoder is inspired by the
283
+ architecture of the Tacotron2 mel-spectrogram decoder. In
284
+ order to adapt the decoder to the prosody prediction task, we
285
+ reduce the hidden dimension of the LSTM-layers to 128 and
286
+ pre-net hidden dimension to 6. To predict the utterance-level
287
+ prosody representation, instead of the autoregressive decoder,
288
+ we use a simple linear projection layer.
289
+ We train this model using prosody embeddings extracted
290
+ with previously trained acoustic model as target labels.
291
+ Specifically, the model is trained to predict posterior mean µ
292
+ for each target prosody embedding using L2 loss and teacher-
293
+ forcing framework.
294
+ Fig. 2. Schematic diagram of the prosody embeddings predic-
295
+ tor model. Components with dashed border are used only for
296
+ fine-grained (word or phoneme-level) prosody embeddings
297
+ prediction.
+ [Figure 2 diagram labels omitted: BERT embeddings b and phoneme sequence x are encoded separately, concatenated with the speaker embedding s, passed through a further encoder, optionally aggregated from a phoneme-level to a word-level representation, and decoded autoregressively into the predicted prosody embeddings.]
+ 4. EXPERIMENTS - PROSODY EMBEDDINGS
318
+ GRANULARITY
319
+ In this section we conduct a systematic study of prosodic rep-
320
+ resentations at different levels of granularity applied to the ex-
321
+ pressive TTS task. The performance of utterance, word, and
322
+ phoneme-level prosody embeddings is compared in terms of
323
+ capacity and predictability. We evaluate naturalness as well
324
+ as stability and intelligibility of synthesized speech.
325
+ 4.1. Data
326
+ Evaluations are conducted on a publicly available corpus of
327
+ audiobook recordings - LibriTTS [29], from which we use
328
+ only recordings marked as clean. The training set consists
329
+ of approximately 250 hours of speech (split into 140,000 ut-
330
+ terances) narrated in an expressive manner by 1229 speakers.
331
+ For validation we use a held-out set of 1000 randomly se-
332
+ lected utterances from the 100 most frequent speakers. We
333
+ extract 80-band mel-spectrograms with a 12.5 ms frame-shift
334
+ as acoustic features.
335
+ 4.2. Systems
336
+ We use our acoustic model (Section 2) along with the prosody
337
+ embeddings predictor (Section 3) to test 3 levels of prosody
338
+ embeddings granularity:
339
+ 1) G-VAE - utterance-level. 2) W-VAE - word-level. 3) P-VAE - phoneme-level. All mel-
342
+ spectrogram prediction systems are used in combination with
343
+ the Universal Neural Parallel WaveNet Vocoder [30] in order
344
+ to obtain a 24kHz audio signal.
345
+ 4.3. Subjective Evaluation Protocol
346
+ For the subjective evaluation we conduct MUSHRA tests [31]
347
+ with the Amazon Mechanical Turk platform. 60 native En-
348
+ glish speakers are presented with the samples in a random
349
+ order side-by-side, and are asked to “Evaluate naturalness of
350
+ the samples on the scale from 0 to 100.” A total of 1000 ut-
351
+ terances are used for testing and the test is balanced in such a
352
+ way that each test case is scored by 3 listeners independently.
353
+ Ground truth mel-spectrograms vocoded with the Universal
354
+ Parallel WaveNet Vocoder (Ref system) are used as an upper
355
+ anchor. The significance of the MUSHRA results is analyzed
356
+ using a Wilcoxon signed-rank test with Bonferroni-Holm cor-
357
+ rection applied [32].
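For reference, the significance analysis can be reproduced along these lines; this is a sketch with randomly generated placeholder scores, and the pairing of per-utterance scores and the alpha level are our assumptions.

# Sketch: pairwise Wilcoxon signed-rank tests with Bonferroni-Holm correction.
import numpy as np
from scipy.stats import wilcoxon
from statsmodels.stats.multitest import multipletests

rng = np.random.default_rng(0)
ref = rng.normal(74, 10, size=1000)                    # placeholder per-utterance scores
systems = {"G-VAE": rng.normal(68, 10, size=1000),
           "W-VAE": rng.normal(72, 10, size=1000),
           "P-VAE": rng.normal(72, 10, size=1000)}
pvals = [wilcoxon(scores, ref).pvalue for scores in systems.values()]
reject, p_adj, _, _ = multipletests(pvals, alpha=0.01, method="holm")
print(dict(zip(systems, p_adj)), reject)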
358
+ 4.4. Stability
359
+ To quantify the stability of tested systems and the intelligibil-
360
+ ity of synthesized speech we conduct Word Error Rate (WER)
361
+ analysis. The whole test set of 1000 utterances described in
362
+ Section 4.1 is used for the evaluation. We transcribe speech
363
+ generated in the TTS mode (prosody embeddings predicted
364
+ from text) with the ASpIRE Chain ASR model from Kaldi.
365
+ Then the WER is computed between the sentence text and the
366
+ corresponding transcription.
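The WER itself is a word-level edit distance; the following self-contained sketch is our re-implementation for illustration, not the Kaldi/ASpIRE tooling.

# Word Error Rate via word-level Levenshtein distance (illustrative).
def word_error_rate(reference: str, hypothesis: str) -> float:
    ref, hyp = reference.split(), hypothesis.split()
    dist = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dist[i][0] = i                     # deletions
    for j in range(len(hyp) + 1):
        dist[0][j] = j                     # insertions
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            sub = dist[i - 1][j - 1] + (ref[i - 1] != hyp[j - 1])
            dist[i][j] = min(sub, dist[i - 1][j] + 1, dist[i][j - 1] + 1)
    return dist[len(ref)][len(hyp)] / max(len(ref), 1)

print(word_error_rate("the cat sat on the mat", "the cat sit on mat"))  # 2 errors / 6 words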
367
+ The G-VAE and W-VAE models have WER scores (Ta-
368
+ ble 1) comparable to recordings when trained with DKL loss
369
+ weight (γ) equal to 10−5. In contrast, training the P-VAE model
370
+ in an analogous setup results in significant stability issues.
371
+ We believe that this is caused by the phoneme-level prosody
372
+ embeddings distribution being very hard to predict from text.
373
+ Only after applying more regularization during training by in-
374
+ creasing DKL loss weight we are able to obtain a phoneme-
375
+ level model matching other systems in terms of WER score.
376
+ Intelligibility is a crucial property of a TTS system. Therefore,
377
+ for all the other experiments we use the G-VAE and W-VAE
378
+ models trained with γ = 10−5 and the P-VAE model trained
379
+ with γ = 10−3, as they match our stability require-
380
+ ments. We found that further increasing the γ parameter does not
381
+ bring any significant improvements and may lead to degrada-
382
+ tion in segmental quality of synthesized audio.
383
+ System   DKL γ   WER ↓
+ G-VAE    1e-5    2.18% ± 0.30
+ W-VAE    1e-5    2.13% ± 0.30
+ P-VAE    1e-5    3.59% ± 0.36
+          1e-4    2.47% ± 0.31
+          1e-3    2.17% ± 0.29
+          1e-2    2.17% ± 0.29
+ Ref      -       2.29% ± 0.31
404
+ Table 1. Word Error Rate with 95% confidence intervals [33]
405
+ computed across the 1000 test utterances, along with DKL
406
+ loss weight (γ).
407
+ 4.5. Capacity
408
+ We analyse the best-case performance of acoustic models by
409
+ simulating perfectly predicted prosody embeddings in the Or-
410
+ acle Resynthesis setup. That is, at inference time we pro-
411
+ vide ground truth latent representations from the variational
412
+ reference encoder for all tested systems. We evaluate natu-
413
+ ralness in a subjective test as described in Section 4.3 and
414
+ summarize results in Figure 3a. In this setup, the G-VAE
415
+ model scores significantly lower than the other systems (p-
416
+ value < 0.01), suggesting that fine-grained embeddings are
417
+ required for natural prosody modelling. There is no statisti-
418
+ cally significant difference between W-VAE and P-VAE sys-
419
+ tems (p-value > 0.01). It is worth mentioning, that we have
420
+ also conducted an analogous experiment with a P-VAE model
421
+ trained with a lower DKL loss weight (γ = 10−5). With such a
422
+ model, when ground truth prosody embeddings are provided dur-
423
+ ing inference, we are able to reconstruct speech almost per-
424
+ fectly. However, it comes at the cost of stability issues when
425
+ prosody embeddings predicted from text are used as described
426
+ in Section 4.4.
427
+
428
+ (a) Ground truth prosody embeddings
429
+ (b) Prosody embeddings predicted from text
430
+ Fig. 3. Subjective listeners ratings from the naturalness MUSHRA tests with a) ground truth prosody embeddings and b) prosody
431
+ embeddings predicted from text (TTS). Mean scores are reported below the system names.
432
+ To gain a deeper understanding of prosody representa-
433
+ tions, we evaluate our acoustic models in the Oracle Resyn-
434
+ thesis setup with changed speaker embedding. That is, we
435
+ extract prosody from a source recording and resynthesize the
436
+ same text with changed voice - a so-called Voice Conversion
437
+ (VC). We convert all 1000 test utterances into 4 selected (two
438
+ male and two female) target voices.
439
+ To effectively measure how close the prosody patterns
440
+ of converted speech are to the source recordings, we first
441
+ extract fundamental frequency (F0) at the frame-level from
442
+ source and converted audio pairs with the RAPT algorithm
443
+ [34]. Then we calculate two metrics commonly used to mea-
444
+ sure the linear dependence of prosody contours: Pearson
445
+ Correlation Coefficient (PCC) and Root Mean Squared Error
446
+ (RMSE) [35]. We can see that the results (Table 2) are very
447
+ similar for the W-VAE and P-VAE models, while the G-VAE
448
+ performs much worse in both metrics. This reinforces the
449
+ subjective evaluation findings that utterance-level embed-
450
+ dings do not provide sufficient capacity to capture expressive
451
+ prosody and fine-grained modelling is required for expressive
452
+ speech generation.
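Given frame-aligned F0 contours for the source and converted audio (voiced frames only), both metrics reduce to a few lines; the toy values below and any F0 normalisation are assumptions.

# Illustrative F0 RMSE and Pearson correlation between two aligned contours.
import numpy as np

def f0_rmse(f0_src, f0_conv):
    f0_src, f0_conv = np.asarray(f0_src, float), np.asarray(f0_conv, float)
    return float(np.sqrt(np.mean((f0_src - f0_conv) ** 2)))

def f0_pcc(f0_src, f0_conv):
    return float(np.corrcoef(f0_src, f0_conv)[0, 1])

src = np.array([180.0, 185.0, 190.0, 200.0])    # toy F0 values
conv = np.array([178.0, 186.0, 188.0, 203.0])
print(f0_rmse(src, conv), f0_pcc(src, conv))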
453
+ System   F0 RMSE ↓       F0 PCC ↑
+ G-VAE    1.693 ± 0.012   0.760 ± 0.003
+ W-VAE    1.535 ± 0.011   0.801 ± 0.002
+ P-VAE    1.539 ± 0.012   0.802 ± 0.003
465
+ Table 2. Objective Voice Conversion evaluation metrics with
466
+ 95% confidence intervals computed between source and con-
467
+ verted speech: F0 Root Mean Square Error (RMSE), F0 Pear-
468
+ son Correlation Coefficient (PCC) [35].
469
+ 4.6. Predictability
470
+ Finally, we evaluate our systems in the TTS scenario. That
471
+ is, we generate speech from textual input only and provide
472
+ prosody embeddings predicted from text during inference.
473
+ The naturalness of synthesized speech is evaluated subjec-
474
+ tively as described in Section 4.3. The results are summarised
475
+ in Figure 3b (all comparisons are statistically significant with
476
+ p-value < 0.01). The G-VAE model scores much worse
477
+ than other systems. It fails to reproduce expressive speech
478
+ variability and tends to output flat prosody contours due to
479
+ averaging. We also observe issues with accurate phoneme
480
+ duration prediction for the G-VAE model, e.g.
481
+ unnatural,
482
+ fast-paced speech. We hypothesise that a single representa-
483
+ tion for the whole utterance can’t store temporal information
484
+ effectively. Such issues are not visible in the models using
485
+ fine-grained prosody representations.
486
+ While both of them
487
+ score much higher than the G-VAE, the word-level model
488
+ performs better in terms of fidelity and prosody naturalness
489
+ and closes the gap between the P-VAE model and the ref-
490
+ erence system by over 90%. We conclude that word-level
491
+ representations offer a good compromise for expressive
492
+ prosody modelling granularity. They have enough capacity to
493
+ produce varied and natural speech and still can be accurately
494
+ predicted from text.
495
+ 5. EXPERIMENTS - PROSODY PREDICTION
496
+ In this section we focus on semantically concerted prosody
497
+ prediction. First, we investigate the impact of data quantity
498
+ used in the training procedure. Second, we conduct an abla-
499
+ tion study of the prosody embeddings predictor input streams.
500
+ 5.1. Data Quantity
501
+ We investigate the impact of training data quantity on our
502
+ system, by looking into a single-speaker scenario with lim-
503
+ ited amount of data.
504
+ We build a dataset by taking 15000
505
+ utterances from the HiFi corpus [36] coming from one male
506
+ speaker (id 6097). Again, we keep a held-out set of 1000
507
+ randomly selected utterances for validation.
+ [Figure 3 chart data: mean MUSHRA naturalness scores; (a) ground truth prosody embeddings: G-VAE 67.77, W-VAE 72.72, P-VAE 72.03, Ref 74.00; (b) prosody embeddings predicted from text: G-VAE 62.99, W-VAE 71.65, P-VAE 68.62, Ref 71.80.]
+ We train the word-level acoustic model and the prosody embeddings pre-
541
+ dictor in two setups: 1) HiFi - using only 15000 utterances
542
+ from a single HiFi speaker. 2) HiFi+LibriTTS - addition-
543
+ ally adding LibriTTS corpus, which results in approximately
544
+ 155000 utterances in the training set.
545
+ We evaluate both
546
+ scenarios using a MUSHRA subjective naturalness test anal-
547
+ ogously to Section 4.6 and summarize results in Table 3.
548
+ Using an additional, large-scale dataset during training sig-
549
+ nificantly improves naturalness of synthesized speech (p-
550
+ value < 0.01). Per-case analysis of listeners' judgements
551
+ reveals that both systems produce audio of similar segmental
552
+ quality, but the HiFi+LibriTTS system provides more stable
553
+ prosody, especially for longer utterances. We conclude that
554
+ semantically concerted prosody prediction is a data-hungry
555
+ problem and a limited amount of training data can result in less
556
+ stable prosody of generated speech. However, using auxiliary
557
+ data in the training procedure allows us to obtain a more robust
558
+ prosody predictor and mitigate this issue.
559
+ HiFi    HiFi+LibriTTS    Ref
+ 64.26   67.01            67.17
565
+ Table 3. Mean MUSHRA scores for the word-level models
566
+ trained on a single speaker from the HiFi corpus with and
567
+ without auxiliary LibriTTS data. All comparisons from this
568
+ Table are statistically significant (p-value < 0.01).
569
+ 5.2. Prosody Predictor Input Streams Ablation Study
570
+ In previous works, word-level prosody embeddings are typ-
571
+ ically predicted at inference time from one of the following
572
+ representations derived from text: word-level contextual em-
573
+ beddings [13, 37] or phoneme sequence [14, 38]. We use both
574
+ of them in our prosody embeddings predictor model. To de-
575
+ termine the contribution of each input stream to the model
576
+ performance we conduct an ablation study. We train the word-
577
+ level prosody embeddings predictor model in three configura-
578
+ tions: BERT & Phoneme - trained exactly as described in sec-
579
+ tion 3; BERT - with only BERT embeddings input; Phoneme -
580
+ with only phoneme sequence input. All three predictor mod-
581
+ els are used in combination with the same W-VAE acoustic
582
+ model to synthesize 1000 validation utterances from section
583
+ 4.1. As a subjective naturalness evaluation, a preference test
584
+ is carried out using Amazon Mechanical Turk platform. Na-
585
+ tive English speakers are asked to “Select which audio sounds
586
+ more natural” for pairs of audio samples generated with dif-
587
+ ferent systems. We find that removing fine-grained phoneme
588
+ sequence input stream (Figure 4a) causes less stable prosody
589
+ prediction and results in significant degradation in naturalness
590
+ (p-value < 0.01). Whereas, the difference between BERT &
591
+ Phoneme and Phoneme systems (Figure 4b) is not statistically
592
+ significant. Listening to samples revealed that the improve-
593
+ ment of using contextual word embeddings comes mainly in
594
+ better phrasing and pause prediction. However, differences
595
+ are quite subtle and therefore are not reflected in the crowd-
596
+ sourced naturalness evaluation results.
597
+ This result contra-
598
+ dicts the findings from [13], where a significant quality degra-
599
+ dation was reported when removing the BERT embeddings input.
600
+ However, in [13] authors proposed to use word-level syntax
601
+ features e.g. part-of-speech labels, compound-noun flag and
602
+ punctuation flag as a second input stream to the model pre-
603
+ dicting prosody embeddings.
604
+ We believe that fine-grained
605
+ phoneme sequence input is more correlated with prosody than
606
+ those syntax features, therefore, removing BERT input stream
607
+ is less harmful in our work.
608
+ (a)
609
+ (b)
610
+ Fig. 4.
611
+ Results of naturalness preference tests.
612
+ On the
613
+ right the word-level prosody embeddings predictor model de-
614
+ scribed in Section 3. On the left the same model but with
615
+ only one input stream: a) BERT embeddings, b) phoneme se-
616
+ quence.
617
+ 6. CONCLUSIONS
618
+ In this paper we explored the design of prosodic representa-
619
+ tions learned in an auto-encoder manner. First, we introduced
620
+ a TTS framework allowing for a fair comparison of prosody
621
+ embeddings of different granularity. Then a systematic study
622
+ of utterance, word and phoneme-level representations was
623
+ conducted on a large-scale, publicly available dataset - Lib-
624
+ riTTS. Through our experiments, we demonstrated the trade-
625
+ off between capacity and predictability of prosody represen-
626
+ tations.
627
+ We showed that utterance-level embeddings have
628
+ insufficient capacity to model expressive speech variability.
629
+ In contrast, phoneme-level representations require strong reg-
630
+ ularization for stable prediction from text at inference time.
631
+ We found that word-level embeddings strike a good balance
632
+ between capacity and predictability. As a result, we closed
633
+ the gap in naturalness by 90% between synthetic speech
634
+ and recordings without sacrificing intelligibility.
635
+ Finally,
636
+ we looked into applying the proposed approach in a single-
637
+ speaker scenario.
638
+ We showed that semantically concerted
639
+ prosody prediction is a data-hungry problem and a limited
640
+ amount of training data can result in less stable prosody of
641
+ generated speech. However, this issue can be mitigated by
642
+ using auxiliary data in the training procedure.
+ [Figure 4 chart data: naturalness preference shares; (a) BERT 32.87%, No-Pref 31.78%, BERT & Phoneme 35.35%; (b) Phoneme 34.26%, No-Pref 31.27%, BERT & Phoneme 34.47%.]
+ 7. REFERENCES
665
+ [1] Jonathan Shen, Ruoming Pang, Ron J Weiss, Mike
666
+ Schuster, Navdeep Jaitly, Zongheng Yang, Zhifeng
667
+ Chen, Yu Zhang, Yuxuan Wang, Rj Skerrv-Ryan, et al.,
668
+ “Natural tts synthesis by conditioning wavenet on mel
669
+ spectrogram predictions,” in ICASSP. IEEE, 2018, pp.
670
+ 4779–4783.
671
+ [2] Nishant Prateek, Mateusz Łajszczak, Roberto Barra-
672
+ Chicote, Thomas Drugman, Jaime Lorenzo-Trueba,
673
+ Thomas Merritt, Srikanth Ronanki, and Trevor Wood,
674
+ “In other news: A bi-style text-to-speech model for syn-
675
+ thesizing newscaster voice with limited data,”
676
+ arXiv
677
+ preprint arXiv:1904.02790, 2019.
678
+ [3] Abdelhamid Ezzerg, Adam Gabrys, Bartosz Pu-
684
+ trycz, Daniel Korzekwa, Daniel Saez-Trigueros, David
685
+ McHardy, Kamil Pokora, Jakub Lachowicz, Jaime
686
+ Lorenzo-Trueba, and Viacheslav Klimkov,
687
+ “Enhanc-
688
+ ing audio quality for expressive neural text-to-speech,”
689
+ arXiv preprint arXiv:2108.06270, 2021.
690
+ [4] Raahil Shah, Kamil Pokora, Abdelhamid Ezzerg, Vi-
691
+ acheslav Klimkov, Goeric Huybrechts, Bartosz Pu-
692
+ trycz, Daniel Korzekwa, and Thomas Merritt,
693
+ “Non-
694
+ Autoregressive TTS with Explicit Duration Modelling
695
+ for Low-Resource Highly Expressive Speech,” in Proc.
696
+ 11th ISCA Speech Synthesis Workshop (SSW 11), 2021,
697
+ pp. 96–101.
698
+ [5] Sri Karlapati, Alexis Moinet, Arnaud Joly, Viacheslav
699
+ Klimkov, Daniel S´aez-Trigueros, and Thomas Drug-
700
+ man,
701
+ “Copycat: Many-to-many fine-grained prosody
702
+ transfer for neural text-to-speech,”
703
+ arXiv preprint
704
+ arXiv:2004.14617, 2020.
705
+ [6] Viacheslav Klimkov, Srikanth Ronanki, Jonas Rohnke,
706
+ and Thomas Drugman,
707
+ “Fine-grained robust prosody
708
+ transfer for single-speaker neural text-to-speech,” arXiv
709
+ preprint arXiv:1907.02479, 2019.
710
+ [7] Piotr Bilinski, Thomas Merritt, Abdelhamid Ezzerg,
711
+ Kamil Pokora, Sebastian Cygert, Kayoko Yanagisawa,
712
+ Roberto Barra-Chicote, and Daniel Korzekwa, “Creat-
713
+ ing new voices using normalizing flows,” in accepted to
714
+ Interspeech 2022, 2022.
715
+ [8] RJ Skerry-Ryan, Eric Battenberg, Ying Xiao, Yuxuan
716
+ Wang, Daisy Stanton, Joel Shor, Ron Weiss, Rob Clark,
717
+ and Rif A Saurous, “Towards end-to-end prosody trans-
718
+ fer for expressive speech synthesis with tacotron,” in
719
+ international conference on machine learning. PMLR,
720
+ 2018, pp. 4693–4702.
721
+ [9] Ya-Jie Zhang, Shifeng Pan, Lei He, and Zhen-Hua Ling,
722
+ “Learning latent representations for style control and
723
+ transfer in end-to-end speech synthesis,”
724
+ in ICASSP
725
+ 2019-2019 IEEE International Conference on Acous-
726
+ tics, Speech and Signal Processing (ICASSP). IEEE,
727
+ 2019, pp. 6945–6949.
728
+ [10] Daisy Stanton, Yuxuan Wang, and RJ Skerry-Ryan,
729
+ “Predicting expressive speaking style from text in end-
730
+ to-end speech synthesis,” in 2018 IEEE Spoken Lan-
731
+ guage Technology Workshop (SLT). IEEE, 2018, pp.
732
+ 595–602.
733
+ [11] Sri Karlapati, Ammar Abbas, Zack Hodari, Alexis
734
+ Moinet, Arnaud Joly, Penny Karanasou, and Thomas
735
+ Drugman, “Prosodic representation learning and con-
736
+ textual sampling for neural text-to-speech,” in ICASSP
737
+ 2021-2021 IEEE International Conference on Acous-
738
+ tics, Speech and Signal Processing (ICASSP). IEEE,
739
+ 2021, pp. 6573–6577.
740
+ [12] Younggun Lee and Taesu Kim,
741
+ “Robust and fine-
742
+ grained prosody control of end-to-end speech synthe-
743
+ sis,”
744
+ in ICASSP 2019-2019 IEEE International Con-
745
+ ference on Acoustics, Speech and Signal Processing
746
+ (ICASSP). IEEE, 2019, pp. 5911–5915.
747
+ [13] Zack Hodari, Alexis Moinet, Sri Karlapati, Jaime
748
+ Lorenzo-Trueba, Thomas Merritt, Arnaud Joly, Am-
749
+ mar Abbas, Penny Karanasou, and Thomas Drugman,
750
+ “Camp: A two-stage approach to modelling prosody
751
+ in context,” in ICASSP 2021-2021 IEEE International
752
+ Conference on Acoustics, Speech and Signal Processing
753
+ (ICASSP). IEEE, 2021, pp. 6578–6582.
754
+ [14] Konstantinos Klapsas, Nikolaos Ellinas, June Sig Sung,
755
+ Hyoungmin Park, and Spyros Raptis, “Word-level style
756
+ control for expressive, non-attentive speech synthesis,”
757
+ in International Conference on Speech and Computer.
758
+ Springer, 2021, pp. 336–347.
759
+ [15] Yi Ren, Chenxu Hu, Xu Tan, Tao Qin, Sheng Zhao,
760
+ Zhou Zhao, and Tie-Yan Liu, “Fastspeech 2: Fast and
761
+ high-quality end-to-end text to speech,” arXiv preprint
762
+ arXiv:2006.04558, 2020.
763
+ [16] Isaac Elias, Heiga Zen, Jonathan Shen, Yu Zhang,
764
+ Ye Jia, Ron J Weiss, and Yonghui Wu,
765
+ “Parallel
766
+ tacotron: Non-autoregressive and controllable tts,” in
767
+ ICASSP. IEEE, 2021, pp. 5709–5713.
768
+ [17] Yanqing Liu, Zhihang Xu, Gang Wang, Kuan Chen,
769
+ Bohan Li, Xu Tan, Jinzhu Li, Lei He, and Sheng
770
+ Zhao, “Delightfultts: The microsoft speech synthesis
771
+ system for blizzard challenge 2021,”
772
+ arXiv preprint
773
+ arXiv:2110.12612, 2021.
774
+ [18] Aaron van den Oord, Sander Dieleman, Heiga Zen,
775
+ Karen Simonyan, Oriol Vinyals, Alex Graves, Nal
776
+
777
+ Kalchbrenner, Andrew Senior, and Koray Kavukcuoglu,
778
+ “Wavenet: A generative model for raw audio,” arXiv
779
+ preprint arXiv:1609.03499, 2016.
780
+ [19] Yi Ren, Chenxu Hu, Xu Tan, Tao Qin, Sheng Zhao,
781
+ Zhou Zhao, and Tie-Yan Liu, “Fastspeech 2: Fast and
782
+ high-quality end-to-end text to speech,” in ICLR, 2021.
783
+ [20] Jonathan Shen, Ye Jia, Mike Chrzanowski, Yu Zhang,
784
+ Isaac Elias, Heiga Zen, and Yonghui Wu, “Non-attentive
785
+ tacotron:
786
+ Robust and controllable neural tts synthe-
787
+ sis including unsupervised duration modeling,” arXiv
788
+ preprint arXiv:2010.04301, 2020.
789
+ [21] M. He, Y. Deng, and L. He,
790
+ “Robust Sequence-to-
791
+ Sequence Acoustic Modeling with Stepwise Monotonic
792
+ Attention for Neural TTS,” in Interspeech, 2019, pp.
793
+ 1293–1297.
794
+ [22] H. Guo, F. K. Soong, L. He, and L. Xie, “A New GAN-
795
+ Based End-to-End TTS Training Algorithm,” in Inter-
796
+ speech, 2019, pp. 1288–1292.
797
+ [23] E. Battenberg, RJ Skerry-Ryan, S. Mariooryad, D. Stan-
798
+ ton, D. Kao, M. Shannon, and T. Bagby,
799
+ “Location-
800
+ relative attention mechanisms for robust long-form
801
+ speech synthesis,” in ICASSP, 2020, pp. 6194–6198.
802
+ [24] Heiga Ze, Andrew Senior, and Mike Schuster,
803
+ “Sta-
804
+ tistical parametric speech synthesis using deep neural
805
+ networks,”
806
+ in 2013 IEEE International Conference
807
+ on Acoustics, Speech and Signal Processing, 2013, pp.
808
+ 7962–7966.
809
+ [25] Heiga Zen, Keiichi Tokuda, and Alan W. Black, “Statis-
810
+ tical parametric speech synthesis,” Speech Communica-
811
+ tion, vol. 51, no. 11, pp. 1039–1064, 2009.
812
+ [26] Daniel Povey, Arnab Ghoshal, Gilles Boulianne, Lukas
813
+ Burget, Ondrej Glembek, Nagendra Goel, Mirko Han-
814
+ nemann, Petr Motlicek, Yanmin Qian, Petr Schwarz, Jan
815
+ Silovsky, Georg Stemmer, and Karel Vesely, “The kaldi
816
+ speech recognition toolkit,” in IEEE 2011 Workshop on
817
+ Automatic Speech Recognition and Understanding. Dec.
818
+ 2011, IEEE Signal Processing Society, IEEE Catalog
819
+ No.: CFP11SRW-USB.
820
+ [27] Yuxuan Wang, Daisy Stanton, Yu Zhang, RJ-Skerry
821
+ Ryan, Eric Battenberg, Joel Shor, Ying Xiao, Ye Jia,
822
+ Fei Ren, and Rif A Saurous, “Style tokens: Unsuper-
823
+ vised style modeling, control and transfer in end-to-end
824
+ speech synthesis,” in International Conference on Ma-
825
+ chine Learning. PMLR, 2018, pp. 5180–5189.
826
+ [28] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and
827
+ Kristina Toutanova, “Bert: Pre-training of deep bidirec-
828
+ tional transformers for language understanding,” arXiv
829
+ preprint arXiv:1810.04805, 2018.
830
+ [29] Heiga Zen, Viet Dang, Rob Clark, Yu Zhang, Ron J
831
+ Weiss, Ye Jia, Zhifeng Chen, and Yonghui Wu, “Lib-
832
+ ritts: A corpus derived from librispeech for text-to-
833
+ speech,” arXiv preprint arXiv:1904.02882, 2019.
834
+ Yunlong Jiao, Adam Gabryś, Georgi Tinchev, Bartosz
835
+ Putrycz, Daniel Korzekwa, and Viacheslav Klimkov,
836
+ “Universal neural vocoding with parallel wavenet,” in
837
+ ICASSP 2021-2021 IEEE International Conference on
838
+ Acoustics, Speech and Signal Processing (ICASSP).
839
+ IEEE, 2021, pp. 6044–6048.
840
+ [31] Recommendation BS ITU-R, “1534-1,“method for the
841
+ subjective assessment of intermediate quality levels of
842
+ coding systems (mushra)”,” International Telecommu-
843
+ nication Union, 2003.
844
+ [32] Robert AJ Clark, Monika Podsiadlo, Mark Fraser,
845
+ Catherine Mayo, and Simon King, “Statistical analy-
846
+ sis of the blizzard challenge 2007 listening test results,”
847
+ in Proc. Blizzard Challenge Workshop, 2007, vol. 2007.
848
+ [33] Maximilian Bisani and Hermann Ney, “Bootstrap esti-
849
+ mates for confidence intervals in asr performance eval-
850
+ uation,”
851
+ in 2004 IEEE International Conference on
852
+ Acoustics, Speech, and Signal Processing. IEEE, 2004,
853
+ vol. 1, pp. I–409.
854
+ [34] David Talkin and W Bastiaan Kleijn, “A robust algo-
855
+ rithm for pitch tracking (rapt),” Speech coding and syn-
856
+ thesis, vol. 495, pp. 518, 1995.
857
+ [35] Berrak Sisman, Junichi Yamagishi, Simon King, and
858
+ Haizhou Li, “An overview of voice conversion and its
859
+ challenges: From statistical modeling to deep learning,”
860
+ IEEE/ACM Transactions on Audio, Speech, and Lan-
861
+ guage Processing, vol. 29, pp. 132–157, 2020.
862
+ [36] Evelina Bakhturina, Vitaly Lavrukhin, Boris Ginsburg,
863
+ and Yang Zhang,
864
+ “Hi-fi multi-speaker english tts
865
+ dataset,” arXiv preprint arXiv:2104.01497, 2021.
866
+ [37] Yi Ren, Ming Lei, Zhiying Huang, Shiliang Zhang, Qian
867
+ Chen, Zhijie Yan, and Zhou Zhao, “Prosospeech: En-
868
+ hancing prosody with quantized vector pre-training in
869
+ text-to-speech,” in ICASSP 2022-2022 IEEE Interna-
870
+ tional Conference on Acoustics, Speech and Signal Pro-
871
+ cessing (ICASSP). IEEE, 2022, pp. 7577–7581.
872
+ [38] Yiwei Guo, Chenpeng Du, and Kai Yu, “Unsupervised
873
+ word-level prosody tagging for controllable speech syn-
874
+ thesis,” in ICASSP 2022-2022 IEEE International Con-
875
+ ference on Acoustics, Speech and Signal Processing
876
+ (ICASSP). IEEE, 2022, pp. 7597–7601.
877
+
I9FJT4oBgHgl3EQfGCzl/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
KdE2T4oBgHgl3EQfpgjD/content/tmp_files/2301.04030v1.pdf.txt ADDED
@@ -0,0 +1,802 @@
1
+ Conversational Turn-taking as a Stochastic Process
2
+ on Networks
3
+ Lisa O’Bryan1, Santiago Segarra2, Jensine Paoletti1, Stephanie Zajac1, Margaret E. Beier1,
4
+ Ashutosh Sabharwal2, Matthew Wettergreen3, and Eduardo Salas1
5
+ Abstract—Understanding why certain individuals work well
6
+ (or poorly) together as a team is a key research focus in
7
+ the psychological and behavioral sciences and a fundamental
8
+ problem for team-based organizations. Nevertheless, we have a
9
+ limited ability to predict the social and work-related dynamics
10
+ that will emerge from a given combination of team members.
11
+ In this work, we model vocal turn-taking behavior within
12
+ conversations as a parametric stochastic process on a network
13
+ composed of the team members. More precisely, we model
14
+ the dynamic of exchanging the ‘speaker token’ among team
15
+ members as a random walk in a graph that is driven by both
16
+ individual level features and the conversation history. We fit our
17
+ model to conversational turn-taking data extracted from audio
18
+ recordings of multinational student teams during undergraduate
19
+ engineering design internships. Through this real-world data we
20
+ validate the explanatory power of our model and we unveil
21
+ statistically significant differences in speaking behaviors between
22
+ team members of different nationalities.
23
+ I. INTRODUCTION
24
+ Data consisting of entities in interconnected systems are
25
+ ubiquitous in multiple fields. Thus, network structures are
26
+ commonly used across many disciplines for representation and
27
+ analysis of complex information [1], from neuroscience [2]
28
+ to wireless communications [3]. In this work, we represent
29
+ interactions within teams as small in-person social networks.
30
+ Computational models can be an effective way to study
31
+ the social dynamics that emerge from individuals interact-
32
+ ing within groups [4]. In particular, a variety of modeling
33
+ approaches have been used to try to replicate natural turn-
34
+ taking behaviors observed in conversation [5]–[8]. Many of
35
+ these models have been successful in replicating realistic
36
+ patterns of conversational turn-taking. However, the ability to
37
+ understand the driving mechanisms underlying these patterns
38
+ and generalize to novel team compositions is lacking. As
39
+ a step towards this goal, we develop a stochastic model of
40
+ conversations that can be used to explore how individual dif-
41
+ ferences impact the emergence of turn-taking patterns within
42
+ teams. More precisely, we propose a parametric model that
43
+ captures the individuals’ innate tendency to speak as well
44
+ as the effect that having spoken recently has on speaking
45
+ again. At every point in time, the next speaker is drawn
46
+ 1Department of Psychological Sciences, Rice University, Houston, TX,
47
+ USA. 2Department of Electrical and Computer Engineering, Rice University,
48
+ Houston, TX, USA. 3Department of Bioengineering, Rice University, Hous-
49
+ ton, TX, USA. Subsets of the team data used in this manuscript have been
50
+ previously published as part of dissertations by Stephanie Zajac, Department
51
+ of Psychological Sciences, Rice University and Jian Cao, Department of
52
+ Electrical and Computer Engineering, Rice University. Funding for this project
53
+ was provided by a Microsoft Productivity Research Grant, the National
54
+ Science Foundation (Award Number: 1910117), and the Army Research
55
+ Institute (Grant Number: W911NF-22-1-0226).
56
+ from a probability distribution determined by the history
57
+ of speakers and the aforementioned parameters. The model
58
+ replicates the majority of conversational turn-taking patterns
59
+ observed in our real-world data, and our results highlight the
60
+ important role the memory function plays in replicating these
61
+ patterns. Furthermore, our results indicate that differences in
62
+ team member nationality can play a strong role in shaping
63
+ communication patterns within multinational teams.
64
+ Contributions. The contributions of our work are twofold:
65
+ i) We propose a simple parametric stochastic process that can
66
+ capture complex behaviors observed in real data.
67
+ ii) We present a novel dataset of conversational turn-taking
68
+ in undergraduate teams and we apply our model to reveal
69
+ significant differences in speaking behavior between student
70
+ nationalities.
71
+ II. CONVERSATION MODEL
72
+ Inspired by a model by Stasser and Taylor [7], our model
73
+ incorporates two key notions: i) the relative likelihood πi that
74
+ team member i speaks on a given turn independent of their
75
+ speaking history and ii) the effect mi that an individual’s
76
+ current speaking turn has on their likelihood of speaking
77
+ on subsequent turns. We consider the inherent likelihood of
78
+ speaking πi of each member i as independent of the history
79
+ of exchanges and, thus, a constant throughout the conversation.
80
+ In contrast, we encode dependencies within a conversation
81
+ through the (turn-dependent) memory function mi(t). More
82
+ precisely, for a given member i and a turn t, the memory
83
+ value is given by
84
+ m_i(t) = d_i e^{-0.5 (t - t_i^{last})},    (1)
+ where d_i is a learnable parameter that controls the scale of the
+ memory effect for each individual and t_i^{last} denotes the last
+ turn on which member i spoke. The negative exponential form
+ in (1) reveals that the memory value asymptotically decreases
+ to 0 as (t - t_i^{last}) increases, i.e., as more turns occur since
97
+ the last time that member i spoke. This encodes the natural
98
+ assumption that whether or not an individual spoke many turns
99
+ ago is inconsequential to their likelihood of speaking next. The
100
+ memory function in (1) is combined with the innate speaking
101
+ tendencies πi to compute the likelihood ℓi(t) that member i
102
+ speaks at turn t as follows
103
+ ℓ_i(t) = 0 if t_i^{last} = t - 1, and ℓ_i(t) = π_i + m_i(t) otherwise.    (2)
112
+ Speakers are not allowed to speak on two consecutive turns
113
+ since these would simply be considered part of the same turn.
116
+ This is enforced in (2) by setting the likelihood to zero for the
117
+ member that has just spoken. Lastly, denoting by N the total
118
+ number of team members, the likelihoods ℓi(t) are normalized
119
+ to sum up to 1 so that they define bona fide probabilities pi(t)
120
+ as follows
121
+ p_i(t) = ℓ_i(t) / Σ_{j=1}^{N} ℓ_j(t).    (3)
127
+ The speaker at turn t is then drawn from this probability
128
+ distribution across team members.
129
+ In summary, the conversational behavior of each individual
130
+ i within our model is given by two parameters (πi, di). Given
131
+ these parameters for every team member, the model provides
132
+ a well-defined stochastic process to generate conversations
133
+ by the team. More precisely, to determine the speaker at
134
+ turn t, we first compute the memory values of each member
135
+ following (1), we then compute likelihoods and transform
136
+ those into probabilities following (2) and (3), respectively,
137
+ and we finally draw the next speaker from that probability
138
+ distribution.
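A minimal Python sketch of this generative process, implementing (1)-(3) directly; the parameter values in the example are arbitrary and for illustration only.

# Simulate a conversation as the stochastic turn-taking process of Eqs. (1)-(3).
import numpy as np

def simulate_conversation(pi, d, n_turns, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    pi, d = np.asarray(pi, float), np.asarray(d, float)
    n = len(pi)
    t_last = np.full(n, -np.inf)                          # no one has spoken yet
    history = []
    for t in range(n_turns):
        memory = d * np.exp(-0.5 * (t - t_last))          # Eq. (1)
        likelihood = pi + memory                          # Eq. (2)
        if history:
            likelihood[history[-1]] = 0.0                 # no two consecutive turns
        prob = likelihood / likelihood.sum()              # Eq. (3)
        speaker = int(rng.choice(n, p=prob))
        history.append(speaker)
        t_last[speaker] = t
    return history

# Example: a 4-person team with one slightly more talkative member.
turns = simulate_conversation(pi=[0.3, 0.2, 0.2, 0.2], d=[0.5, 0.5, 0.5, 0.5], n_turns=20)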
139
+ Our main departure from Stasser and Taylor [7] is that
140
+ our model is based on individual-level parameters whereas
141
+ theirs is based on team-level parameters. Specifically, Stasser
142
+ and Taylor’s [7] model depends on a single parameter r
143
+ that determines the inherent speaking probability of every
144
+ individual (what we denote by πi) as well as a single parameter
145
+ d that determines the scale of the memory function for every
146
+ team member. This fundamental difference is a key enabler for
147
+ our study of how individual traits relate to each team member’s
148
+ conversational behavior since, given observed conversations of
149
+ a team, our model enables the estimation of the parameters
150
+ (πi, di) for every team member.
151
+ Given observed turn-taking data, we can fit our model by
152
+ selecting the parameters (πi, di) for every team member that
153
+ maximize the probability of generating the observed data.
154
+ More precisely, if we denote by Ht−1 the history of turn-
155
+ taking up to turn t − 1 in the observed data for a given
156
+ team, and by ht the speaker at turn t, we can compute the
157
+ probability that our model selects that true speaker ht [cf. (3)].
158
+ Following the notation in (3), we denote this probability by
159
+ pht(t | Ht−1, {(πi, di)}n
160
+ i=1), i.e., the probability of selecting
161
+ the true speaker ht at turn t but where we have now made
162
+ explicit that this value depends on the past history Ht−1 and
163
+ the parameters (πi, di) for each of the n members in the team.
164
+ With this notation in place, the log-likelihood of observing the
165
+ true history of T turns is given by
166
+ L(H_T | {(π_i, d_i)}_{i=1}^{n}) = Σ_{t=1}^{T} log p_{h_t}(t | H_{t-1}, {(π_i, d_i)}_{i=1}^{n}).    (4)
+ We fit our model by finding the parameters {(π_i, d_i)}_{i=1}^{n}
+ that maximize (4). We also fit a reduced model that does
+ not contain the memory parameters {d_i}_{i=1}^{n} or, equivalently,
+ where d_i = 0 for all i. We did this to determine the minimal
180
+ viable model that can explain our observed data.
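Fitting can be sketched as minimising the negative of (4) over the per-member parameters. The log-parameterisation (to keep π_i and d_i positive), the optimiser choice, and the toy speaker sequence are our assumptions; for simplicity the overall scale of the likelihoods is left unconstrained.

# Maximum-likelihood estimation of (pi_i, d_i) from an observed turn sequence (sketch).
import numpy as np
from scipy.optimize import minimize

def neg_log_likelihood(params, history, n):
    pi, d = np.exp(params[:n]), np.exp(params[n:])        # positivity via log-parameters
    t_last = np.full(n, -np.inf)
    nll = 0.0
    for t, speaker in enumerate(history):
        memory = d * np.exp(-0.5 * (t - t_last))
        likelihood = pi + memory
        if t > 0:
            likelihood[history[t - 1]] = 0.0
        prob = likelihood / likelihood.sum()
        nll -= np.log(prob[speaker] + 1e-12)
        t_last[speaker] = t
    return nll

history = [0, 1, 0, 2, 1, 0, 3, 0, 1, 0]                  # toy observed speaker indices
n_members = 4
res = minimize(neg_log_likelihood, x0=np.zeros(2 * n_members),
               args=(history, n_members), method="Nelder-Mead")
pi_hat, d_hat = np.exp(res.x[:n_members]), np.exp(res.x[n_members:])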
181
+ III. DATASET
182
+ In 2016 and 2017, we collected data on team interactions
183
+ in student engineering design teams during 7-week internships
184
+ at a private university in the southern United States. The first
185
+ week of the internship consisted of a condensed course on
186
+ the engineering design process, which helped to ensure all
187
+ participants had a similar baseline level of knowledge. During
188
+ the remaining six weeks, team members worked together to
189
+ plan and execute their project which sought to meet a real-
190
+ world need. We collected data from 7 multi-national teams
191
+ with team members from the United States (n = 13), Malawi
192
+ (n = 7), and Brazil (n = 4), with equal numbers of female and
193
+ male participants. After consenting to the study, participants
194
+ completed a self-report survey of their personality traits,
195
+ attitudes, and demographic information. From these data we
196
+ extracted five features for each individual that we hypothesized
197
+ could relate to individual differences in speaking patterns,
198
+ namely, extraversion, agreeableness, conscientiousness, sex
199
+ (male, female), and nationality (American, Non-American).
200
+ Our dataset includes multiple meetings from throughout the
201
+ internships for all teams. Audio streams were processed by
202
+ annotating the start and end times of speaking turns by each
203
+ team member during the meetings. Overall, we extracted a
204
+ mean (SD) of 1941 (1416.5) speaking turns per team.
205
+ IV. NUMERICAL EXPERIMENTS
206
+ To assess the predictive power of the full and reduced
207
+ (i.e., without the memory component) models, we perform the
208
+ following three classes of experiments.
209
+ Predicting the next speaker. For each team, we split their
210
+ turn-taking history into a training and a testing set. The
211
+ training data contains the first 80 percent of the total turns
212
+ whereas the testing data contains the remaining turns. As
213
+ previously explained, we compute the maximum likelihood
214
+ estimates of the model parameters but this time based only
215
+ on maximizing the probability of observing the history of
216
+ the training dataset. We then compute the log-likelihood of
217
+ observing the history of the testing dataset, as in (4), for
218
+ both the full and reduced fitted models. The larger (less
219
+ negative) the attained value, the better predictive power of the
220
+ corresponding model.
221
+ Overall, the full simulation model (i.e., with memory pa-
222
+ rameter) predicts the observed data better than the reduced
223
+ simulation model. Table I shows the log-likelihoods attained
224
+ for the testing dataset (last 20 percent of speaking turns)
225
+ for each team and simulation model. The full simulation
226
+ model consistently yields larger (less negative) log-likelihoods,
227
+ indicating a better predictive performance.
228
+ Reproducing relevant conversation patterns.
229
+ We test the fit of both the full and reduced simulation
230
+ models by comparing three measures between our observed
231
+ and simulated datasets: 1) the proportion of time in which each
232
+ team member spoke, 2) the proportion of a given speaker’s
233
+ speaking turns following an ABA format in which there was
234
+ one turn by a different speaker between a given speaker’s
235
+ sequential turns (reflecting the proportion of turns that were
236
+ part of dyadic exchanges), and 3) the proportion of turns that
237
+
238
+ TABLE I
239
+ LOG-LIKELIHOOD ATTAINED BY BOTH MODELS
240
+ Team     No Memory    Memory
+ Team 1   -159.1586    -154.4533
+ Team 2    -45.3267     -43.2123
+ Team 3   -196.5418    -188.6291
+ Team 4   -278.8524    -255.2545
+ Team 5   -909.8953    -617.3478
+ Team 6   -623.9848    -589.0674
+ Team 7   -105.8199    -104.0864
264
+ were part of long dyadic exchanges (4 or more consecutive
265
+ speaking turns (e.g. ABAB) between two team members).
266
+ We calculate these measures for each of 10,000 replications
267
+ of our simulation models and compare them to the values
268
+ found in our observed data from each team. For each of our
269
+ three measures, we find the proportion of model replications
270
+ in which the observed value in the real data fell within the
271
+ 95 percent confidence interval for values produced by each
272
+ simulation model. For each of the three speaking patterns of
273
+ interest, we use chi-squared tests to compare the number of
274
+ individuals or dyads across teams whose behaviors are not
275
+ significantly different from those displayed by the full and
276
+ reduced simulation models.
277
+ The full simulation model matches the patterns displayed
278
+ by significantly more individuals and dyads across teams
279
+ than the reduced simulation model. The full simulation model
280
+ correctly estimates the proportion of speaking turns spoken
281
+ by each team member for 100% (24/24) of team members
282
+ whereas the reduced simulation model correctly estimates the
283
+ proportion for 70.8% (17/24) of team members (χ2 = 6.0, p
284
+ = 0.014; Figure 1(a)). Moreover, the full simulation model
285
+ correctly estimates the proportion of each team member’s
286
+ speaking turns with an ABA format for 87.5% (21/24) of team
287
+ members, but the reduced simulation model correctly estimates
288
+ the proportion for 29.2% (7/24) of team members, with the
289
+ tendency to underestimate the proportion of turns (χ2 = 14.5,
290
+ p = 0.00014; Figure 1(b)). Finally, the full simulation model
291
+ correctly estimates the proportion of speaking turns that were
292
+ part of dyadic exchanges of length 4 turns or greater for 86.7%
293
+ (26/30) of team member dyads, and the reduced simulation
294
+ model correctly estimates the proportion for 36.7% (11/30) of
295
+ team member dyads, with the tendency to underestimate the
296
+ proportion of turns (χ2 = 13.8, p = 0.00020; Figure 1(c)).
297
+ Relating individual traits and speaking behavior. To gain
298
+ insight into the relative importance of different individual traits
299
+ in understanding speaking behaviors, we use an information-
300
+ theoretic approach [9] to determine which trait(s) best explain
301
+ between-individual variation in model parameters πi (baseline
302
+ likelihood of speaking) and di (likelihood of speaking again
303
+ after speaking recently). Using the MuMIn function [10] in
304
+ R, we examine which linear model (i.e., a null model and 5
305
+ uni-variate models consisting of each of our individual-level
306
+ predictor variables; see Section III) best explains variation in
307
+ each parameter value across team members. We group-mean
308
+ center our three continuous variables (extraversion, agreeable-
309
+ ness, conscientiousness) to reflect the relative values of these
310
+ personality traits among team members. We limit the number
311
+ of variables per linear model to one to avoid overfitting. We
312
+ rank our linear models according to the Akaike information
313
+ criterion adjusted for small sample sizes (AICc) [9]. We con-
314
+ sider top-performing linear models to be the best performing
315
+ model (i.e. the model with the lowest AICc value) in our model
316
+ set and any model less than 2 AICc points greater than the
317
+ best performing linear model [9]. We examine the correlation
318
+ between individual traits and simulation model parameters for
319
+ all top-performing linear models to determine how these traits
320
+ shape speaking behaviors.
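+ To make the ranking step concrete, the sketch below computes AICc, ∆AICc, and Akaike
+ model weights for a set of already-fitted candidate models. It is a minimal sketch of the
+ criterion only (the original analysis used the MuMIn package in R); the log-likelihood
+ values and parameter counts are placeholders, not results from our data.
+ # Illustrative sketch: rank candidate linear models by AICc and convert the
+ # differences into Akaike weights (relative support summing to 1).
+ import math
+
+ def aicc(log_lik, k, n):
+     # AICc = -2*logL + 2k + 2k(k+1)/(n - k - 1), the small-sample correction of AIC.
+     return -2.0 * log_lik + 2.0 * k + (2.0 * k * (k + 1)) / (n - k - 1)
+
+ # (model name, log-likelihood, number of estimated parameters); placeholder values.
+ candidates = [("Null", -10.0, 2), ("Nationality", -5.5, 3), ("Extraversion", -9.7, 3)]
+ n_obs = 24  # one parameter estimate per team member
+
+ scores = {name: aicc(ll, k, n_obs) for name, ll, k in candidates}
+ best = min(scores.values())
+ deltas = {name: s - best for name, s in scores.items()}
+ raw = {name: math.exp(-0.5 * d) for name, d in deltas.items()}
+ total = sum(raw.values())
+ for name in sorted(scores, key=scores.get):
+     print(f"{name:12s} AICc={scores[name]:7.2f} dAICc={deltas[name]:5.2f} wt={raw[name]/total:.3f}")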
321
+ Tables II and III display the results of our model selection
322
+ analysis that compares the relative ability of each of our five
323
+ univariate models to explain between-individual variation in
324
+ πi and di. The tables display the AICc values for each model
325
+ as well as the ∆AICc values (relative to the best model)
326
+ and corresponding model weights. Model weights reflect the
327
+ relative support for a given linear model compared to the other
328
+ candidate models, with 1 indicating full support.
329
+ TABLE II
+ MODEL SELECTION FOR BASELINE LIKELIHOOD OF SPEAKING πi
+ model               df    AICc     ∆AICc    wt
+ Nationality          3    -19.6      0.0    0.94
+ Null                 2    -12.4      7.2    0.025
+ Agreeableness        3    -11.4      8.2    0.015
+ Sex                  3    -10.3      9.3    0.010
+ Extraversion         3    -10.0      9.6    0.010
+ Conscientiousness    3     -9.9      9.7    0.010
366
+ TABLE III
+ MODEL SELECTION FOR SHAPE OF MEMORY FUNCTION di
+ model               df    AICc     ∆AICc    wt
+ Null                 2    105.2      0.0    0.42
+ Extraversion         3    107.5      2.4    0.13
+ Nationality          3    107.7      2.5    0.12
+ Conscientiousness    3    107.8      2.6    0.11
+ Sex                  3    107.8      2.6    0.11
+ Agreeableness        3    107.8      2.6    0.11
403
+ The linear model that best explains between-individual dif-
404
+ ferences in πi, the parameter reflecting the baseline likelihood
405
+ of initiating a speaking turn, has nationality as the predictor
406
+ variable. This linear model has a cumulative model weight
407
+ of 93.5%. The second best linear model is the null model,
408
+ which has a ∆AICc value 7.2 higher than the best model
409
+ (Table II). Since this ∆AICc value is greater than our criterion
410
+ of ∆AICc = 2 [9], we only consider the linear model with
411
+ nationality as a predictor variable as a top-performing model
412
+ within our model set. Overall, this model is supported 37.6
413
+ times more strongly (evidence ratio = wi/wj = 0.94/0.025 =
414
+ 37.6) than the null model. When we analyze our top model,
415
+ we find that Americans have significantly higher likelihoods
416
+ of initiating speaking turns than non-Americans (β = 0.20, p
417
+ < 0.01, Figure 2).
418
+ The linear model that best explains between-individual
419
+ differences in di, the change in likelihood of speaking after
420
+ having just spoken, is the null model. This linear model has
421
+ a cumulative model weight of 41.6%. The second best linear
422
+
423
+ Fig. 1. Black points represent a) observed proportion of speaking turns by each team member, b) observed proportion of speaking turns
424
+ with one turn in between (e.g. ABA) for each team member, c) observed proportion of speaking turns that were part of consecutive dyadic
425
+ exchanges of length 4 turns or greater. Error bars represent the 95 percent confidence intervals for the proportions estimated by the reduced
426
+ simulation model (i.e., without memory parameter) (red) and full simulation model (blue). Y-axis scale varies by team to improve visibility.
427
+ Fig. 2. Boxplot of baseline likelihood of speaking (parameter πi) by
428
+ nationality across all teams.
429
+ model has extraversion as a predictor variable and a ∆AICc
430
+ value of 2.4 (Table III). Thus, only the null model is con-
431
+ sidered a top-performing linear model within our model set,
432
+ indicating that none of the predictor variables we considered
433
+ explain between-individual variation in di. Overall, the null
434
+ model is supported approximately 3.2 times more strongly
435
+ (evidence ratio = wi/wj = 0.42/0.13 = 3.2) than the model
436
+ with extraversion as a predictor.
437
+ V. DISCUSSION
438
+ The presence of the memory parameter is important in
439
+ simulating the patterns of vocal turn-taking we observed in
440
+ our study. Compared to the reduced simulation model with no
441
+ memory parameter, the full simulation model more accurately
442
+ predicts future speaking turns and better captures individual
443
+ and dyadic speaking patterns. This result supports the findings
444
+ by Stasser and Taylor [7] and Parker [5] that an individual’s
445
+ current likelihood of speaking is impacted by their recent
446
+ speaking behaviors. Nevertheless, our results differ from those
447
+ of Stasser and Taylor in that we find different parameter values
448
+ controlling memory function shape for different individuals.
449
+ Our study finds evidence for consistent between-individual
450
+ differences in speaking behaviors supporting previous find-
451
+ ings that individual traits can correlate with communication
452
+ behaviors [11]–[13]. Our finding that non-Americans initiate
453
+ speaking turns less frequently than Americans is consistent
454
+ with a recent study by Li et al. [11] which found that Chinese
455
+ team members, who tended to be less proficient in English, ini-
456
+ tiated fewer speaking turns than the American team members.
457
+ Although we did not measure English language proficiency in
458
+ our study, our finding could be related to language proficiency
459
+ since the non-American students in our study were non-native
460
+ English speakers. Another reason why non-Americans may not
461
+ have initiated speaking turns as frequently could be that they
462
+ had a perceived lower status than American team members.
463
+ Social status may be awarded to the ethnic subgroup with
464
+ the greatest numerical majority [14]. Since both the Brazilian
465
+ and Malawian students were completing the internship at
466
+
467
667
+ an American university and were outnumbered by American
668
+ students, they may have demonstrated lower status behaviors
669
+ like speaking up less frequently [12].
670
+ Although nationality best explained differences in baseline
671
+ frequency of speaking turn initiation, none of our predictor
672
+ variables explained variation in memory function shape. Future
673
+ studies are needed to determine whether other traits may
674
+ explain the observed variation in this speaking tendency.
675
+ Nevertheless, since Americans were more likely to initiate
676
+ speaking turns, the broad tendency to speak again after hav-
677
+ ing recently spoken further enhanced individual differences
678
+ in speaking frequency across team members. Overall, these
679
+ results help expand knowledge of the impact cultural diversity
680
+ can have on team processes [15].
681
+ A limitation of our study was that we only had data on a
682
+ relatively small number of teams and team members. This
683
+ lack of power prevented us from exploring more complex
684
+ relationships between individual traits and their impacts on
685
+ speaking behaviors. For example, Neubert and Tagger [16]
686
+ found that gender moderated the relationship between indi-
687
+ vidual traits and leadership, with certain traits being more
688
+ important for leadership in males than females and vice
689
+ versa. Since we tested each of our predictor variables on its
690
+ own, the strong effect of nationality may have overpowered
691
+ more subtle or complicated effects of other variables, such as
692
+ personality and gender. This could be a reason why individual
693
+ traits like extraversion, which can be strongly correlated with
694
+ communication tendencies [17]–[19], did not correspond to
695
+ individual differences in speaking behaviors in our study.
696
+ Extending our study to more teams would enable a greater
697
+ understanding of how multiple traits may interact to impact
698
+ speaking behaviors.
699
+ VI. CONCLUSION AND FUTURE WORK
700
+ Our study develops a model of conversational turn-taking
701
+ that can provide a mechanistic understanding of how patterns
702
+ of communication emerge within teams and can be used to
703
+ investigate the relationship between team member traits and
704
+ specific speaking behaviors. Future extensions of our model
705
+ could integrate more fine-grained speaking behaviors such as
706
+ the timing between turns and turn overlap, which may enable
707
+ the study of more complex or subtle turn-taking dynamics. For
708
+ example, individuals higher in dominance have been found
709
+ to interrupt more often, which can have a suppressive effect
710
+ on the speaking behaviors of others [20]. Ultimately, through
711
+ extensions of our modeling approach, it could be possible to
712
+ predict the conversational interactions among team members
713
+ based on their trait composition alone. This ability could
714
+ enable the anticipation of undesirable team outcomes (e.g., de-
715
+ velopment of subgroups) so that interventions could be applied
716
+ ahead of time. Similarly, for established teams, it could also be
717
+ possible to predict the effects team composition changes may
718
+ have on communication patterns, thus providing guidelines for
719
+ restaffing or retraining team members. Such predictive models
720
+ would represent a significant advancement in teams research,
721
+ enabling a more mechanistic understanding of the connection
722
+ between team composition and team processes [21], [22].
723
+ REFERENCES
724
+ [1] I. Brugere, B. Gallagher, and T. Y. Berger-Wolf, “Network structure
725
+ inference, a survey: Motivations, methods, and applications,” ACM
726
+ Computing Surveys (CSUR), vol. 51, no. 2, pp. 1–39, 2018.
727
+ [2] J. D. Medaglia, W. Huang, S. Segarra, C. Olm, J. Gee, M. Grossman,
728
+ A. Ribeiro, C. T. McMillan, and D. S. Bassett, “Brain network
729
+ efficiency is influenced by the pathologic source of corticobasal
730
+ syndrome,” Neurology, vol. 89, no. 13, pp. 1373–1381, 2017. [Online].
731
+ Available: https://n.neurology.org/content/89/13/1373
732
+ [3] A. Chowdhury, G. Verma, C. Rao, A. Swami, and S. Segarra, “Unfolding
733
+ wmmse using graph neural networks for efficient power allocation,”
734
+ IEEE Trans. Wireless Commun., vol. 20, no. 9, pp. 6004–6017, 2021.
735
+ [4] J. E. McGrath, H. Arrow, and J. L. Berdahl, “The study of groups: Past,
736
+ present, and future,” Personality and Social Psychology Review, vol. 4,
737
+ no. 1, pp. 95–105, 2000.
738
+ [5] K. C. H. Parker, “Speaking Turns in Small Group Interaction: A
739
+ Context-Free Event Sequence Model,” Journal of Personality and Social
740
+ Psychology, vol. 54, no. 6, pp. 965–971, 1988.
741
+ [6] S. Basu, T. Choudhury, B. Clarkson, and A. Pentland, “Learning
742
+ human interactions with the influence model,” MIT Media Laboratory,
743
+ Cambridge, MA, Tech. Rep., 2001.
744
+ [7] G. Stasser and L. A. Taylor, “Speaking turns in face-to-face discussions.”
745
+ Journal of Personality and Social Psychology, vol. 60, no. 5, pp. 675–
746
+ 684, 1991.
747
+ [8] E. Padilha and J. Carletta, “A simulation of small group discussion,”
748
+ in Workshop on the Semantics and Pragmatics of Dialogue, 2002, pp.
749
+ 117–124.
750
+ [9] K. P. Burnham and D. R. Anderson, Model Selection and Multimodel
751
+ Inference: A Practical Information-Theoretic Approach, 2nd ed. Berlin:
752
+ Springer, 2002.
753
+ [10] K. Barton, “MuMIn: Multi-model inference,” 2013. [Online]. Available:
754
+ http://cran.r-project.org/package=MuMIn
755
+ [11] H. Li, Y. C. Yuan, N. N. Bazarova, and B. S. Bell, “Talk and let talk:
756
+ The effects of language proficiency on speaking up and competence per-
757
+ ceptions in multinational teams,” Group and Organization Management,
758
+ vol. 44, no. 5, pp. 953–989, 2019.
759
+ [12] M. S. Mast, “Dominance as expressed and inferred through speaking
760
+ time,” Human Communication Research, vol. 28, no. 3, pp. 420–450,
761
+ 2002.
762
+ [13] C. E. Kimble and J. I. Musgrove, “Dominance in arguing mixed-sex
763
+ dyads: Visual dominance patterns, talking time, and speech loudness,”
764
+ Journal of Research in Personality, vol. 22, no. 1, pp. 1–16, 1988.
765
+ [14] A. D. Bellmore, A. Nishina, M. R. Witkow, S. Graham, and J. Juvonen,
766
+ “The influence of classroom ethnic composition on same- and other-
767
+ ethnicity peer nominations in middle school,” Social Development,
768
+ vol. 16, no. 4, pp. 720–740, 2007.
769
+ [15] G. K. Stahl, M. L. Maznevski, A. Voigt, and K. Jonsen, “Unraveling
770
+ the effects of cultural diversity in teams: A meta-analysis of research on
771
+ multicultural work groups,” Journal of International Business Studies,
772
+ vol. 41, no. 4, pp. 690–709, 2010.
773
+ [16] M. J. Neubert and S. Taggar, “Pathways to informal leadership: The
774
+ moderating role of gender on the relationship of individual differences
775
+ and team member network centrality to informal leadership emergence,”
776
+ Leadership Quarterly, vol. 15, no. 2, pp. 175–194, 2004.
777
+ [17] K. C. McLean and M. Pasupathi, “Collaborative narration of the past
778
+ and extraversion,” Journal of Research in Personality, vol. 40, no. 6, pp.
779
+ 1219–1231, 2006.
780
+ [18] S. K. Leung and M. H. Bond, “Interpersonal communication and person-
781
+ ality: Self and other perspectives,” Asian Journal of Social Psychology,
782
+ vol. 4, no. 1, pp. 69–86, 2001.
783
+ [19] G. A. Macht, D. A. Nembhard, J. H. Kim, and L. Rothrock, “Structural
784
+ models of extraversion, communication, and team performance,”
791
+ International Journal of Industrial Ergonomics, vol. 44, no. 1, pp. 82–91,
792
+ 2014. [Online]. Available: http://dx.doi.org/10.1016/j.ergon.2013.10.007
793
+ [20] E. M. Rogers and D. K. Bhowmik, “Homophily-heterophily: Rela-
794
+ tional concepts for communication research,” Public Opinion Quarterly,
795
+ vol. 34, no. 4, pp. 523–538, 1970.
796
+ [21] S. T. Bell, S. G. Brown, A. Colaneri, and N. Outland, “Team composition
797
+ and the ABCs of teamwork,” American Psychologist, vol. 73, no. 4, pp.
798
+ 349–362, 2018.
799
+ [22] L. O’Bryan, M. Beier, and E. Salas, “How approaches to animal swarm
800
+ intelligence can improve the study of collective intelligence in human
801
+ teams,” Journal of Intelligence, vol. 8, no. 1, 2020.
802
+
KdE2T4oBgHgl3EQfpgjD/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
L9E3T4oBgHgl3EQfYgo-/content/tmp_files/2301.04488v1.pdf.txt ADDED
@@ -0,0 +1,1527 @@
1
+ WuYun: Exploring hierarchical skeleton-guided
2
+ melody generation using knowledge-enhanced deep
3
+ learning
4
+ Kejun Zhang1,2,3,∗,
5
+ Xinda Wu1,∗,
6
+ Tieyao Zhang1,
7
+ Zhijie Huang1,
8
+ Xu Tan4,
9
+ Qihao Liang1,
10
+ Songruoyao Wu1,
11
+ Lingyun Sun1,2,†
12
+ 1College of Computer Science and Technology, Zhejiang University, China.
13
+ 2Alibaba-Zhejiang University Joint Institute of Frontier Technologies, China.
14
+ 3Innovation Center of Yangtze River Delta, China.
15
+ 4Microsoft Research Asia
16
+ {zhangkejun, wuxinda, kreutzer0421, zj_huang,
17
+ qhliang, 12221193, sunly}@zju.edu.cn
18
+ xuta@microsoft.com
19
+ Abstract
20
+ Although deep learning has revolutionized music generation, existing methods
21
+ for structured melody generation follow an end-to-end left-to-right note-by-note
22
+ generative paradigm and treat each note equally. Here, we present WuYun, a
23
+ knowledge-enhanced deep learning architecture for improving the structure of
24
+ generated melodies, which first generates the most structurally important notes to
25
+ construct a melodic skeleton and subsequently infills it with dynamically decorative
26
+ notes into a full-fledged melody. Specifically, we use music domain knowledge
27
+ to extract melodic skeletons and employ sequence learning to reconstruct them,
28
+ which serve as additional knowledge to provide auxiliary guidance for the melody
29
+ generation process. We demonstrate that WuYun can generate melodies with better
30
+ long-term structure and musicality and outperforms other state-of-the-art methods
31
+ by 0.51 on average on all subjective evaluation metrics. Our study provides a
32
+ multidisciplinary lens to design melodic hierarchical structures and bridge the
33
+ gap between data-driven and knowledge-based approaches for numerous music
34
+ generation tasks.
35
+ 1
36
+ Introduction
37
+ Automatic music generation is one of the popular multidisciplinary research topics in generative
38
+ art and computational creativity (1), which has achieved revolutionary advances in various artificial
39
+ intelligence-generated content applications by utilizing deep learning techniques (2, 3), including
40
+ interactive music production collaboration tools (4, 5), video background music generation (6), music
41
+ education (7), and music therapy (8). As one of the crucial components of music generation, melody
42
+ generation has drawn much attention from both the academic and industrial fields. Although melodies
43
+ appear to be a simple linear succession of notes unfolding over time, the organizational structure of
44
+ the melodic notes is hierarchical, like a tree resulting in intricate long-distance dependencies (9, 10).
45
+ Hence, the complex long-distance dependencies make it difficult for neural networks to discover and
46
+ learn the hierarchical structure relationships among these musical elements and generate long-term
47
+ structured melodies. In recent years, language models in natural language processing (NLP) have
48
+ ∗Equal contribution.
49
+ †Corresponding author.
50
+ Preprint. Under review.
51
+ arXiv:2301.04488v1 [cs.SD] 11 Jan 2023
52
+
53
+ been employed to capture long-distance dependencies for structured melody generation with the
54
+ advantages of an easy-to-use end-to-end deep learning framework, effective representation learning,
55
+ and arbitrary sequence length generation. Their powerful ability to automatically learn the latent
56
+ knowledge from big data, without explicitly codifying the domain-specific rules, has been proved and
57
+ applied in multiple disciplines (11–14).
58
+ Numerous specialized architectures of the language model for music generation have demonstrated
59
+ promising performance in generating long-range coherent melodies, including effective attention
60
+ mechanisms (15, 16), enhanced memory networks (17–19), large-scale deep neural networks (20),
61
+ and explicit musicality regularization (21). Furthermore, various MIDI-derived symbolic music
62
+ representation methods designed auxiliary musical spatiotemporal symbols (e.g., BAR, POSITION,
63
+ and CHORD) for the input symbolic music data to help music generation models learn the long-
64
+ distance dependencies better, longer, and faster (17–19, 22, 23). However, the scarcity of publicly
65
+ available melody data limits the usage of the power of language-based music generation models.
66
+ Moreover, the process of melody generation still lacks controllability. These models are trained in the
67
+ dominant end-to-end and data-driven learning paradigms, which optimize the network’s large-scale
68
+ parameters via learning to map the input data to output data, thus occasionally resulting in excessive
69
+ repetition or boring sounds in the generated music (21).
70
+ Recent studies used a deep learning-based hierarchical generation strategy to first hallucinate or
71
+ predict the object’s structure and then use it to constrain downstream generation tasks (e.g., protein,
72
+ font, or music) (24–33), which enables the neural networks to learn from the limited data far
73
+ more efficiently and improves the controllability of the generation process. For structured melody
74
+ generation, some scholars first generate a melody’s hierarchical music structure representation (31)
75
+ or bar-level musical structure relationship graph (32, 33) and then generate melodies conditioned
76
+ on the generated parallel structure information as additional knowledge. Such a strategy requires
77
+ recognizing the group structure of the musical syntax in a melodic surface (e.g., phrases and sections)
78
+ to extract music features for building a structure generation model. Nonetheless, inadequate music
79
+ structure boundary detection algorithms hinder the extraction of accurate melodic group structure.
80
+ Conversely, little attention has been paid to the organizational logic of the deep structure beneath
81
+ the melodic surface, organized by different levels of structural importance among various musical
82
+ events (34–36) with the potential to enhance structured melody generation. Typically, the majority
83
+ of existing melody generation methods for pursuing long-term structure follow an end-to-end left-
84
+ to-right note-by-note generative paradigm and treat each note equally. So far, however, there is still
85
+ an insufficient investigation into an alternative order of melody generation and the difference in the
86
+ relative structural importance among musical notes.
87
+ In this study, we propose WuYun, a hierarchical skeleton-guided melody generation architecture
88
+ based on knowledge-enhanced deep learning that incorporates the melodic skeleton as deep structural
89
+ support to provide explicit guidance on the development direction of melody generation (Fig. 1A).
90
+ WuYun follows the hierarchical organization principle of structure and prolongation (35, 37), thus
91
+ dividing traditional single-stage end-to-end melody generation into two stages: melodic skeleton
92
+ construction and melody inpainting (Fig. 1B). At the stage of melodic skeleton construction, we first
93
+ extract the most structurally important notes in a musical piece from rhythm and pitch dimensions
94
+ as melodic skeletons on the basis of the music domain knowledge. We then train an autoregressive
95
+ decoder-only Transformer-based network (38) on the collected melodic skeleton data to construct
96
+ new melodic skeletons (Fig. 1C, a). We treat the melodic skeleton as the underlying framework of the
97
+ final generated melody. At the stage of melody inpainting, we adopt a Transformer encoder–decoder
98
+ architecture (39) to elaborate the melodic skeleton into a full-fledged melody by encoding the melodic
99
+ skeleton as additional knowledge into the decoder to guide the melody generation process (Fig. 1C,
100
+ b). To prove the effectiveness of the architecture, we evaluate WuYun on a publicly available melody
101
+ dataset. Experimental results show that the generated melodic skeleton has comparable quality with
102
+ the real one extracted by our proposed melodic skeleton extraction framework. The hierarchical
103
+ skeleton-guided melody generation architecture effectively improves generated melodies’ long-term
104
+ structure and musicality and outperforms other state-of-the-art methods by 0.51 on average on all
105
+ subjective evaluation metrics.
106
+ 2
107
+
108
+ Melodic skeleton
109
+ Melody
110
+ Melody
111
+ Database
112
+ Melodic Skeleton Extraction
113
+ Framework
114
+ Pairing
115
+ B
116
+ Stage 1: melodic skeleton construction
117
+ Stage 2: melody inpainting
118
+ Melodic
119
+ Skeleton
120
+ Database
121
+ Hierarchical structure analysis in music
122
+ (Music domain knowledge)
123
+ Input Module
124
+ (Embedding)
125
+ Positional
126
+ Encoding
127
+ Transformer-XL
128
+ (4 blocks)
129
+ Output Module
130
+ (Classifier)
131
+ C
132
+ a) Melodic Skeleton Generation Module
133
+ Input
134
+ (Melodic skeleton)
135
+ Representation
136
+ Input Module
137
+ (Embedding)
138
+ Encoder
139
+ (4 blocks)
140
+ Decoder
141
+ (4 blocks)
142
+ Recurrent Transformer
143
+ b) Melodic Prolongation Generation Module
144
+ Representation
145
+ Input Module
146
+ (Embedding)
147
+ Input
148
+ (Melody)
149
+ Representation
150
+ Input
151
+ (Melodic skeleton)
152
+ Output Module
153
+ (Classifier)
154
+ Train
155
+ A
156
+ Melodic Skeleton
157
+ Melody (example)
158
+ Melodic skeleton
159
+ Sequence learning model
160
+ Y1
161
+ Y2
162
+ Yn
163
+ ···
164
+ ···
165
+ X1
166
+ X2
167
+ Xn
168
+ ···
169
+ ···
170
+ Partial sequence
171
+ Melodic skeleton
172
+ ···
173
+ X1
174
+ X2
175
+ Xn
176
+ ···
177
+ Encoder
178
+ Y2
179
+ Y3
180
+ Yn
181
+ ···
182
+ Y1
183
+ Y2
184
+ Yn-1
185
+ ···
186
+ ···
187
+ Decoder
188
+
189
+ Predicted tokens
190
+ Predicted tokens
191
+ ···
192
+ ···
193
+ ···
194
+ ···
195
+ ···
196
+ ···
197
+ ···
198
+ ···
199
+ ···
200
+ ···
201
+ ···
202
+ ···
203
+ ···
204
+ ···
205
+ ···
206
+ ···
207
+ Sequence-to-sequence
208
+ learning model
209
+ Melody
210
+ Figure 1: Architecture of WuYun. (A) The first eight bars of the melody of “Hey Jude” from The Beatles
211
+ (excluding anacrusis). The upper part of the figure shows the basic shape of the melodic motion, and the low
212
+ part of the figure shows the melodic skeleton in the rhythm dimension. Every melody has an underlying melodic
213
+ skeleton that provides structural support and connections among musical elements to guide the melodic motion.
214
+ (B) Hierarchical melody generation process. WuYun divides the melody generation process into melodic skeleton
215
+ construction and melody inpainting stages following the hierarchical organization principle of structure and
216
+ prolongation. At the melodic skeleton construction stage, the melodic skeleton extraction framework is proposed
217
+ to extract the melodic skeleton in the rhythm and pitch dimensions by the hierarchical structure theory from
218
+ music domain knowledge. A neural network for sequence learning trained on melodic skeletons can generate
219
+ novel ones. At the melody inpainting stage, another neural network for sequence-to-sequence learning would
220
+ fill the generated melodic skeleton into a full-fledged melody. (C) Architecture details of WuYun. WuYun is
221
+ composed of a melodic skeleton generation module and a melodic prolongation generation module; the former
222
+ is used for the melodic skeleton construction stage, and the latter is used for the melody inpainting stage with the
223
+ guidance of the melodic skeleton.
224
+ 3
225
+
226
+ 2
228
+ Result
229
+ 2.1
230
+ Hierarchical organization principle of structure and prolongation
231
+ Most AI artistic generative models differ significantly from humans in their artistic creation process,
232
+ especially in music generation. For example, a typical music generation model generates music
233
+ content sequentially from left to right at once (40). However, human artworks tend to develop
234
+ iteratively from a basic underlying idea or structure through elaboration, expansion, and individual
235
+ shaping. Human artistic creation follows an age-old fundamental principle of creative thinking,
236
+ namely, structure and prolongation, which has significantly contributed to human thinking and
237
+ creativity. In the art of music, this principle governs the underlying logic in musical composition
238
+ and makes musical reasoning and explanation comprehensible and acceptable (37). It conforms
239
+ to the brain’s cognitive processing mechanism of structurally organizing sequential information
240
+ (41–44), which makes the brain encode and process information more efficiently and improves
241
+ musical memories (45, 46). For example, musicians use this principle, consciously or unconsciously,
242
+ to study, organize, and perform their musical works.
243
+ The hierarchical structure is a key feature of the tonal musical syntax system, where musical elements
244
+ are almost always hierarchically organized by strict rules at a fundamental level rather than unlimited
245
+ creative expression (36). Some researchers have investigated the patterns of structural organization
246
+ and generalized them into music theories regarding the hierarchical structure in music from the
247
+ perspective of the structure and prolongation principle. Schenker (47) was the first to introduce this
248
+ principle to describe the musical structure in a hierarchically organized way. The central idea of
249
+ Schenkerian theory about the hierarchical structure is that some musical events are elaborated by
250
+ other musical events in a recursive and embedded fashion (9, 34). That is, not all musical events
251
+ are equally important. Some musical events have structural importance as stable factors in music,
252
+ whereas others are more decorative as dynamic factors. Therefore, Schenker proposed different
253
+ levels of structure hierarchy to organize tonal music and analyze its motion. Based on Schenker’s
254
+ ideas, the generative theory of tonal music (GTTM), proposed by Lerdahl and Jackendoff (34), is
255
+ one of the most influential theories in current music theory and music psychology. GTTM provides
256
+ a systematic analysis and description of the hierarchical structure in music. Based on the listeners’
257
+ perception of tonal music, GTTM lists four hierarchical structure relationships from rhythm and pitch
258
+ dimensions: grouping structure, metrical structure, time span reduction, and prolongational reduction.
259
+ The term “reduction” refers to the stepwise reduction of less important musical events from the
260
+ musical surface, revealing the underlying framework or skeleton that plays an essential role in the
261
+ music’s qualities and developmental direction. In summary, under the surface of the music, musical
262
+ events are hierarchically organized based on the structural stability in rhythm and pitch dimensions
263
+ (48, 49).
264
+ Inspired by the iterative mode of human composition guided by the principles of structure and
265
+ prolongation, the whole process of melody creation can be seen as progressively filling individual
266
+ decorative notes among the melodic skeleton; it is an effective modern composition technique that
267
+ perfectly combines rules and composers’ personality (35). This composition technique has been
268
+ developed and applied in music teaching for a long history. In the following, we elaborate on the
269
+ melodic skeleton extraction framework from rhythm and pitch dimensions and introduce the design of
270
+ WuYun melody generation architecture that first constructs the melodic skeleton and then completes
271
+ the melody instead of sequentially generating a melody note-by-note at once. The manner of WuYun’s
272
+ melody generation process is more musically meaningful than the dominant end-to-end left-to-right
273
+ note-by-note melody generation paradigm.
274
+ 2.2
275
+ Melodic skeleton extraction framework
276
+ Music theories present that there is an underlying identifiable framework beneath the melody surface
277
+ called the melodic skeleton (35, 50). The melodic skeleton is composed of certain notes, which
278
+ sound more structurally important from rhythm and pitch dimensions (34, 48) and are called the
279
+ skeleton notes. The skeleton note attracts the audience’s attention and makes a deeper impression
280
+ on them. By contrast, the remaining part of the notes plays a decorative role, giving the melody
281
+ personalities or styles, and are called the decorative notes or prolongation notes. The melodic skeleton
282
+ serves as the crucial structural support of rhythm and harmony, indicating the direction of melody
283
+ development. Knowledge of melodic skeleton information can help humans and machines better
284
+ 4
285
+
286
+ Figure 2: Melodic skeleton extraction framework. (A) Rhythm pattern of strong and weak beat distribution
287
+ in the 4/4 time signature with different note resolutions. (B) Rhythmic skeleton extraction. The rhythmic
288
+ skeleton consists of the metrical accents, agogic accents on metrical accents, and agogic accents on syncopations
289
+ in each measure. (C) Illustration of the tension measure in the pitch class helix of the spiral array with a C major
290
+ chord. The right part of the figure presents the tension value of the notes in the first eight bars of the melody of
291
+ “Hey Jude.” (D) Tonal skeleton extraction. The tonal skeleton consists of the notes with the minimum tension
292
+ value in the rhythm cell.
293
399
+ D
400
+ Rhythm Cell
401
+ analyze, understand, and learn the logic of melodic hierarchy organization from surface to deep
402
+ layers.
403
+ Here, we introduce a framework for melodic skeleton extraction based on the knowledge of music
404
+ theory (34, 35, 50–53) and music psychology (48, 49) to identify the dominant and subordinate
405
+ relationship of structural importance from rhythm and pitch dimensions between melody notes. The
406
+ theoretical basis and implementation details are described briefly below.
407
+ 2.2.1
408
+ Rhythmic skeleton extraction
409
+ Before describing the theoretical basis of this work, we would have to cover some basic musical
410
+ terms and concepts regarding meter and rhythm in the time aspect of music. In music theory, the
411
+ pulse splits time into a series of uniformly spaced chunks called beats. Not all beats are created equal,
412
+ and certain beats are felt stronger than others. The first beat of each measure is a downbeat, and the
413
+ one that follows is an upbeat. The meter measures the number of beats in the regular and repeated
414
+ pattern of the downbeat. For example, the most common meter in music is 4/4; each measure has
415
+ four quarter note beats. The distribution pattern of strong and weak beats is “strong weak sub-strong
416
+ weak,” as shown in Fig. 2A. Rhythm can be defined as the organization pattern of one downbeat with
417
+ one or more upbeat (54). Therefore, the meter provides the temporal framework for organizing music
418
+ rhythm.
419
+ To draw listeners’ attention, musicians often use accents in musical compositions or performances
420
+ to emphasize a particular note. Accents can be expressed in various ways to increase musical
421
+ expressiveness and add character to the movement of music. Metrical and rhythmic accents are the
422
+ two main accents in the symbolic melody data. A metrical accent is an accent that falls on the strong
423
+ beat position within a measure. The metrical accent is periodic and cyclic, whose distribution pattern
424
+ depends on the type of meter. A rhythmic accent is an accent in a strong position within the rhythm,
425
+ which emphasizes a point that is not constrained by the meter’s structure. Consequently, it is flexible
426
+ and changeable rather than static and fixed. Rhythmic accents can be created by increasing the notes’
427
+ dynamic (i.e., dynamic accent), extending the notes’ duration (agogic accent), using syncopation,
428
+ and so forth. The agogic accent can be easily distinguished by comparing the surrounding notes on
429
+ duration. Additionally, the musician can use syncopation to change the normal rhythm pattern by
430
+ extending the duration of notes from the weak beat or weak beat part to the subsequent strong beat
431
+ or strong beat part. Note that a note cannot be both a metrical accent and a syncopation, which are
432
+ conflicted with each other. The dynamic accent is not used since it has no obvious changes in most
433
+ symbolic melody data.
434
+ In this study, we extract the metrical accents, the agogic accents falling on the metrical accent, and
435
+ the agogic accents falling on the syncopation as the rhythmic skeleton notes, as illustrated in Fig.
436
+ 2B. Metrical accents are the foundation of other types of accent (34). When two or more different
437
+ types of accents work together, the listener will experience a particularly more prominent accent.
438
+ Therefore, when a rhythmic accent and a metrical accent overlap, the rhythmic accent is generally
439
+ more perceptible and is the one that is preferred. If there were continuous rhythmic skeleton notes,
440
+ we chose the most structurally important note as the rhythmic skeleton note according to the intensity
441
+ of the accent.
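+ The selection rule above can be made concrete with a short sketch. The encoding below
+ (onsets and durations in sixteenth-note steps, 4/4 meter with strong positions on beats 1
+ and 3, a simple longer-than-both-neighbours test for agogic accents) is our simplifying
+ assumption rather than the authors' implementation, and the final thinning of consecutive
+ skeleton notes by accent intensity is omitted.
+ # Illustrative sketch: mark rhythmic skeleton notes (metrical accents plus agogic
+ # accents on syncopations) for a monophonic melody in 4/4.
+ from dataclasses import dataclass
+
+ STEPS_PER_BAR = 16            # sixteenth-note grid
+ STRONG_POSITIONS = {0, 8}     # beats 1 and 3 (sub-strong treated as strong here)
+
+ @dataclass
+ class Note:
+     onset: int     # absolute position in sixteenth steps
+     duration: int  # length in sixteenth steps
+     pitch: int     # MIDI pitch (unused for the rhythmic skeleton)
+
+ def is_metrical_accent(note):
+     return note.onset % STEPS_PER_BAR in STRONG_POSITIONS
+
+ def is_agogic_accent(notes, i):
+     # Longer than its immediate neighbours -> heard as an agogic accent.
+     neighbours = [notes[j].duration for j in (i - 1, i + 1) if 0 <= j < len(notes)]
+     return bool(neighbours) and notes[i].duration > max(neighbours)
+
+ def is_syncopation(note):
+     # Starts off the strong beats but sustains across the next strong-beat boundary.
+     start = note.onset % STEPS_PER_BAR
+     if start in STRONG_POSITIONS:
+         return False
+     next_strong = 8 if start < 8 else STEPS_PER_BAR
+     return start + note.duration > next_strong
+
+ def rhythmic_skeleton(notes):
+     return [n for i, n in enumerate(notes)
+             if is_metrical_accent(n) or (is_agogic_accent(notes, i) and is_syncopation(n))]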
442
+ 2.2.2
443
+ Tonal skeleton extraction
444
+ In tonal music, the pitch of the melody moves around a centrally stable note (i.e., the tonal center or
445
+ tonic), repeatedly moving away from it and back to it. Pitches are essentially organized into a distinct
446
+ hierarchy scale based on tonal stability. There is a mutual attraction between pitches with different
447
+ stable levels, which can stimulate different emotional experiences (47). Specifically, an unstable
448
+ pitch tends to be a stable pitch, which would make the listeners feel relaxed or dismissed. Moving
449
+ from a stable to an unstable pitch would increase the listener’s sense of tension. The prolongational
450
+ reduction theory of GTTM suggests that the more important music event has less tension and vice
451
+ versa (53). Note that the same tone may have different feelings in different contexts, which may be
452
+ pleasant or anxious.
453
+ For the tonal skeleton extraction method, we use the tension level as a metric to quantify the relative
454
+ importance of the pitch. The specific recognition procedure is as follows.
455
+ 6
456
+
457
+ • First, we used the position of the rhythmic skeleton notes as the boundary of the individual
458
+ context because the metrical structure is the important basis of all hierarchical structure
459
+ types (34).
460
+ • Second, we combined two or three successive notes as the minimum rhythmic cell in each
461
+ segment, according to the repetition frequency in the melody (55) and the number of notes
462
+ of this rhythmic cell. The term “rhythmic cell” defines as a “small rhythmic and melodic
463
+ design that can be isolated or can make up one part of a thematic context” (56). Therefore,
464
+ each rhythmic cell can be seen as an isolated thematic context for calculating the tension
465
+ profile.
466
+ • Finally, we adopted a mathematical tonal tension model to quantify each note’s tension
467
+ value by calculating the distance between every single tone and global key in the spiral array
468
+ (57–59), as shown in Fig. 2C. We selected the note with the minimum tension value in each
469
+ rhythmic cell as the tonal skeleton note. For example, Fig. 2D shows the tonal skeleton of
470
+ the first eight bars from the song “Hey Jude.”
471
+ 2.3
472
+ Design of WuYun
473
+ Figure 1C shows the diagram of the proposed hierarchical melody generation architecture called
474
+ WuYun. First, we convert the melody MIDI files and their melodic skeletons into musical event
475
+ sequences as the input data for model training using the MeMIDI symbolic music representation
476
+ method. Then, we design a hierarchical melody generation architecture with two generative modules
477
+ responsible for melodic skeleton construction and melody inpainting, respectively. In this subsection,
478
+ we will introduce the hierarchical melody generation architecture about how we generate the melodic
479
+ skeleton and incorporate it to guide the melody generation process. The details about the MeMIDI
480
+ symbolic music representation method and the word embedding technique used in this architecture’s
481
+ input module are described in the Materials and Methods section.
482
+ WuYun is designed to generate melodies in two stages hierarchically: melodic skeleton construc-
483
+ tion and melody inpainting, instead of the dominant end-to-end left-to-right note-by-note melody
484
+ generation paradigm. At the stage of melodic skeleton construction, we use the Transformer-XL
485
+ model with only the decoder as the melodic skeleton generation module (19), which has the ad-
486
+ vantage of remarkable performance in capturing long-term dependence. To develop the capacity
487
+ of melodic skeleton construction, we trained the Transformer-XL model on the extracted melodic
488
+ skeleton database. At the stage of melody inpainting, we employ the recurrent Transformer-based
489
+ encoder–decoder architecture (18) in a sequence-to-sequence setup as the melody inpainting module
490
+ to complete the melody conditioned on the melodic skeleton, i.e., filling the missing information
491
+ between the melodic skeleton notes. In this work, the melody inpainting problem can be defined as
492
+ follows: given a melodic skeleton sequence Cs, generate an inpainted melody sequence Cm. The
493
+ encoder maps the discrete input symbols of the melodic skeleton sequence Cs to a high-dimensional
494
+ continuous vector as conditional input into the decoder, and the decoder then generates an output
495
+ sequence Cm in an autoregressive manner. The melodic skeleton sequence will be saved in the final
496
+ generated melody. This method provides users an entry point to interact with the melody generation
497
+ model by adjusting melodic skeleton notes between two stages to control the melodic motion.
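+ Read as control flow, the two-stage design amounts to chaining two trained sequence models,
+ with the generated skeleton passed to the second stage as conditioning input. The sketch
+ below shows that flow only; the model objects and their generate methods are hypothetical
+ stand-ins for the Transformer-XL and encoder–decoder modules described here.
+ # Illustrative control-flow sketch of the two-stage generation (hypothetical APIs).
+ def generate_melody(prompt_events, skeleton_model, inpainting_model, max_bars=28):
+     # Stage 1: extend the prompt's skeleton into a full melodic skeleton.
+     skeleton_events = skeleton_model.generate(prompt_events, max_bars=max_bars)
+
+     # Optional interaction point: the skeleton can be edited here to steer
+     # the melodic motion before the melody inpainting stage runs.
+     # skeleton_events = user_edit(skeleton_events)
+
+     # Stage 2: decode a full melody conditioned on the encoded skeleton;
+     # the skeleton notes are preserved in the output sequence.
+     return inpainting_model.generate(condition=skeleton_events, max_bars=max_bars)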
498
+ In this work, we focus on designing a hierarchical skeleton-guided melody generation architecture
499
+ based on knowledge-enhanced deep learning, following the hierarchical organization principle of
500
+ structure and prolongation. Therefore, we used common language models in NLP to make the WuYun
501
+ architecture accessible. The capacity of these two generative modules may be further optimized;
502
+ however, the goal of this study is not to find the most optimal neural network.
503
+ 2.4
504
+ Evaluation metrics
505
+ Subjective and objective evaluations are the two essential aspects of evaluating the performance of
506
+ music generation systems. The human listening test is currently an indispensable and viable method
507
+ for subjective evaluation to measure the quality of the generated musical pieces. However, for the
508
+ objective evaluation, many efforts have been made to design quantitative metrics; there is not a set of
509
+ convincing and unified metrics. Although the research field of music generation is multidisciplinary,
510
+ most researchers mainly focus on generative models with different improvement goals rather than
511
+ their contribution to quantifying music complexity. Consequently, almost all the proposed objective
512
+ 7
513
+
514
+ evaluation metrics are difficult to apply for comparing different music creation systems and lack
515
+ sustainability for future development demands. We tried to calculate the averaging overlapped area
516
+ of some musical feature distributions between generated musical pieces and ground-truth musical
517
+ pieces as the objective evaluation metrics like (18, 60) using the public evaluation toolbox. We
518
+ arrived at a similar conclusion as PopMNet (32) that a better result of objective evaluation does not
519
+ mean better structure and musicality of generated music. The same objective evaluation result can be
520
+ calculated and verified with the provided melody MIDI files of this study’s next two experiments.
521
+ Therefore, we conducted two subjective evaluation experiments to evaluate the performance of our
522
+ proposed WuYun, including different melodic skeleton settings in rhythm and pitch dimensions and
523
+ comparisons with public state-of-the-art (SOTA) music generation models.
524
+ We randomly selected ten melodies from the evaluation dataset for the listening materials. Similar to
525
+ previous studies (15, 17, 19), we took the first four bars as prompt and set the maximum number of
526
+ generated bars to 28. We assigned a random order for all musical pieces as the file name, including
527
+ the generated and ground-truth musical pieces. All melody MIDI files were rendered into audio via a
528
+ piano MIDI synthesizer. In the blind listening test, participants were asked to rate each melody on a
529
+ five-point Likert scale (i.e., 1 for bad and 5 for excellent) on five dimensions:
530
+ • Rhythm: Whether the brain can feel the regular accents and rhythm patterns.
531
+ • Richness: Whether the melody sounds rich and interesting in the rhythm and pitch dimen-
532
+ sions.
533
+ • Structure: Whether the brain can feel the boundary of melodic phrases and the balance
534
+ among melodic phrases’ length.
535
+ • Expectation: Whether the direction of melody development meets the audience’s expecta-
536
+ tions for melody development (61).
537
+ • Overall: Overall quality.
538
+ We found that most nonmusicians had heard technical music terms but did not understand what
539
+ they meant. Therefore, before formal experiments, we conducted multiple rounds of discussions,
540
+ testing, and validation with musicians and nonmusicians regarding the above subjective evaluation
541
+ metrics and their descriptions until they could easily understand and grasp them. To ensure that
542
+ the recruited subjects have a common understanding of the metrics and scales in the questionnaire,
543
+ we also conducted evaluation training for them, including the explanation of subjective evaluation
544
+ metrics and preliminary experiments. All audio and MIDI files for evaluation can be found in
545
+ Acknowledgments.
546
+ 2.5
547
+ Model performance based on different melodic skeleton settings
548
+ To compare the effectiveness of variants of melodic skeleton extracted from rhythm and pitch
549
+ dimensions, we comprehensively evaluated the performance of WuYun based on different settings of
550
+ the melodic skeleton. Furthermore, we added three control group settings of randomly selected notes
551
+ with different percentages as the melodic skeleton in order to verify the effectiveness of the proposed
552
+ melodic skeleton extraction method based on music domain knowledge. All experimental settings
553
+ and the proportion of melodic skeleton notes in the melody are described below:
554
+ 1. Downbeat only uses metrical accents as the melodic skeleton (32.8%).
555
+ 2. Long Note only uses agogic accents as the melodic skeleton (27.4%).
556
+ 3. Rhythm uses rhythmic skeleton notes as the melodic skeleton (33.8%).
557
+ 4. Tonic uses tonal skeleton notes as the melodic skeleton (43.2%).
558
+ 5. Intersection uses the intersection of rhythmic skeleton notes and tonal skeleton notes as the
559
+ melodic skeleton (14.2%).
560
+ 6. Union uses the union of rhythmic skeleton notes and tonal skeleton notes as the melodic
561
+ skeleton (62.8%).
562
+ 7. Random25% randomly selects 25% of melody notes as the melodic skeleton (25%).
563
+ 8. Random50% randomly selects 50% of melody notes as the melodic skeleton (50%).
564
+ 9. Random75% randomly selects 75% of melody notes as the melodic skeleton (75%).
565
+ 8
566
+
567
+ In this experiment, we obtained 90 musical pieces for rating. We recruited 30 subjects (13 females
568
+ and 17 males, ages 18 and 30 years) from Zhejiang University and Zhejiang Conservatory of Music
569
+ to evaluate the musical pieces with payment. Fifteen subjects among them were professional music
570
+ practitioners with an average of 9 years of music training and 4 years of music performance experience.
571
+ The rest of the subjects have little professional music training or performance experience. Each
572
+ musical piece was assigned to three professionals and three nonprofessional subjects. Each subject
573
+ was required to rate 18 musical pieces, which cost approximately 25 min.
574
+ Figure 3A shows the mean opinion scores of WuYun architecture’s melody generation performances
575
+ with nine different settings on the five subjective evaluation metrics from all subjects in the form of
576
+ histograms. The detailed experimental result is shown in Table S1. Generally, among all melodic
577
+ skeleton settings, the proposed rhythmic and tonal skeleton based on music theory and psychological
578
+ study performs better than other skeletons. The rhythmic skeleton setting (No. 3) achieved the best
579
+ result on all subjective evaluation metrics, followed by the tonal skeleton setting (No. 4). Among
580
+ the three types of melodic skeletons associated with rhythm (Nos. 1, 2, and 3), the melodic skeleton
581
+ composed of a single type of accent (e.g., metrical accents or agogic accents (31)) has a large gap with
582
+ the rhythmic skeleton in richness, expectation, and overall quality and even surpassed by the random
583
+ melodic skeletons (Nos. 7, 8, and 9) on most subjective evaluation metrics. This result indicates that
584
+ a flexible rhythmic skeleton (i.e., including several kinds of musical accents) is essential for melody
585
+ composition to improve musicality. In contrast, a rigid melodic skeleton (i.e., including only one type
586
+ of accent, especially metrical accents) reduces the quality of the generated melody and limits the
587
+ models’ performance. Likewise, as depicted in the right part of Fig. 2C, the pitch classes of the tonal
588
+ skeleton notes are mostly C, D, and E, which also lead to the rigidity of the generated tonal skeletons.
589
+ Additionally, compared to the rhythmic and tonal skeleton settings, the intersection (No.5) and union
590
+ (No. 6) skeleton settings led to a distinct degradation of the melody generation performance. For
591
+ instance, the intersection (No. 5) skeleton setting received the worst scores in most evaluation aspects,
592
+ even worse than the random sampling skeleton settings. This phenomenon can be explained by
593
+ the structure and prolongation proportion tradeoffs in the design of two-stage melody generation
594
+ architecture using an end-to-end learning framework. We can preliminarily see that with the increased
595
+ percentage of melodic skeleton notes, the performance of the two-stage melody generation went up
596
+ first but then down. On the one hand, a low proportion of melodic skeleton notes makes it easier
597
+ to train the melodic skeleton construction model in the first stage. However, the generated skeleton
598
+ notes will be too sparse to guide the second stage of melodic inpainting (such as the intersection
599
+ skeleton setting, only 14.2%). On the other hand, if the proportion of melodic skeleton notes is too
600
+ large, the training difficulty and data dependency of the melodic skeleton construction model will
601
+ increase. Besides, from the perspective of the gestalt theory (62) about the law of the figure–ground
602
+ relationship, during the perception of music, a person’s attention constantly switches between different
603
+ musical elements; sometimes, he/she may be attracted to the rhythm, whereas at other times, to
604
+ the pitch. Therefore, the extraction of the hierarchical dependency structural relationship between
605
+ musical elements is affected by multiple musical dimensions; it will not be like a simple addition or
606
+ subtraction operation but a complex organic combination (63).
607
+ In this study, we chose the setting of the rhythmic skeleton (No. 3) that performed best on all
608
+ subjective evaluation metrics in this experiment as the default skeleton configuration (denoted as
609
+ WuYun-RS) for the next experiment to compare with other melody generation models.
610
+ 2.6
611
+ Comparisons with other melody generation methods
612
+ To prove the effectiveness of the proposed hierarchical skeleton-guided melody generation architecture
613
+ based on knowledge-enhanced deep learning, we compared WuYun-RS (i.e., using the rhythmic
614
+ skeleton setting) to five public SOTA Transformer-based melody generation models, namely, Music
615
+ Transformer (15), Pop Music Transformer (17), Compound Word Transformer (19), Melons (33)
616
+ and MeMIDI, that follow an end-to-end left-to-right note-by-note generative paradigm and treat
617
+ each note equally. The MeMIDI setting uses the MeMIDI data representation method like WuYun-
618
+ RS and employs the Transformer-XL model without using the melodic skeleton for the melody
619
+ generation task. Moreover, to prove the effectiveness of the generated melodic skeleton, we added
620
+ the setting of WuYun-RRS, which skips the melodic skeleton construction in the first stage and directly
+ uses the real rhythmic skeleton as additional knowledge to guide the melody inpainting process
+ in the second stage. However, the original music representation of Music
623
624
+
625
+ Figure 3: Subjective evaluation results of the WuYun melody generation architecture based on different
626
+ melodic skeleton settings, and the other public melody generation models. (A) Subjective comparison of the
627
+ performance of the WuYun architecture based on different melodic skeleton settings in Experiment 1. Data is the
628
+ mean opinion score. The WuYun architecture with the rhythmic skeleton setting achieves the best performance in
629
+ all melodic skeleton settings on all subjective evaluation metrics. (B) Subjective comparison of the performance
630
+ of different music generation models in Experiment 2. Violin plots show the kernel density estimate of the rating
+ distribution; the larger the area, the greater the probability density at that value. The
632
+ black dot within the violin plots indicates the mean; the black line within the violin plot indicates the standard
633
+ deviation. Statistical analyses are done between WuYun-RS and the rest of the models using the one-tailed
634
+ t-test (N = 130). P values of statistical significance are represented as *P < 0.05, **P < 0.01, ***P < 0.001,
635
+ and ****P < 0.0001; ns, insignificant. The subjective comparison results’ data are in Tables S1, 1, and S2,
636
+ respectively.
637
+ Transformer does not include chord progressions. For a fair comparison, we added the CHORD
638
+ events proposed in this work into the MIDI-Like music representation of Music Transformer. Each
639
+ CHORD event was followed by a TIME-SHIFT event and had a higher sorting priority than NOTE-
640
+ related events. Additionally, the MIDI quantization level of the Pop Music Transformer, Compound
641
+ Word Transformer, and Melons only considered the 16th note time grid. Therefore, in this experiment,
642
+ we applied the 16th note time grid as the MIDI quantization level to the melody dataset for all music
643
+ generation models.
644
+ In this experiment, we obtained 80 musical pieces for rating. Since the second subjective evaluation
645
+ experiment relies on the result of the first subjective evaluation experiment and requires some time to
646
+ collect, process, and analyze, we recruited 13 participants again (i.e., six females and seven males,
647
+ aged between 18 and 25 years) to evaluate the musical pieces with payment. Six subjects among them were
648
+ professional music practitioners with an average of 8 years of music training and 4 years of music
649
+ performance experience. Each subject was required to rate all musical pieces. After rating 20 musical
650
+ pieces, subjects were asked to rest for 5 min to avoid hearing fatigue. The average experiment time
+ per subject was about 2 h.
652
+ Figure 3B shows the mean opinion scores and one-tailed t-test results of the different music generation
653
+ systems on the five evaluation metrics in the form of violin plots. The detailed experimental results are
654
+ shown in Tables 1 and S2. Overall, WuYun-RS (No. 6) and WuYun-RRS (No. 7) outperformed the
655
+ other five current SOTA end-to-end left-to-right note-by-note melody generation models on all metrics,
656
+ including Music Transformer (No. 1), Pop Music Transformer (No. 2), Compound Word Transformer
657
+ (No. 4), Melons (No. 5), and MeMIDI (No. 3). Besides, except for WuYun-RRS, there is a significant
658
659
+
660
+ [Figure 3 graphic omitted: panel (A) is a bar chart of mean opinion scores for the nine melodic skeleton settings and panel (B) shows violin plots with significance markers for the compared models, both over the five subjective evaluation metrics.]
+ Table 1: Subjective evaluation scores of generated melodies based on different melody generation models in
+ Experiment 2 (mean ± standard deviation).
+ No. | Model     | Rhythm      | Richness    | Structure   | Expectation | Overall
+ 1   | MT        | 2.52 ± 0.93 | 2.34 ± 0.83 | 2.28 ± 0.86 | 2.47 ± 1.01 | 2.25 ± 0.88
+ 2   | PMT       | 2.57 ± 0.88 | 2.43 ± 0.99 | 2.54 ± 1.11 | 2.50 ± 1.16 | 2.39 ± 1.12
+ 3   | MeMIDI    | 2.61 ± 0.91 | 2.55 ± 0.95 | 2.53 ± 1.00 | 2.51 ± 0.97 | 2.42 ± 0.96
+ 4   | CWT       | 2.77 ± 0.83 | 2.72 ± 0.85 | 2.74 ± 0.81 | 2.70 ± 0.83 | 2.65 ± 0.90
+ 5   | Melons    | 2.84 ± 0.89 | 2.68 ± 0.95 | 2.75 ± 0.87 | 2.67 ± 0.87 | 2.71 ± 0.88
+ 6   | WuYun-RS  | 3.13 ± 0.88 | 3.07 ± 0.87 | 3.13 ± 0.86 | 3.02 ± 0.92 | 3.00 ± 0.87
+ 7   | WuYun-RRS | 3.20 ± 0.81 | 3.11 ± 0.85 | 3.15 ± 0.88 | 3.00 ± 0.96 | 3.02 ± 0.88
+ 8   | Human     | 3.54 ± 0.82 | 3.65 ± 0.76 | 3.68 ± 0.89 | 3.55 ± 0.92 | 3.57 ± 0.84
+ MT, PMT, and CWT stand for Music Transformer, Pop Music Transformer, and Compound Word Transformer, respectively.
775
+ difference (P < 0.01) between WuYun-RS and the other melody generation systems. This result
776
+ demonstrates that WuYun-RS and WuYun-RRS are able to generate melodies with improved long-
777
+ term structure and musicality, which benefit from the rhythmic skeleton as a deep structure to guide
778
+ the melody generation process. Furthermore, WuYun-RS and WuYun-RRS demonstrate highly similar
779
+ performances in terms of the quality of generated melodies on all evaluation metrics. This result
780
+ indicates the effectiveness of the melodic skeletons generated via the melodic skeleton construction
781
+ module. However, there is still an obvious gap between the WuYun melody generation architecture
782
+ and human-composed music, leaving room for improvement. This also shows that designing clever
783
+ decorations for melodic skeletons is another difficult research problem, even for human composers.
784
+ Additionally, when using the same symbolic music representation method, the knowledge-enhanced
785
+ hierarchical skeleton-guided melody generation model of WuYun-RS greatly outperformed the single-
786
+ stage end-to-end left-to-right note-by-note melody generation model of MeMIDI (No. 3). On the one
787
+ hand, this demonstrates that our proposed hierarchical melody generation paradigm can be applied to
788
+ empower the dominant end-to-end left-to-right note-by-note melody generation paradigm. On the
789
+ other hand, although the Compound Word Transformer and Melons (Nos. 4 and 5) were inferior to
790
+ WuYun-RS, their effective compound word representation and the linear Transformer as the backbone
791
+ architecture enable them to process multidimensional music information in a single step and
792
+ obtain a better result among these five public SOTA melody generation models. Thus, combining the
793
+ proposed knowledge-enhanced hierarchical skeleton-guided music generation architecture with more
794
+ efficient music representation methods and advanced language models can bring a better result for
795
+ melody generation tasks.
796
+ 3
797
+ Discussion
798
+ The methodology we have taken in designing WuYun, a hierarchical skeleton-guided melody genera-
799
+ tion architecture based on knowledge-enhanced deep learning, combines music analysis theory and
800
+ musical psychology. Unlike the dominant end-to-end left-to-right note-by-note melody generation
801
+ paradigm, we use the hierarchical organization principle of structure and prolongation to decompose
802
+ the melody generation process into melodic skeleton construction and melody inpainting stages. We
803
+ extract the most structurally important notes based on hearing sensitivity as melodic skeletons and
804
+ incorporate them into the melody generation process as a deep structure to guide the model to learn
805
+ the hierarchical dependency structures among musical event sequences from the limited melody data
806
+ without music boundary detection (31). The human evaluation results demonstrated that our model
807
+ exhibits significant improvement in both long-term structure and musicality across the structured
808
+ melody generation task.
809
+ In practical application scenarios, the ability to obtain real feedback from human users for improving
810
+ the performance and interaction experience of the system is essential for the next generation of
811
+ iterative and interactive music generation systems. In general, WuYun allows human users to edit the
812
+ generated melodic skeleton and adjust its shape to guide and constrain the range of the decorative
813
+ notes at the next stage of melody inpainting. Thus, the proposed generation strategy based on the
814
815
+
816
+ hierarchical organization principle of structure and prolongation not only can maintain the long-range
817
+ tonal coherence of generated melodies but also achieve control over the target of melodic motion by
818
+ human users. Additionally, with WuYun and its melodic skeleton analysis framework, human users
819
+ can directly extract the skeleton from existing music compositions for music composition analysis or
820
+ re-creation.
821
+ Our study has some limitations, most notably the performance of the melody inpainting model: setting
+ aside the possibility that the generated melodic skeleton may be poor, even when an original rhythmic skeleton
+ is provided, the quality of the completed melody is still far from that of real music. Further performance
824
+ improvements could be achieved using pretrained masked language models (23) for music generation,
825
+ especially for the melody inpainting task. Another issue is how to effectively extract an organic
826
+ melodic skeleton from hierarchical musical structures combining two or more musical dimensions
827
+ (e.g., rhythm and pitch) to further improve the structure of generated melodies. According to the
828
+ research in the cognitive psychology of music, while listening to music, only by combining the tonal
829
+ and rhythmic structures can we form a more coherent musical representation and create a complete
830
+ sense of melody (34, 48, 49). However, the brain’s processing mechanism of the hierarchical musical
831
+ structure remains a fundamental research problem in the field of music cognitive psychology (64–66).
832
+ With the help of advanced electroencephalography devices, cognitive musicology may deepen our understanding
+ of how humans perceive hierarchical musical structure and allow this knowledge to be applied to music generation. Additionally,
834
+ we expect to investigate other systematic music analysis theories and gain further psychological
835
+ knowledge to analyze and compare the hierarchical levels of important musical events along different
836
+ musical dimensions for designing a more effective melodic skeleton extraction framework. Another
837
+ direction is to explore explainable AI for music generation to assist end users in making better
838
+ decisions, since deep learning methods lack transparency. With these potential
839
+ future improvements in mind, we hope that our findings for structured melody generation will optimize
840
+ the dominant melody generation paradigms to improve long-term structure and musicality and provide
841
+ a new lens to develop multidisciplinary research via combining data-driven and knowledge-based
842
+ approaches.
843
+ 4
844
+ Materials and Methods
845
+ 4.1
846
+ Details of dataset preprocessing
847
+ We evaluate the effectiveness of WuYun architecture on a commonly used and publicly available
848
+ symbolic melody dataset of Wikifonia (32, 33, 67). The Wikifonia dataset contains thousands of
849
+ lead sheets in MusicXML format. It covers various music genres, composed of melody and the
850
+ accompanying chord progression and tonality labels. Here, we describe the procedure below to clean
851
+ up noisy data and artificial errors since the dataset is user-generated.
852
+ • Data Segmentation: To simplify rhythm modeling, we only keep those segments from
853
+ MIDI files with the most commonly used 4/4 time signature (18).
854
+ • MIDI Quantization: For a more beat-accurate timing of sounds, quantization is a useful
855
+ digital music processing step that sets MIDI data on beats or exact fractions of beats to eliminate
+ imprecise timing caused by expressive musical performance or recording mistakes. We
857
+ contend that a more precise and adaptable time grid is required to model a more expressive
858
+ metrical context, including the 32nd (18), 64th note, and even triplets. By contrast, most
859
+ prior works only use the 16th-note time grid for quantization (17, 19, 31–33); each bar
860
+ is quantized into 16 intervals. In this study, we propose a self-adaptive mixed precision
861
+ quantization method to reduce quantization errors (Fig. 4). This method can automatically
862
+ choose a suitable quantize grid for every single note based on its duration, including straight
863
+ notes and triplets. The difference between straight notes and triplets is whether the musical
+ beat is divided evenly into halves or thirds. First, notes shorter than a 64th note are discarded, whereas
865
+ notes longer than one bar are saved into the whole note. Second, according to the note
866
+ duration, the remaining notes are classified as straight notes or triplets. However, triplets do not always
+ contain three notes; triplets with only two notes are quite common, such as in
+ swing. Additionally, in theory, every note in a triplet has an equal rhythmic value. However, in
869
+ practice, most notes are slightly different from each other in musical performance. Therefore,
870
+ two or three consecutive notes with approximately the same duration and consistent with
871
+ the duration of triplets are considered as triplets. Based on our experimental statistical
872
+ [Figure 4 graphic omitted: the diagram illustrates the standard, triplet, and selected time grids (whole, 4th, 8th, 16th, 32nd, 48th, and 64th notes) together with legend entries such as Basic Note, Quantization Error, Shift Backward/Forward, Selected Grids, Standard Grids, Standard Duration, Triplet Grids, and Time Signature; see the caption below.]
1023
+ Figure 4: Illustration of the self-adaptive mixed precision MIDI quantization. The MIDI quantization
1024
+ method would automatically choose a suitable quantize grid for every note according to its note length to
1025
+ eliminate imprecise timing, including straight notes (minimum 64th note) and triplets (minimum 48th note).
1026
+ results, we set the acceptable duration error ratio between the actual triplet and the standard
1027
+ triplet to within 20%. Last, the method automatically selects a proper quantize grid for
1028
+ every note. In terms of straight notes, the granularity of the time grid depends on the note
1029
+ duration. Particularly, the note onset is aligned to its closest 16th note time grid when the
1030
+ note duration is greater than or equal to a 16th note, to its closest 32nd note time grid when
1031
+ the note duration is between a 16nd and 32th note, and to its closest 64th note time grid
1032
+ when the note duration is between a 32nd and 64th note. Moreover, the straight note offset
1033
+ is aligned to the 64th note time grid. In terms of triplets, the note onset and offset time is
1034
+ aligned to the 48th note time grid (a minimal sketch of this grid-selection rule is given after this list).
1035
+ • Tonality Unification: For simplicity, the tonalities and chord progressions of those MIDI
1036
+ files are transposed to “C major” and “A minor” tonalities (68). We set one chord per beat
1037
+ and unify the chord representation of the Wikifonia dataset using the chord dictionary as
1038
+ described in the following subsection.
1039
+ • Octave Transposition: Octave transposition is applied to shift melody pitches into
+ the range from C3 to C5; melodies that remain outside this regular melodic pitch range are removed
+ (32).
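To make the grid-selection rule above concrete, here is a minimal Python sketch (not the authors' released code). It assumes the 480-ticks-per-beat resolution and 4/4 bars (1920 ticks) used elsewhere in this paper, assumes triplet detection has already been performed, and reads "notes longer than one bar are saved into the whole note" as clipping the duration; the function and constant names are hypothetical.

```python
# Hypothetical sketch of the self-adaptive mixed precision quantization rule.
# Assumes 480 ticks per beat and 4/4 bars (1920 ticks), as stated in Sec. 4.2.
TICKS_PER_BEAT = 480
BAR = 4 * TICKS_PER_BEAT                          # 1920 ticks per 4/4 bar
N16, N32, N64 = BAR // 16, BAR // 32, BAR // 64   # 120, 60, 30 ticks
N48 = BAR // 48                                   # 40 ticks (triplet grid)

def snap(ticks, grid):
    """Align a tick position to the nearest multiple of `grid`."""
    return int(round(ticks / grid)) * grid

def quantize_note(onset, duration, is_triplet=False):
    """Return (onset, offset) on the self-adaptive grid, or None if the
    note is shorter than a 64th note and therefore discarded."""
    if duration < N64:
        return None
    duration = min(duration, BAR)          # notes longer than one bar -> whole note (our reading)
    if is_triplet:
        grid_on = grid_off = N48           # triplet onsets/offsets use the 48th-note grid
    else:
        if duration >= N16:
            grid_on = N16                  # >= 16th note: 16th-note grid
        elif duration >= N32:
            grid_on = N32                  # between a 16th and a 32nd: 32nd-note grid
        else:
            grid_on = N64                  # between a 32nd and a 64th: 64th-note grid
        grid_off = N64                     # straight-note offsets use the 64th-note grid
    return snap(onset, grid_on), snap(onset + duration, grid_off)
```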
1042
+ After data cleaning, we get 2,921 musical pieces in Wikifonia, including 116,935 bars and 425,223
1043
+ notes. Finally, we randomly hold out 50 songs for testing and use the remainder for training.
1044
+ 4.2
1045
+ Symbolic melody representation
1046
+ In this work, we adopted a modified version of the “MuMIDI” symbolic music representation (18)
1047
+ to encode a piece of monophonic melody into discrete musical event sequences. We refer to it as
1048
+ MeMIDI. The following describes the MeMIDI symbol set, which includes bar, position, note, chord,
+ and tempo symbols (a minimal encoding sketch is given at the end of this subsection).
1050
+ • Bar and Position
1051
+ We use a bar symbol to represent a bar line and a position symbol to represent the onset
1052
+ of a note or a chord event. Since the minimum time grids of the straight and triplet note
1053
+ are 64th and 48th notes, respectively, and the MIDI files’ time resolution is 480 ticks per
1054
+ beat; thus we merge these two kinds of minimum time grids values ({0, 30, 60, ..., 1890} ∪
1055
+ {0, 40, 80, ..., 1880}) and use the <Pos_Value> symbol to represent 96 kinds of starting
1056
+ positions, such as <Pos_30>. We assign a position symbol for every chord and note music
1057
+ event.
1058
1059
+
1060
+ Table 2: List of chord events.
+ Chord                         | Content
+ Chord root                    | C, Db, D, Eb, E, F, F#, G, Ab, A, Bb, B
+ Chord quality: Triad          | M, m, o, +
+ Chord quality: Seventh chord  | MM7, Mm7, mM7, mm7, o7, %7, +7, +M7
+ Chord quality: Suspension     | Sus.
1072
+ • Note
1073
+ A note has three basic attributes: pitch, duration, and velocity. Here, the value of the note
1074
+ pitch attribute ranges from 48 (C3) to 83 (C5). The value of the note velocity attribute
1075
+ ranges from 0 to 127. Considering both straight notes and triplet notes, the value range of
1076
+ the note duration attribute is {30, 60, 90, ..., 1920} ∪ {40, 80, 160, 320, 640} ticks. We use
1077
+ a compound word <Pitch_Value, Velocity_Value, Duration_Value> to compress these three
1078
+ attributes of one note in one token to shorten the length of the melody events sequence.
1079
+ • Chord
1080
+ To cover the chord types in the Wikifonia dataset, we use a more comprehensive chord
1081
+ event list. As shown in Table 2, we consider 12 chord roots and 13 chord qualities, yielding
1082
+ 156 possible chord events. We use a chord symbol <Root_Quality> to represent a chord
1083
+ musical event. To reduce repetition, we use the same position symbol for Note and Chord,
1084
+ which start at the same time. For simplicity, we do not use the Chord symbol in the melodic
1085
+ skeleton event sequence.
1086
+ • Tempo
1087
+ We divide the tempo into three categories: low (below 90), medium (90 to 160), and high
1088
+ (above 160).
1089
+ 4.3
1090
+ WuYun architecture
1091
+ Here, we briefly elaborate on the configuration details of the two Transformer-based generative
1092
+ modules of WuYun architecture, i.e., the melodic skeleton generation module for the melodic skeleton
1093
+ construction stage and the melodic prolongation generation module for the melody inpainting stage.
1094
+ We refer readers to (17–19) for more details. For reproducibility, we do not tweak the architecture of
1095
+ referenced models so that our music generation architecture can be easily assembled with the public
1096
+ implementation of Transformers.
1097
+ We use an unconditional sequence learning model Transformer-XL for the melodic skeleton genera-
1098
+ tion module. We use four self-attention layers, each with eight attention heads. The model hidden
1099
+ size and the inner layer of the feed-forward part are set to 512 and 2,048, respectively. All token
1100
+ embedding sizes are set to 512, following (19). We use the compound word embedding (Fig. S1) and
1101
+ token attribute prediction method for the input and output modules, respectively (18). We employed
1102
+ the top-k temperature-controlled stochastic sampling method (k = 10, temperature = 0.9) during
1103
+ inference. The length of training input tokens and the memory length are also 512. Here, we used the
1104
+ melodic skeleton data extracted from the training part of the Wikifonia dataset to train the melodic
1105
+ skeleton generation module.
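For reference, here is a generic PyTorch sketch of the top-k temperature-controlled stochastic sampling mentioned above (k = 10, temperature = 0.9); this is not the authors' code and the function name is hypothetical.

```python
import torch

def sample_top_k(logits, k=10, temperature=0.9):
    """Temperature-controlled top-k sampling over a 1-D tensor of logits."""
    scaled = logits / temperature              # sharpen/flatten the distribution
    top_vals, top_idx = torch.topk(scaled, k)  # keep only the k most likely tokens
    probs = torch.softmax(top_vals, dim=-1)    # renormalize over the top-k set
    choice = torch.multinomial(probs, num_samples=1)
    return top_idx[choice].item()              # index of the sampled token

# Example: sample one token id from a random vocabulary of 300 events.
token_id = sample_top_k(torch.randn(300))
```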
1106
+ We use a conditional sequence-to-sequence model based on Transformer-based recurrent en-
1107
+ coder–decoder neural networks for the melodic prolongation generation module (18). We set the
1108
+ number of encoder layers, decoder layers, encoder heads, and decoder heads to 4. The size of hidden
1109
+ layers and the dimension of token embeddings are set to 256. We keep the same input module, output
1110
+ module, sampling method, length of training input tokens, and memory length as in the melodic
1111
+ skeleton generation module. For training the melodic prolongation generation module, we use the
1112
+ MeMIDI representations of the paired melodic skeleton and melody data as the encoder and decoder
1113
+ input data, respectively.
1114
1115
+
1116
+ 4.4
1117
+ Training
1118
+ We implemented the WuYun architecture with Pytorch (v1.7.1) (69). The parameters of the WuYun
1119
+ architecture were optimized by minimizing the cross-entropy loss on a single NVIDIA GTX 2080-Ti
1120
+ GPU with 11 GB memory. Specifically, the training loss was minimized with the Adam optimizer
1121
+ (β1 = 0.9, β2 = 0.98), a learning rate of 10^-3, and dropout was applied with a ratio of 0.1.
1122
+ The mini-batches of the input data for the melodic skeleton generation module and the melodic
1123
+ prolongation generation module were 20 and 44, respectively. It took nearly 2 days to train the two
1124
+ modules until training convergence.
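Schematically, the optimization settings above correspond to a configuration like the following (a sketch with a placeholder model, not the released training script):

```python
import torch

model = torch.nn.Transformer()                              # placeholder for the WuYun modules
optimizer = torch.optim.Adam(model.parameters(),
                             lr=1e-3, betas=(0.9, 0.98))    # Adam with beta1 = 0.9, beta2 = 0.98
criterion = torch.nn.CrossEntropyLoss()                     # cross-entropy training loss
dropout_ratio = 0.1                                         # applied inside the Transformer layers
```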
1125
+ 4.5
1126
+ Statistical analysis
1127
+ All subjective evaluation results were expressed as mean ± standard deviation. The statistical
1128
+ significance of the performance difference in WuYun-RS and other melody generation methods
1129
+ was analyzed using the one-tailed t-test. Asterisk indicates significant difference at *P < 0.05,
1130
+ **P < 0.01, ***P < 0.001, ****P < 0.0001, and ns, not significant.
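A minimal SciPy sketch of such a one-tailed test is shown below; the rating arrays are placeholders, and treating the samples as independent (rather than paired) is our assumption, since the paper does not specify.

```python
from scipy import stats
import numpy as np

# Placeholder rating vectors (N = 130 ratings per system in Experiment 2).
wuyun_rs = np.random.normal(3.0, 0.9, 130)
baseline = np.random.normal(2.4, 0.9, 130)

# One-tailed test of whether WuYun-RS is rated higher than the baseline system.
t_stat, p_value = stats.ttest_ind(wuyun_rs, baseline, alternative='greater')
print(t_stat, p_value)
```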
1131
+ Acknowledgements
1132
+ Thanks to Huawei Technologies Co., Ltd for the help in dataset collection and comments. We
1133
+ thank Jiaxing Yu, Chongjun Zhong, Ruiyuan Tang, and Jiaqi Wang for insightful discussions and
1134
+ visualizations.
1135
+ Funding:
1136
+ This work is supported by the National Natural Science Foundation of China
1137
+ (No.62272409), the Key R&D Program of Zhejiang Province (No.2022C03126), the Project of
1138
+ Key Laboratory of Intelligent Processing Technology for Digital Music (Zhejiang Conservatory of
1139
+ Music), and the Ministry of Culture and Tourism (No.2022DMKLB001).
1140
+ Author contributions: Conceptualization: K.Z., L.S. Methodology: X.W., T.Z., Z.H., K.Z. Investi-
1141
+ gation: T.Z., Z.H., Q.L. Visualization: S.W., Q.L. Supervision: L.S., K.Z., X.T. Writing—original
1142
+ draft: X.W., K.Z., Q.L. Writing—review & editing: K.Z., X.T., X.W., L.S.
1143
+ Competing interests: K.Z., X.W., and T.Z. are inventors on a patent application related to this work
1144
+ filed by Zhejiang University. The authors declare that they have no other competing interests.
1145
+ Data and materials availability: All data needed to evaluate the conclusions in the paper are present
1146
+ in the paper and/or the Supplementary Materials. Raw experimental data and the generated symbolic
1147
+ melody files are available on Zenodo at DOI 10.5281/zenodo.7480957
1148
+ under a Creative Commons Attribution 4.0 International license. The code of the WuYun music
1149
+ generation framework is available at https://github.com/NEXTLab-ZJU/wuyun.
1150
+ References
1151
+ [1] F. Carnovalini, A. Rodà, Computational creativity and music generation systems: An introduction to the state
1152
+ of the art. Front. Artif. Intell. Appl. 3, 14 (2020).
1153
+ [2] Y. LeCun, Y. Bengio, G. Hinton, Deep learning. Nature 521, 436–444 (2015).
1154
+ [3] J. Briot, G. Hadjeres, F. Pachet, Deep Learning Techniques for Music Generation (Springer, 2020).
1155
+ [4] E. Frid, C. Gomes, Z. Jin, Music creation by example, in Proceedings of the 2020 CHI conference on human
1156
+ factors in computing systems (CHI, 2020), pp. 1–13.
1157
+ [5] C. Yu, Z. Qin, F. J. Martín-Martínez, M. Buehler, A Self-Consistent Sonification Method to Translate Amino
1158
+ Acid Sequences into Musical Compositions and Application in Protein Design Using Artificial Intelligence. ACS
1159
+ Nano 13, 7471–7482 (2019).
1160
+ [6] S. Di, Z. Jiang, S. Liu, Z. Wang, L. Zhu, Z. He, Z. He, H. Liu, S. Yan, Video background music generation
1161
+ with controllable music transformer, in Proceedings of the 29th ACM International Conference on Multimedia
1162
+ (MM, 2021), pp. 2037–2045.
1163
+ [7] L. W. Wesseldijk, M. A. Mosing, F. Ullén, Why Is an Early Start of Training Related to Musical Skills in
1164
+ Adulthood? A Genetically Informative Study. Psychol. Sci. 32, 3–13 (2020).
1165
1166
+
1167
+ [8] W. Zhou, C. Ye, H. Wang, Y. Mao, W. Zhang, A. Liu, C. Yang, T. Li, L. Hayashi, W. Zhao, L. Chen, Y. Liu,
1168
+ W. Tao, Z. Zhang, Sound induces analgesia through corticothalamic circuits. Science 377, 198–204 (2022).
1169
+ [9] S. Koelsch, M. A. Rohrmeier, R. A. Torrecuso, S. Jentschke, Processing of hierarchical syntactic structure in
1170
+ music. Proc. Natl. Acad. Sci. 110, 15443–15448 (2013).
1171
+ [10] A. D. Patel, Language, music, syntax and the brain. Nat. Neurosci. 6, 674–681 (2003).
1172
+ [11] J. M. J. Valanarasu, P. Oza, I. Hacihaliloglu, V. M. Patel, Medical Transformer: Gated Axial-Attention for
1173
+ Medical Image Segmentation, in International Conference on Medical Image Computing and Computer-Assisted
1174
+ Intervention (Cham, 2021), pp. 36–46.
1175
+ [12] N. Li, S. Liu, Y. Liu, S. Zhao, M. Liu, M. T. Zhou, Neural speech synthesis with transformer network, in
1176
+ Proceedings of the AAAI Conference on Artificial Intelligence (AAAI, 2019), pp. 6706–6713.
1177
+ [13] P. Schwaller, B. Hoover, J. L. Reymond, H. Strobelt, T. Laino, Extraction of organic chemistry grammar
1178
+ from unsupervised learning of chemical reactions. Sci. Adv. 7, eabe4166 (2021).
1179
+ [14] J. Jumper, R. Evans, A. Pritzel, T. Green, M. Figurnov, O. Ronneberger, K. Tunyasuvunakool, R. Bates, A.
1180
+ Žídek, A. Potapenko, A. Bridgland, C. Meyer, S. A. A. Kohl, A. J. Ballard, A. Cowie, B. Romera-Paredes, S.
1181
+ Nikolov, R. Jain, J. Adler, T. Back, S. Petersen, D. Reiman, E. Clancy, M. Zielinski, M. Steinegger, M. Pacholska,
1182
+ T. Berghammer, S. Bodenstein, D. Silver, O. Vinyals, A. W. Senior, K. Kavukcuoglu, P. Kohli, D. Hassabis,
1183
+ Highly accurate protein structure prediction with AlphaFold. Nature 596, 583–589 (2021).
1184
+ [15] C. A. Huang, A. Vaswani, J. Uszkoreit, I. Simon, C. Hawthorne, N. Shazeer, A. M. Dai, M. D. Hoffman,
1185
+ M. Dinculescu, D. Eck, Music Transformer: Generating music with long-term structure, in 7th International
1186
+ Conference on Learning Representations (ICLR, 2018).
1187
+ [16] B. Yu, P. Lu, R. Wang, W. Hu, X. Tan, W. Ye, S. Zhang, T. Qin, T. Liu, Museformer: Transformer with
1188
+ fine-and coarse-grained attention for music generation, in Advances in Neural Information Processing Systems
1189
+ (NeurIPS, 2022).
1190
+ [17] Y. Huang, Y. Yang, Pop music transformer: Beat-based modeling and generation of expressive pop piano
1191
+ compositions, in Proceedings of the 28th ACM International Conference on Multimedia (MM, 2020), pp.
1192
+ 1180–1188.
1193
+ [18] Y. Ren, J. He, X. Tan, T. Qin, Z. Zhao, T. Liu, Popmag: Pop music accompaniment generation, in
1194
+ Proceedings of the 28th ACM International Conference on Multimedia (MM, 2020), pp. 1198–1206.
1195
+ [19] W. Hsiao, J. Liu, Y. Yeh, Y. Yang, Compound word transformer: Learning to compose full-song music over
1196
+ dynamic directed hypergraphs, in Proceedings of the AAAI Conference on Artificial Intelligence (AAAI, 2021),
1197
+ pp. 178–186.
1198
+ [20] C. Payne. 2019. “Musenet.” OpenAI, July 21, 2022. http://openai.com/blog/musenet.
1199
+ [21] N. Zhang, Learning adversarial transformer for symbolic music generation. IEEE Trans. Neural Netw.
1200
+ Learn. Syst. 1–10 (2020).
1201
+ [22] O. Sageev, S. Ian, D. Sander, E. Douglas, S. Karen, This time with feeling: Learning expressive musical
1202
+ performance. Neural. Comput. Appl. 32, 955–967 (2020).
1203
+ [23] M. Zeng, X. Tan, R. Wang, Z. Ju, T. Qin, T. Liu, MusicBERT: Symbolic music understanding with
1204
+ large-scale pre-training, in Findings of the ACL: ACL-IJCNLP 2021 (ACL, 2021), pp. 791–800.
1205
+ [24] I. V. Anishchenko, T. M. Chidyausiku, S. Ovchinnikov, S. J. Pellock, D. Baker, De novo protein design by
1206
+ deep network hallucination. Nature 600, 547–552 (2020).
1207
+ [25] R. Chowdhury, N. Bouatta, S. Biswas, C. Floristean, A. Kharkare, K. Roye, C. Rochereau, G. Ahdritz,
1208
+ J. Zhang, G. M. Church, P. K. Sorger, M. AlQuraishi, Single-sequence protein structure prediction using a
1209
+ language model and deep learning. Nat. Biotechnol. 40, 1617–1623 (2022).
1210
+ [26] J. Dauparas, I. V. Anishchenko, N. Bennett, H. Bai, R. J. Ragotte, L. F. Milles, B. I. M. Wicky, A. Courbet,
1211
+ R. Haas, N. Bethel, P. J. Y. Leung, T. F. Huddy, S. J. Pellock, D. Tischer, F. Chan, B. Koepnick, H. Nguyen, A.
1212
+ Kang, B. Sankaran, A. K. Bera, N. P. King, D. Baker, Robust deep learning-based protein sequence design using
1213
+ ProteinMPNN. Science 378, 49–56 (2022).
1214
+ [27] J. Tang, X. Han, M. Tan, X. Tong, K. Jia, SkeletonNet: A Topology-Preserving Solution for Learning Mesh
1215
+ Reconstruction of Object Surfaces From RGB Images. IEEE Trans. Pattern Anal. Mach. Intell. 44, 6454–6471
1216
+ (2020).
1217
+ [28] D. Shi, X. Diao, H. Tang, X. Li, H. Xing, H. Xu, RCRN: Real-world Character Image Restoration Network
1218
+ via Skeleton Extraction. In Proceedings of the 30th ACM International Conference on Multimedia (MM, 2022),
1219
+ pp. 1177–1185.
1220
1221
+
1222
+ [29] K. Zhang, R. Zhang, Y. Yin, Y. Li, W. Wu, L. Sun, F. Wu, H. Deng, Y. Pan, Visual knowledge guided
1223
+ intelligent generation of Chinese seal carving. Front. Inf. Technol. Electron. Eng. 23, 1479–1493 (2022).
1224
+ [30] J. Wu, C. Hu, Y. Wang, X. Hu, J. Zhu, A hierarchical recurrent neural network for symbolic melody
1225
+ generation. IEEE Trans. Cybern. 50, 2749–2757 (2019).
1226
+ [31] S. Dai, Z. Jin, C. Gomes, R. B. Dannenberg, Controllable deep melody generation via hierarchical music
1227
+ structure representation, in Proceedings of the 22nd International Society for Music Information Retrieval
1228
+ Conference (ISMIR, 2021), pp. 143–150.
1229
+ [32] J. Wu, X. Liu, X. Hu, J. Zhu, Popmnet: Generating structured pop music melodies using neural networks.
1230
+ Artif. Intell. 286, 103303 (2020).
1231
+ [33] Y. Zou, P. Zou, Y. Zhao, K. Zhang, R. Zhang, X. Wang, MELONS: generating melody with long-term
1232
+ structure using transformers and structure graph, in ICASSP 2022-2022 IEEE International Conference on
1233
+ Acoustics, Speech and Signal Processing (ICASSP, 2022), pp. 191–195.
1234
+ [34] F. Lerdahl, R. Jackendoff, A Generative Theory of Tonal Music (MIT Press, Cambridge, MA, 1983).
1235
+ [35] B. M. K. Ayotte, Heinrich Schenker: A Guide to Research (Routledge, 2020).
1236
+ [36] J. Berezovsky, The structure of musical harmony as an ordered phase of sound: A statistical mechanics
1237
+ approach to music theory. Sci. Adv. 5, eaav8490 (2019).
1238
+ [37] F. Salzer, Structural Hearing: Tonal Coherence in Music (Courier Corporation, 1962).
1239
+ [38] Z. Dai, Z. L. Yang, Y. M. Yang, J. Carbonell, Q. Le, R. Salakhutdinov, Transformer-XL: Attentive language
1240
+ models beyond a fixed-length context, in Proceedings of the 57th Annual Meeting of the ACL (ACL, 2019), pp.
1241
+ 2978–2988.
1242
+ [39] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, I. Polosukhin, Attention
1243
+ is all you need, in Proceedings of the 31st Conference on Neural Information Processing Systems (NIPS, 2017).
1244
+ [40] G. Hadjeres, F. Nielsen, Anticipation-RNN: enforcing unary constraints in sequence generation, with
1245
+ application to interactive music generation. Neural. Comput. Appl. 32, 995–1005 (2020).
1246
+ [41] B. Maess, S. Koelsch, T. C. Gunter, A. D. Friederici, Musical syntax is processed in Broca’s area: An MEG
1247
+ study. Nat. Neurosci. 4, 540–545 (2001).
1248
+ [42] M. D. Hauser, N. Chomsky, W. T. Fitch, The faculty of language: What is it, who has it, and how did it
1249
+ evolve. Science 298, 1569–1579 (2002).
1250
+ [43] W. T. Fitch, M. D. Hauser, Computational constraints on syntactic processing in a nonhuman primate.
+ Science 303, 377–380 (2004). [44] A. D. Friederici, J. Bahlmann, S. Heim, R. I. Schubotz, A. Anwander, The
1252
+ brain differentiates human and non-human grammars: Functional localization and structural connectivity. Proc.
1253
+ Natl. Acad. Sci. 103, 2458–2463 (2006).
1254
+ [45] R. Näätänen, L. Anne, L. Mietta, C. Marie, H. Minna, I. Antti, V. Martti, Alku, Paavo, I. Risto, L. Aavo,
1255
+ Language-specific phoneme representations revealed by electric and magnetic brain responses. Nature 385,
1256
+ 432–434 (1997).
1257
+ [46] A. Baddeley, Working memory: Looking back and looking forward. Nat. Rev. Neurosci 4, 829–839 (2003).
1258
+ [47] H. Schenker, Neue Musikalische Theorien und Phantasien: Der Freie Satz (Universal Edition, 1956).
1259
+ [48] D. Hodges, Music in the Human Experience: An Introduction to Music Psychology (Routledge, 2010).
1260
+ [49] A. D. Patel, Music, Language, and the Brain (Oxford University Press, 2010).
1261
+ [50] D. Povel, Melody generator: a device for algorithmic music construction. J. Softw. Eng. Appl 3, 683 (2010).
1262
+ [51] S. G. Laitz, The Complete Musician: An Integrated Approach to Tonal Theory, Analysis, and Listening
1263
+ (Oxford University Press, 2012).
1264
+ [52] F. Lerdahl, Tonal Pitch Space (Oxford University Press, 2001).
1265
+ [53] F. Lerdahl, C. Krumhansl, Modeling tonal tension. Music Percept. 24, 329–366 (2007).
1266
+ [54] G. W. Cooper, L. B. Meyer, The Rhythmic Structure of Music (Chicago University Press, 1963).
1267
+ [55] C. L. Krumhansl, L. L. Cuddy, A theory of tonal hierarchies in music. Music Percept. 36, 51–87 (2010).
1268
+ [56] Nattiez, J. J., Music and Discourse: Toward a Semiology of Music (Princeton University Press, 1990).
1269
+ [57] E. Chew, Mathematical and computational modeling of tonality, AMC 10, 141 (2014).
1270
1271
+
1272
+ [58] D. Herremans, E. Chew, Tension ribbons: Quantifying and visualising tonal tension, in Proceedings of the
1273
+ Second International Conference on Technologies for Music Notation and Representation (TENSOR, 2016), pp.
1274
+ 8–18.
1275
+ [59] H. Dorien, E. Chew, MorpheuS: generating structured music with constrained patterns and tension. IEEE
1276
+ Trans. Affect. Comput. 10, 510–523 (2017).
1277
+ [60] L. C. Yang, A. Lerch, On the evaluation of generative models in music. Neural. Comput. Appl. 32,
1278
+ 4773–4784 (2020).
1279
+ [61] D. B. Huron, Sweet Anticipation: Music and the Psychology of Expectation (MIT Press, Cambridge, MA,
1280
+ 2006).
1281
+ [62] K. Koffka, Principles of Gestalt Psychology (Routledge, 2013).
1282
+ [63] A. M. Treisman, G. Gelade, A feature-integration theory of attention. Cogn. Psychol. 12, 97–136 (1980).
1283
+ [64] R. M. Brown, J. L. Chen, A. Hollinger, V. B. Penhune, C. Palmer, R. J. Zatorre, Repetition suppression in
1284
+ auditory-motor regions to pitch and temporal structure in music. J. Cogn. Neurosci. 25, 313328 (2013).
1285
+ [65] I. Peretz, M. Coltheart, Modularity of music processing. Nat. Neurosci. 6, 688–691 (2003).
1286
+ [66] G. R. Kuperberg, T. F. Jaeger, What do we mean by prediction in language comprehension? Lang. Cogn.
1287
+ Neurosci. 31, 32–59 (2016).
1288
+ [67] P. E. Hutchings, J. McCormack, Adaptive music composition for games. IEEE Trans. Games 12, 270–280
1289
+ (2019).
1290
+ [68] Z. Ju, P. Lu, X. Tan, R. Wang, C. Zhang, S. Wu, K. Zhang, X. Li, T. Qin, T. Liu, TeleMelody: Lyric-
1291
+ to-Melody generation with a template-based two-stage method, in Proceedings of the 2022 Conference on
1292
+ Empirical Methods in Natural Language Processing (EMNLP, 2022), pp. 5426–5437.
1293
+ [69] A. Paszke, S. Gross, F. Massa, A. Lerer, J. Bradbury, G. Chanan, T. Killeen, Z. Lin, N. Gimelshein, L.
1294
+ Antiga, A. Desmaison, A. Kopf, E. Yang, Z. De Vito, M. Raison, A. Tejani, S. Chilamkurthy, B. Steiner, L.
1295
+ Fang, J. Bai, S. Chintala, PyTorch: An imperative style, high-performance deep learning library. Adv. Neural Inf.
1296
+ Process. Syst. 32, 8026–8037 (2019).
1297
1298
+
1299
+ A
1300
+ Appendix
1301
+ A.1
1302
+ Additional Supplementary Figure
1303
+ [Figure S1 graphic omitted: it shows 16 timesteps of an example MeMIDI event sequence, each timestep summing tempo, bar, position, token, duration, and velocity embeddings; see the caption below.]
1403
+ Figure S1: MeMIDI encoding method for MIDI sequences (example). The input embedding in each timestep
1404
+ of the MeMIDI event sequence is the sum of the event embeddings, including tempo embedding, bar embedding,
1405
+ position embedding, token embedding, duration embedding, and velocity embedding in this timestep.
1406
+ A.2
1407
+ Additional Supplementary Tables
1408
+ Table S1: Subjective evaluation scores of generated melodies based on different melodic skeleton settings in
1409
+ Experiment 1 (mean ± standard deviation).
1410
+ No. | Settings     | Rhythm      | Richness    | Structure   | Expectation | Overall
+ 1   | Downbeat     | 2.75 ± 0.96 | 2.50 ± 0.98 | 2.63 ± 1.13 | 2.45 ± 1.03 | 2.52 ± 1.03
+ 2   | Long Note    | 2.70 ± 1.03 | 2.57 ± 1.17 | 2.77 ± 1.21 | 2.47 ± 1.27 | 2.67 ± 1.26
+ 3   | Rhythm       | 3.02 ± 1.01 | 3.10 ± 0.89 | 2.88 ± 1.02 | 2.82 ± 0.81 | 2.93 ± 0.97
+ 4   | Tonic        | 2.95 ± 1.11 | 2.80 ± 1.09 | 2.87 ± 0.95 | 2.65 ± 1.01 | 2.80 ± 1.06
+ 5   | Intersection | 2.60 ± 0.85 | 2.52 ± 0.87 | 2.53 ± 0.83 | 2.28 ± 0.98 | 2.43 ± 0.83
+ 6   | Union        | 2.95 ± 0.95 | 2.57 ± 1.05 | 2.62 ± 1.09 | 2.43 ± 1.05 | 2.60 ± 1.04
+ 7   | Random25%    | 2.80 ± 1.11 | 2.78 ± 0.98 | 2.67 ± 1.07 | 2.62 ± 0.95 | 2.65 ± 1.12
+ 8   | Random50%    | 2.92 ± 0.98 | 2.82 ± 0.89 | 2.70 ± 1.01 | 2.68 ± 1.03 | 2.73 ± 0.95
+ 9   | Random75%    | 2.82 ± 0.95 | 2.58 ± 1.12 | 2.50 ± 1.11 | 2.45 ± 1.04 | 2.53 ± 1.08
1480
+ Table S2: One-tailed t-test results between WuYun-RS and other music generation models on the five evaluation
+ metrics in Experiment 2.
+ Model      | Rhythm       | Richness     | Structure     | Expectation  | Overall
+ MT         | 3.43 × 10^-7 | 5.37 × 10^-9 | 2.44 × 10^-12 | 6.50 × 10^-6 | 2.21 × 10^-10
+ PMT        | 2.18 × 10^-8 | 1.21 × 10^-9 | 1.20 × 10^-6  | 2.55 × 10^-6 | 1.13 × 10^-7
+ MeMIDI     | 1.88 × 10^-6 | 3.09 × 10^-6 | 6.86 × 10^-7  | 2.31 × 10^-5 | 6.29 × 10^-7
+ CWT        | 3.31 × 10^-4 | 8.47 × 10^-4 | 3.02 × 10^-4  | 2.03 × 10^-3 | 1.69 × 10^-3
+ Melons     | 4.60 × 10^-3 | 1.54 × 10^-3 | 8.02 × 10^-4  | 2.12 × 10^-3 | 7.41 × 10^-3
+ WuYun-RRS  | 0.26         | 0.36         | 0.42          | 0.42         | 0.41
+ MT, PMT, and CWT stand for Music Transformer, Pop Music Transformer, and Compound Word Transformer, respectively.
1526
1527
+
L9E3T4oBgHgl3EQfYgo-/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
OtFKT4oBgHgl3EQffy6V/content/2301.11831v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:16ca724abca29ed93b2f0ad54b5c68316d85a00744af463df29437f180ee0be4
3
+ size 346643
PdE3T4oBgHgl3EQfCglF/content/tmp_files/2301.04276v1.pdf.txt ADDED
@@ -0,0 +1,902 @@
1
+ Performance Analysis of Superconductor-constriction-Superconductor Transmon
2
+ Qubits
3
+ Mingzhao Liu∗ and Charles T. Black†
4
+ Center for Functional Nanomaterials, Brookhaven National Laboratory, Upton, NY 11973, USA
5
+ This work presents a computational analysis of a superconducting transmon qubit design, in which
6
+ the superconductor-insulator-superconductor (SIS) Josephson junction is replaced by a co-planar,
7
+ superconductor-constriction-superconductor (ScS) junction.
8
+ For short junctions having a Kulik-
9
+ Omelyanchuk current-phase relationship, we find that the ScS transmon has an improved charge
10
+ dispersion compared to the SIS transmon, with a tradeoff of 50% smaller anharmonicity. These
11
+ calculations provide a framework for estimating the superconductor material properties and junction
12
+ dimensions needed to provide proper ScS transmon operation at typical gigahertz frequencies.
13
+ I.
14
+ INTRODUCTION
15
+ The transmon has become an enabling superconduct-
16
+ ing qubit device architecture, with primary advantages of
17
+ immunity to charge noise and longer coherence lifetimes
18
+ achieved by designing the device to have Josephson en-
19
+ ergy far exceeding the charging energy. Similar to other
20
+ superconducting qubit architectures, the transmon core
21
+ consists of one or more Josephson junctions (JJs), which
22
+ are exclusively superconductor-insulator-superconductor
23
+ tunnel junctions (SIS) — typically a thin film sand-
24
+ wich structure of aluminum/aluminum oxide/aluminum
25
+ (Al/AlOx/Al), in which AlOx is the tunnel barrier (Fig-
26
+ ure 1a).
27
+ Fabrication of Al/AlOx/Al SIS JJs typically involves
28
+ physical vapor deposition of the top and bottom Al lay-
29
+ ers from two different angles relative to the substrate,
30
+ through a common mask [1]. After depositing the first
31
+ Al layer, the sample is exposed to a controlled level of
32
+ oxygen to form the thin AlOx barrier. This ingenious
33
+ fabrication method has been refined over many years but
34
+ will nevertheless be highly challenging to implement at
35
+ the manufacturing scale required for larger-scale quan-
36
+ tum computers. The exponential dependence of the JJ
37
+ critical supercurrent (Ic) on tunnel barrier width also re-
38
+ sults in a typical few percent variation in Ic across devices
39
46
+ Figure 1. (a) Schematic of an Al/AlOx/Al superconductor-
47
+ insulator-superconductor (SIS) Josephson junction. For clar-
48
+ ity, the native oxide covering both Al electrodes is omitted.
49
+ (b) Schematic of a co-planar superconductor-constriction-
50
+ superconductor (ScS) Josephson junction.
51
+ ∗ mzliu@bnl.gov
52
+ † ctblack@bnl.gov
53
+ fabricated within a few centimeters, even when oxidation
54
+ conditions are tightly controlled.[2–5] Since the Joseph-
55
+ son energy is directly proportional to Ic, this variation
56
+ presents an additional design and manufacturing chal-
57
+ lenge.
58
+ In a transmon, the SIS JJ is shunted by a large capac-
59
+ itor to minimize the charging energy and thus provide
60
+ immunity to charge noise. Further, the qubit is coupled
61
+ to a high-Q microwave resonator for readout. Typically,
62
+ the shunting capacitor and the resonator are fabricated
63
+ separately from the SIS JJ, using a superconductor with
64
+ higher Tc and better chemical robustness compared to Al
65
+ (e.g., niobium (Tc = 9.2 K)[6], tantalum (Tc = 4.4 K)[7],
66
+ and titanium nitride (Tc = 5.6 K)[8]). The improved ro-
67
+ bustness allows post-fabrication chemical treatments to
68
+ remove surface contaminants, which contribute to TLS
69
+ loss. However, most of these treatments are not possible
70
+ after Al/AlOx/Al junction fabrication, due to the junc-
71
+ tion’s fragile nature [9].
72
+ In this work we analyze the performance impact
73
+ of replacing the transmon SIS tunnel junction with
74
+ a co-planar superconductor-constriction-superconductor
75
+ (ScS) Josephson junction. A ScS JJ is comprised of two
76
+ superconductors separated by a thin neck of the same
77
+ superconductor (Figure 1b), with the constriction estab-
78
+ lishing the superconducting phase difference that enables
79
+ Josephson behavior. ScS JJs are co-planar (unlike SIS
80
+ tunnel junctions) and can be fabricated using conven-
81
+ tional lithography and metallization. Here, we follow the
82
+ formalism established by Koch et al.
83
+ in [10] to deter-
84
+ mine the electrical properties of ScS transmons, which
85
+ are shown to be different from SIS transmons, stemming
86
+ from a different ScS JJ current-phase relationship (CPR
87
+ or CΦR) compared to that of a SIS JJ.[11–13]. Compar-
88
+ ing the two device architectures, we show that the ScS
89
+ transmon has 50% less anharmonicity than the SIS trans-
90
+ mon, for devices with the same Josephson energy and
91
+ capacitive energy. However, the smaller anharmonicity
92
+ is accompanied by a significantly smaller charge disper-
93
+ sion, giving the ScS transmon stronger immunity against
94
+ charge noise.
95
+ arXiv:2301.04276v1 [cond-mat.supr-con] 11 Jan 2023
96
+
97
98
+ II.
99
+ RESULTS AND DISCUSSION
100
+ II.1.
101
+ Current-phase relation of a short ScS junction
102
+ Consider a ScS Josephson junction comprised of two
103
+ large superconductors connected by a diffusive quasi-one-
104
+ dimensional wire with length d ≪ √ξ0l and width w ≪
105
+ d, where ξ0 is the Pippard superconducting coherence
106
+ length, and l ≪ ξ0 is the dirty-limit electron mean free
107
+ path. In this case, Kulik and Omelyanchuk showed that
108
+ the CPR for the ScS junction (KO-1) at T = 0 K is:
109
+ I_{ScS}(\phi) = \frac{\pi\Delta}{e R_n}\,\cos\frac{\phi}{2}\,\tanh^{-1}\!\left(\sin\frac{\phi}{2}\right),    (1)
118
+ in which ∆ is the superconducting energy gap and Rn
119
+ is the normal state resistance of the junction.[11, 12]
120
+ The junction critical current Ic,ScS = 0.662π∆/(eRn) is
121
+ achieved at ϕ = (2k ± 0.627)π to satisfy dI(ϕ)/dϕ ∝
122
+ 1 − sin(ϕ/2) tanh−1 [sin(ϕ/2)] = 0. Given the Maclau-
123
+ rin series tanh−1(x) = x + x3/3 + O(x5), Eq.
124
+ 1 may
125
+ be rewritten to a form that resembles the CPR of a SIS
126
+ Josephson junction, as
127
+ I_{ScS}(\phi) = 0.755\, I_{c,ScS}\,\sin\phi\left[1 + \frac{1}{3}\sin^{2}\frac{\phi}{2} + O\!\left(\sin^{4}\frac{\phi}{2}\right)\right],
137
+ which shows that the CPR of a ScS junction distorts
138
+ from the conventional sinusoidal form, but still bears odd
139
+ parity and a 2π periodicity (Figure 2a).
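As a quick numerical sanity check of these values (our own sketch, not part of the original analysis), evaluating the KO-1 relation on a dense grid recovers the quoted maximum position and critical-current prefactor:

```python
import numpy as np

# I(phi) in units of pi*Delta/(e*Rn): cos(phi/2) * artanh(sin(phi/2)), Eq. (1).
phi = np.linspace(0.0, np.pi, 200001)[:-1]    # open interval, avoids artanh(1)
current = np.cos(phi / 2) * np.arctanh(np.sin(phi / 2))

k = np.argmax(current)
print(phi[k] / np.pi)    # -> approximately 0.627
print(current[k])        # -> approximately 0.662
```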
140
+ II.2.
141
+ Josephson energy of a ScS transmon
142
+ The potential energy of a Josephson junction is given
143
+ by the integral
144
+ E_J(\phi) = \int I_J V\,dt = \int I_J\,\frac{\Phi_0}{2\pi}\frac{d\phi}{dt}\,dt = \int I_J\,\frac{\Phi_0}{2\pi}\,d\phi.    (2)
158
188
+ Figure 2. (a) The CPR of a ScS Josephson junction in the
189
+ KO-1 limit (solid red) is distorted from the sinusoidal form
190
+ of a SIS junction (dashed black). (b) The Josephson energy
191
+ of a ScS transmon (solid red) deviates from the cosine form
192
+ of a SIS transmon (dashed black) and has 50% smaller an-
193
+ harmonicity at its lowest order (ϕ4). A harmonic parabola,
194
+ ϕ2/2, is displayed (dotted cyan) for reference.
195
+ [Figure 3 graphic omitted: the level values printed in its panels are 0.3031, 0.8800, 1.3993, and 1.7751 for the SIS transmon and 0.3096, 0.9145, 1.4896, and 2.0077 for the ScS transmon; see the caption below.]
233
+ Figure 3. The eigenenergies (blue lines and numbers) and the
234
+ probability densities (∥Ψ∥2) of the first 4 eigenstates of (a) a
235
+ SIS transmon and (b) a ScS transmon, both with EJ/EC = 20
236
+ and ng = 1/2. The corresponding potential energies, normal-
237
+ ized by EJ, are plotted in red lines for both transmons.
238
+ For a KO-1 junction defined by Eq. 1, the integral in Eq.
239
+ 2 leads to
240
+ E_{J,ScS}(\phi) = \frac{\Delta\Phi_0}{2 e R_n}\left[\ln\!\left(\cos^{2}\frac{\phi}{2}\right) + 2\sin\frac{\phi}{2}\,\tanh^{-1}\!\left(\sin\frac{\phi}{2}\right)\right].    (3)
255
+ Although this form appears very different from the po-
256
+ tential energy of a SIS junction, EJ,SIS(ϕ) = EJ,SIS(1 −
257
+ cos ϕ), with EJ,SIS = Ic,SISΦ0/2π, Maclaurin expansions
258
+ of EJ,ScS and EJ,SIS make their similarities apparent:
259
+ E_{J,ScS}(\phi) = \frac{\Delta\Phi_0}{4 e R_n}\left[\frac{1}{2}\phi^{2} - \frac{1}{48}\phi^{4} + O(\phi^{6})\right]
+ E_{J,SIS}(\phi) = E_{J,SIS}\left[\frac{1}{2}\phi^{2} - \frac{1}{24}\phi^{4} + O(\phi^{6})\right].    (4)
272
+ Comparing the coefficients of the harmonic (ϕ2) term,
273
+ we observe that the Josephson energy of a ScS transmon
274
+ can be defined as:
275
+ E_{J,ScS} = \frac{\Delta\Phi_0}{4 e R_n} = 0.755\, I_{c,ScS}\Phi_0/(2\pi),    (5)
+ where the last equality recognizes that Ic,ScS =
+ 0.662π∆/(eRn). Eq. 5 shows that both potential ener-
288
+ gies contain anharmonicity led by a ϕ4 term, from which
289
+ we estimate that the anharmonicity of a ScS transmon
290
+ is about one half that of a SIS transmon, for devices
291
+ with the same EJ. This difference is clear when compar-
292
+ ing normalized EJ(ϕ) of ScS and SIS transmons with a
293
+ harmonic parabolic potential ϕ2/2 (Figure 2b). A more
294
+ precise evaluation of the anharmonicity is given in II.3,
295
+ by computing the ScS transmon eigenenergies.
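The expansion quoted in Eq. 4 can be verified symbolically; the following SymPy sketch (ours, not the authors') divides Eq. 3 by E_J = ∆Φ0/(4eRn) and expands about φ = 0:

```python
import sympy as sp

phi = sp.symbols('phi')
s = sp.sin(phi / 2)
# E_J,ScS(phi) / E_J  with  E_J = Delta*Phi_0/(4*e*R_n), cf. Eqs. (3) and (5)
v = 2 * (sp.log(sp.cos(phi / 2) ** 2) + 2 * s * sp.atanh(s))
print(sp.series(v, phi, 0, 6))   # expect: phi**2/2 - phi**4/48 + O(phi**6)
```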
+ II.3. Eigenenergies and eigenstates of a ScS transmon
+ A conventional SIS transmon has a Hamiltonian of the form:
+ \[ \hat{H}_{\mathrm{SIS}} = 4E_C(\hat{n} - n_g)^2 + E_J(1 - \cos\hat{\varphi}), \tag{6} \]
+ Figure 4. (a) Transition energy E0m = Em − E0 at ng = 1/2 and (b) oscillator anharmonicity (E12 − E01) at ng = 1/2, as functions of EJ/EC for the ScS transmon (solid lines) and the SIS transmon (dashed lines). (c) The minimal pulse duration (τp) of ScS (solid line) and SIS transmons (dashed line) vs. EJ/EC, all operated at ω01 = 2π × 10 GHz.
+ where ng is the offset charge. The wave equation for a SIS transmon can be solved analytically.[10]
+ In the ScS transmon, the potential energy is given by Eq. 3, so that the Hamiltonian becomes
+ \[ \hat{H}_{\mathrm{ScS}} = 4E_C(\hat{n} - n_g)^2 + E_J\left[ 2\ln\!\left(\cos^{2}\frac{\hat{\varphi}}{2}\right) + 4\sin\frac{\hat{\varphi}}{2}\, \tanh^{-1}\!\left(\sin\frac{\hat{\varphi}}{2}\right) \right]. \tag{7} \]
+ The wave equation of a ScS transmon can be solved numerically using the finite difference method, in which the Hamiltonian is expressed in a discretized space of phase ϕ ∈ [−π, π), with the periodic boundary condition applied to both ends. The validity of the computation is confirmed by comparing a similar numerical solution of the wave equation for a SIS transmon with the analytical solutions presented by Koch et al.[10] Figure 3 compares the first 4 eigenstates of a SIS transmon and a ScS transmon, both with EJ/EC = 20 and ng = 1/2. Although the lower level eigenenergies and eigenfunctions are similar, the differences become more apparent at higher energies. This trend is more clearly observed for the transition energies E0m = Em − E0 calculated for both transmon types, across a range of EJ/EC from 1 to 100 (Figure 4a). This difference reflects the smaller anharmonicity of the ScS transmon, compared to the SIS transmon.
+ By treating the leading anharmonic term (−ϕ^4/24) in Eq. 4 as a perturbation to the harmonic potential and applying first-order perturbation theory, the mth eigenenergy of a SIS transmon is approximated by [10]
+ \[ E_{m,\mathrm{SIS}} \approx \hbar\omega_p\left(m + \frac{1}{2}\right) - \frac{E_C}{4}\left(2m^{2} + 2m + 1\right), \tag{8} \]
+ in which ℏωp = √(8EJEC) is the Josephson plasma energy.
+ The transition energy between the (m − 1)th and mth levels is therefore
+ \[ E_{m-1,m,\mathrm{SIS}} \approx \hbar\omega_p - mE_C. \tag{9} \]
+ From Eq. 9, we find that the anharmonicity of a SIS transmon, αSIS ≡ E12,SIS − E01,SIS, is approximately −EC. By applying the same first-order perturbation theory but recognizing that the perturbation term is half that of a SIS transmon (Eq. 4), the mth eigenenergy of a ScS transmon can be approximated by
+ \[ E_{m,\mathrm{ScS}} \approx \hbar\omega_p\left(m + \frac{1}{2}\right) - \frac{E_C}{8}\left(2m^{2} + 2m + 1\right), \tag{10} \]
+ so that its anharmonicity, αScS, is approximately −EC/2, or half the anharmonicity of a SIS transmon. We can visualize this finding in a plot of numerical results, looking at transmons with EJ/EC ≥ 20 (Figure 4b).
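+ Reusing the transmon_spectrum() sketch above (an illustrative function name of ours), the anharmonicity can be read off directly from the numerical levels:
+ EC = 1.0
+ for kind, estimate in [("SIS", -1.0), ("ScS", -0.5)]:
+     E = transmon_spectrum(EJ=50 * EC, EC=EC, ng=0.5, kind=kind, nlev=3)
+     alpha = (E[2] - E[1]) - (E[1] - E[0])
+     print(f"{kind}: alpha/EC = {alpha:.3f}  (first-order estimate {estimate})")
+ # the printed values should sit close to -EC and -EC/2, with small corrections
+ # beyond first-order perturbation theory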
+ The smaller anharmonicity of a ScS transmon means that the transitions E01 and E12 lie closer in energy, so that a longer RF pulse is needed to correctly excite the desired transition E01. The minimal pulse duration can be estimated as τp ≈ ℏ/|α|. As shown in Figure 4c, despite its lower anharmonicity, τp of the ScS transmon remains below 1 ns even for EJ/EC = 100, when the qubit operates at 10 GHz. Because typical qubit pulse durations are ∼10 ns, we may conclude that the lower anharmonicity will not inhibit normal operation of a ScS transmon.
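+ For a rough check of the 1 ns figure (our own estimate, using the perturbative relation ℏω01 ≈ √(8EJEC) − EC/2 for the ScS transmon):
+ import numpy as np
+
+ h = 6.626e-34                                # Planck constant (J s)
+ hbar = h / (2 * np.pi)
+ f01, ratio = 10e9, 100                       # 10 GHz qubit, EJ/EC = 100
+ EC = h * f01 / (np.sqrt(8 * ratio) - 0.5)    # solve the perturbative relation for EC
+ alpha = EC / 2                               # |alpha_ScS| ≈ EC/2 (Eq. 10)
+ tau_p = hbar / alpha
+ print(f"EC/h ≈ {EC/h/1e9:.2f} GHz, tau_p ≈ {tau_p*1e9:.2f} ns")  # about 0.36 GHz and 0.9 ns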
+ II.4. Charge dispersion of a ScS transmon
+ A primary benefit of the transmon architecture is its relative immunity to charge noise, when designed to operate in the regime of EJ ≫ EC. In a SIS transmon, the charge dispersion of the mth level decreases exponentially with √(8EJ/EC), following [10]
+ \[ \epsilon_m \equiv E_m(n_g = 1/2) - E_m(n_g = 0) \approx (-1)^{m} E_C\, \frac{2^{4m+5}}{m!} \sqrt{\frac{2}{\pi}} \left(\frac{E_J}{2E_C}\right)^{\frac{m}{2} + \frac{3}{4}} e^{-\sqrt{8E_J/E_C}}. \tag{11} \]
+ Intuitively, the charge dispersion is related to the tunneling probability between neighboring potential energy valleys (Figure 3), e.g., when ϕ makes a full 2π rotation.[10]
481
+ 4
482
+ −2
483
+ −1
484
+ 0
485
+ 1
486
+ 2
487
+ ng
488
+ 0.5
489
+ 1.0
490
+ 1.5
491
+ 2.0
492
+ Em/EJ
493
+ SIS
494
+ ScS
495
+ SIS
496
+ ScS
497
+ |ϵm|/E01(ng = 1/2)
498
+ 101
499
+ 10-3
500
+ 10-7
501
+ 10-1
502
+ 10-5
503
+ 10-9
504
+ 10-11
505
+ 1
506
+ 2
507
+ 3
508
+ 4
509
+ 5
510
+ 6
511
+ 7
512
+ 8
513
+ 9
514
+ 10
515
+ (EJ/EC)1/2
516
+ 10
517
+ 20
518
+ 30
519
+ 40
520
+ 50 60 70 80 90 100
521
+ EJ/EC
522
+ a
523
+ b
524
+ m = 0
525
+ 1
526
+ 2
527
+ m = 0
528
+ 1
529
+ 2
530
+ 0
531
+ 20
532
+ 40
533
+ 60
534
+ 80
535
+ 100
536
+ SIS
537
+ ScS
538
+ T2 (ns)
539
+ EJ/EC
540
+ 100
541
+ 10-2
542
+ 104
543
+ 102
544
+ 106
545
+ 108
546
+ c
547
+ Figure 5. (a) The eigenenergies Em of the lowest 3 eigenstates (m = 0, 1, 2) of a ScS transmon (solid line) and a SIS transmon (dashed lines), both with EJ/EC = 10, as functions of the offset charge ng. (b) The charge dispersion ϵm of the lowest 3 eigenstates of a ScS transmon (solid line) and a SIS transmon (dashed lines), as functions of EJ/EC. (c) The dephasing time T2 of ScS (solid line) and SIS transmons (dashed line) vs. EJ/EC, all operated at ω01 = 2π × 10 GHz.
551
+ rotation.[10] By this reasoning, we may expect the higher
552
+ barrier height of a ScS transmon (∼ 2.8EJ vs.
553
+ 2EJ)
554
+ to better suppress the tunneling probability and provide
555
+ lower charge dispersion, compared to a SIS transmon.
556
+ Figure 5a plots the first three eigenenergies Em (m =
557
+ 0, 1, 2) versus the effective offset charge ng for both SIS
558
+ (dashed) and ScS (solid) transmons, with EJ/EC = 10.
559
+ Clearly, the ScS transmon eigenenergies are more weakly
560
+ perturbed by ng. Calculations of the charge dispersion,
561
+ ϵm = Em(ng = 1/2) − Em(ng = 0), across a wide range
562
+ of 1 ≤ EJ/EC ≤ 100 show that suppression of charge dis-
563
+ persion in the ScS transmon becomes more effective for
564
+ larger EJ/EC ratios (Figure 5b). When EJ/EC = 100,
565
+ the charge dispersion of the first excited state of a ScS
566
+ transmon, ϵ1,ScS, is over one order of magnitude less than
567
+ the corresponding SIS transmon. It is noted that com-
568
+ putation on SIS transmon matches the analytical result
569
+ very well[10], again demonstrating the high numerical
570
+ precision of our finite difference computation.
571
+ Never-
572
+ theless, the computational error becomes significant as
573
+ the normalized charge dispersion, |ϵm|/E01, approaches
574
+ 10−11 and smaller. This is due to the accumulation of
575
+ floating-point error that eventually shows up for evaluat-
576
+ ing the vanishing difference between the two eigenener-
577
+ gies at ng = 0 and 1/2.
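+ The same transmon_spectrum() sketch from above can reproduce this comparison qualitatively (illustrative parameters; convergence with grid size is left unchecked here):
+ EC, EJ = 1.0, 20.0
+ for kind in ("SIS", "ScS"):
+     e_half = transmon_spectrum(EJ, EC, ng=0.5, kind=kind, nlev=2)
+     e_zero = transmon_spectrum(EJ, EC, ng=0.0, kind=kind, nlev=2)
+     eps1 = e_half[1] - e_zero[1]          # epsilon_1 = E_1(ng=1/2) - E_1(ng=0)
+     print(f"{kind}: |eps1|/EC ≈ {abs(eps1):.4f}")
+ # the ScS value should come out noticeably smaller than the SIS value already at
+ # EJ/EC = 20, and the gap widens with EJ/EC (over an order of magnitude by 100)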
+ In Figure 5b, the y-axis is on a logarithmic scale and the x-axis is scaled as √(EJ/EC), so that all curves approach a linear form at large EJ/EC. For the SIS transmon, the slope matches the expected exp(−√(8EJ/EC)) dependence of Eq. 11. For the ScS transmon, the slope is larger and is best described by
+ \[ \epsilon_m \propto \exp\!\left(-\sqrt{1.16 \times 8E_J/E_C}\right). \]
+ The improved charge dispersion makes the ScS transmon less sensitive to charge noise and, in turn, gives it a longer dephasing time T2. For dephasing caused by slow charge fluctuations of large amplitude, Koch et al. [10] found an upper limit of T2 given by
+ \[ T_2 \approx \frac{4\hbar}{e^{2}\pi|\epsilon_1|}. \]
+ Using this relation, we compare T2 for both SIS and ScS transmons for EJ/EC between 1 and 100 (Figure 5c). The ScS transmon improves T2 across the entire range of EJ/EC, and especially at higher ratios. At EJ/EC = 100, the SIS transmon has a T2 ceiling of about 3 ms, compared to about 50 ms for the ScS transmon, an over 10-fold increase. At present, because the T1 lifetime of SIS transmon qubits is still below about 1 ms and is not limited by charge noise, this advantage of the ScS transmon architecture offers little practical benefit. However, because we expect the lifetimes of superconducting qubits to continue improving (Schoelkopf's Law) [14], we anticipate a point when charge noise dephasing becomes a bottleneck, and the ScS transmon architecture can offer effective mitigation.
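+ The ≈ 3 ms SIS ceiling can be recovered approximately (our own estimate, using the asymptotic Eq. 11 rather than the exact numerics):
+ import numpy as np
+
+ h = 6.626e-34
+ hbar = h / (2 * np.pi)
+ f01, ratio = 10e9, 100                       # SIS transmon at 10 GHz, EJ/EC = 100
+ EC = h * f01 / (np.sqrt(8 * ratio) - 1)      # from E01 ≈ sqrt(8 EJ EC) - EC (Eq. 9)
+ EJ = ratio * EC
+ # charge dispersion of the first excited state, Eq. 11 with m = 1 (magnitude only)
+ eps1 = EC * 2**9 * np.sqrt(2 / np.pi) * (EJ / (2 * EC))**1.25 * np.exp(-np.sqrt(8 * ratio))
+ T2 = 4 * hbar / (np.e**2 * np.pi * eps1)
+ print(f"eps1/h ≈ {eps1/h:.0f} Hz, T2 ceiling ≈ {T2*1e3:.1f} ms")   # roughly 10 Hz and 3 ms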
+ II.5. ScS transmon design parameters
+ The operational behavior of a ScS transmon is determined by its EJ and EC, which define the operating frequency ω01, the relative immunity to charge noise (ϵ1), and the minimum excitation pulse duration (τp). Because these three quantities are determined by EJ and EC, they are not independent. We can visualize this interdependence with three sets of contour lines plotted in the plane of EJ versus EC in Figure 6. These contours represent: (1) a transmon operating frequency (ω01/(2π)) between 1 and 10 GHz (set of red, descending diagonal lines), (2) EJ/EC ratios of 10, 100, and 1000 (set of blue, ascending diagonal lines), and (3) the minimum excitation pulse duration τp between 0.32 and 10 ns (set of dashed, predominantly vertical lines). Selecting two of these defines the third one.
+ EC/(2�ħ) (GHz)
641
+ 102
642
+ EJ/(2�ħ) (GHz)
643
+ 101
644
+ 100
645
+ 10-2
646
+ 10-1
647
+ 100
648
+ EJ/EC=103
649
+ EJ/EC=102
650
+ EJ/EC=101
651
+ ω01/(2�) = 1 GHz
652
+ 2 GHz
653
+ 3 GHz
654
+ 4 GHz
655
+ 5 GHz
656
+ 6 GHz
657
+ 7 GHz
658
+ 8 GHz
659
+ 9 GHz
660
+ 10 GHz
661
+ τp = 10 ns
662
+ 7.94 ns
663
+ 6.31 ns
664
+ 5.01 ns
665
+ 1.58 ns
666
+ 1.26 ns
667
+ 1.00 ns
668
+ 0.79 ns
669
+ 0.63 ns
670
+ 0.50 ns
671
+ 0.40 ns
672
+ 3.98 ns
673
+ 0.32 ns
674
+ 104
675
+ 105
676
+ 2
677
+ 3
678
+ 4
679
+ 5
680
+ 6
681
+ 7
682
+ 8
683
+ 9
684
+ 2
685
+ 3
686
+ 4
687
+ 5
688
+ 6
689
+ 7
690
+ 8
691
+ 9
692
+ 103
693
+ 2
694
+ 3
695
+ 4
696
+ 5
697
+ 6
698
+ 7
699
+ 8
700
+ 9
701
+ 2
702
+ 3
703
+ 4
704
+ 5
705
+ 6
706
+ 7
707
+ 8
708
+ 9
709
+ 3.16 ns
710
+ 2.51 ns
711
+ 2.00 ns
712
+ Rn/Tc (Ω K-1)
713
+ 102
714
+ 103
715
+ 2x101
716
+ 2
717
+ 3
718
+ 4
719
+ 5
720
+ 6
721
+ 7 8 9
722
+ 2
723
+ 3
724
+ 4
725
+ 5
726
+ 6
727
+ 7 8 9
728
+ 2
729
+ 3
730
+ 4
731
+ 5
732
+ 6
733
+ 7
734
+ 8
735
+ 9
736
+ 3
737
+ 4
738
+ 5
739
+ 6
740
+ 7
741
+ 8
742
+ 9
743
+ CΣ (fF)
744
+ Figure 6. A graphical guide for designing a ScS transmon with the EJ and EC required to match a desired transmon frequency ω01 and minimum pulse duration τp. The red lines are contour lines for transmon frequencies set at values between 1 and 10 GHz. The dashed black lines are contour lines for τp set at a few values between 0.32 and 10 ns. The blue lines are contour lines for EJ/EC ratios set at 10, 100, and 1000. A second x-axis that is parallel to EC is presented for CΣ, following CΣ = e^2/(2EC). Similarly, a second y-axis that is parallel to EJ is presented for Rn/Tc, following Rn/Tc = 1.76kBΦ0/(4eEJ).
+ For example, a ScS transmon designed to operate at ω01/(2π) = 5 GHz with a minimum pulse duration of τp = 4 ns (green dot in Figure 6) will have an EJ/EC ratio of about 600. A shorter excitation pulse of τp = 1 ns (purple dot in Figure 6) instead requires a tradeoff of a smaller EJ/EC ≈ 40, and thus less immunity against charge noise.
+ Importantly, EJ and EC of a ScS transmon are set by the physical device dimensions and the fundamental properties of the materials composing it. EJ is determined by the superconducting energy gap of the material (∆) and the normal state resistance of the junction (Rn) (Eq. 5). For a BCS superconductor where ∆ = 1.76kBTc, we can express EJ in terms of the material-property ratio Rn/Tc = 1.76kBΦ0/(4eEJ,ScS), which is shown as the second (right) y-axis in Figure 6. Similarly, because EC is set by the total capacitance (CΣ = e^2/(2EC,ScS)), which depends on device geometry and dielectric properties, we can express EC as a capacitance, shown as a second (top) x-axis in Figure 6.
+ 6 that designing a ScS transmon with ω01/(2π) = 5 GHz,
771
+ τp = 4 ns, and EJ/EC ratio of about 600 (green dot) re-
772
+ quires a junction with Rn/Tc ≈ 3 kΩ · K−1 and capacitor
773
+ with CΣ ≈ 250 fF. The properties can be realized by a
774
+ constriction junction fabricated from a thin film super-
775
+ conductor that has both a relatively high normal state
776
+ resistivity and a long superconducting coherence length.
777
+ As one example, a 10-nm-thick PtSi film was reported
778
+ has normal-state sheet resistance Rs = 67 Ω/□, super-
779
+ conducting Tc = 0.63 K, and Pippard coherence length
780
+ ξ = 440 nm.[15] Using these material parameters, we can
781
+ meet the ScS transmon design criteria using a constric-
782
+ tion junction with physical length of 440 nm and width of
783
+
784
+ 6
785
+ 16 nm. The qubit capacitor physical dimensions should
786
+ be designed for CΣ ≈ 250 fF, according to Figure 6. If one
787
+ instead desires the shorter readout pulse time of τp = 1
788
+ ns (purple dot), the constriction must have Rn/Tc ≈ 10
789
+ kΩ · K−1 and CΣ ≈ 70 fF. For the same PtSi supercon-
790
+ ductor, these values can be met with physical length of
791
+ 440 nm and width of 5 nm. which are more challenging
792
+ dimensions to fabricate.
793
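+ A rough re-derivation of the green-dot numbers (ours; it uses the perturbative relation ℏω01 ≈ √(8EJEC) − EC/2 and therefore only approximates the exact numerics behind Figure 6):
+ import numpy as np
+
+ e, h, kB = 1.602e-19, 6.626e-34, 1.381e-23
+ Phi0 = h / (2 * e)
+ f01, ratio = 5e9, 600                        # 5 GHz, EJ/EC ≈ 600 (green dot)
+ EC = h * f01 / (np.sqrt(8 * ratio) - 0.5)
+ EJ = ratio * EC
+
+ C_sigma = e**2 / (2 * EC)                    # top axis of Figure 6
+ Rn_over_Tc = 1.76 * kB * Phi0 / (4 * e * EJ) # right axis of Figure 6
+ print(f"C_sigma ≈ {C_sigma*1e15:.0f} fF, Rn/Tc ≈ {Rn_over_Tc/1e3:.1f} kOhm/K")
+
+ # constriction width for the PtSi film of Ref. [15]: Rn = Rs * L / w with L = xi
+ Rs, Tc, L = 67.0, 0.63, 440e-9
+ Rn = Rn_over_Tc * Tc
+ print(f"Rn ≈ {Rn:.0f} Ohm, width ≈ {Rs * L / Rn * 1e9:.0f} nm")
+ # prints values close to the ~250 fF, ~3 kOhm/K, and ~16 nm quoted in the text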
+ III. CONCLUSION
+ In summary, we have demonstrated through computation that a short ScS Josephson junction can be used as a drop-in replacement for the SIS tunnel junction in a transmon qubit. In the transmon regime (EJ ≫ EC), a ScS transmon has 50% smaller anharmonicity than a SIS transmon, but this is compensated by its appreciably lower charge dispersion, which provides a significantly higher T2 ceiling. Using this analysis, we estimate that high performance ScS transmons can be achieved with constrictions having a normal state resistance of a few kiloohms, which can be made from a thin nanobridge formed in low-Tc superconductors using conventional, high-resolution nanofabrication techniques. The ScS transmon design allows all components, including the constriction junction, capacitor, and resonator, to be fabricated in a single lithography step. This is a significant simplification compared to multistep SIS transmon fabrication, and it also provides a robust architecture amenable to device post-processing, cleaning, and encapsulation.
+ ACKNOWLEDGMENTS
+ This material is based upon work supported by the U.S. Department of Energy, Office of Science, National Quantum Information Science Research Centers, Co-design Center for Quantum Advantage (C2QA) under contract number DE-SC0012704. This research used computational resources of the Center for Functional Nanomaterials (CFN), which is a U.S. Department of Energy Office of Science User Facility, at Brookhaven National Laboratory under Contract No. DE-SC0012704.
+ [1] G. J. Dolan, Offset masks for lift-off photoprocessing, Appl. Phys. Lett. 31, 337 (1977).
+ [2] J. M. Kreikebaum, K. P. O'Brien, A. Morvan, and I. Siddiqi, Improving wafer-scale Josephson junction resistance variation in superconducting quantum coherent circuits, Supercond. Sci. Technol. 33, 06LT02 (2020).
+ [3] J. B. Hertzberg, E. J. Zhang, S. Rosenblatt, E. Magesan, J. A. Smolin, J.-B. Yau, V. P. Adiga, M. Sandberg, M. Brink, J. M. Chow, and J. S. Orcutt, Laser-annealing Josephson junctions for yielding scaled-up superconducting quantum processors, npj Quantum Inf. 7, 129 (2021).
+ [4] A. Osman, J. Simon, A. Bengtsson, S. Kosen, P. Krantz, D. P. Lozano, M. Scigliuzzo, P. Delsing, J. Bylander, and A. Fadavi Roudsari, Simplified Josephson-junction fabrication process for reproducibly high-performance superconducting qubits, Appl. Phys. Lett. 118, 064002 (2021).
+ [5] H. Kim, C. Jünger, A. Morvan, E. S. Barnard, W. P. Livingston, M. V. P. Altoé, Y. Kim, C. Song, L. Chen, J. M. Kreikebaum, D. F. Ogletree, D. I. Santiago, and I. Siddiqi, Effects of laser-annealing on fixed-frequency superconducting qubits, Appl. Phys. Lett. 121, 142601 (2022).
+ [6] A. Premkumar, C. Weiland, S. Hwang, B. Jäck, A. P. M. Place, I. Waluyo, A. Hunt, V. Bisogni, J. Pelliciari, A. Barbour, M. S. Miller, P. Russo, F. Camino, K. Kisslinger, X. Tong, M. S. Hybertsen, A. A. Houck, and I. Jarrige, Microscopic relaxation channels in materials for superconducting qubits, Commun. Mater. 2, 72 (2021).
+ [7] A. P. M. Place, L. V. H. Rodgers, P. Mundada, B. M. Smitham, M. Fitzpatrick, Z. Leng, A. Premkumar, J. Bryon, A. Vrajitoarea, S. Sussman, G. Cheng, T. Madhavan, H. K. Babla, X. H. Le, Y. Gang, B. Jäck, A. Gyenis, N. Yao, R. J. Cava, N. P. de Leon, and A. A. Houck, New material platform for superconducting transmon qubits with coherence times exceeding 0.3 milliseconds, Nature Commun. 12, 1779 (2021).
+ [8] J. B. Chang, M. R. Vissers, A. D. Córcoles, M. Sandberg, J. Gao, D. W. Abraham, J. M. Chow, J. M. Gambetta, M. B. Rothwell, G. A. Keefe, M. Steffen, and D. P. Pappas, Improved superconducting qubit coherence using titanium nitride, Appl. Phys. Lett. 103, 012602 (2013).
+ [9] K. Williams, K. Gupta, and M. Wasilik, Etch rates for micromachining processing - Part II, J. Microelectromech. Syst. 12, 761 (2003).
+ [10] J. Koch, T. M. Yu, J. Gambetta, A. A. Houck, D. I. Schuster, J. Majer, A. Blais, M. H. Devoret, S. M. Girvin, and R. J. Schoelkopf, Charge-insensitive qubit design derived from the Cooper pair box, Phys. Rev. A 76, 042319 (2007).
+ [11] I. O. Kulik and A. N. Omel'yanchuk, Contribution to the microscopic theory of the Josephson effect in superconducting bridges, JETP Lett. 21, 96 (1975).
+ [12] A. A. Golubov, M. Y. Kupriyanov, and E. Il'ichev, The current-phase relation in Josephson junctions, Rev. Mod. Phys. 76, 411 (2004).
+ [13] R. Vijay, E. M. Levenson-Falk, D. H. Slichter, and I. Siddiqi, Approaching ideal weak link behavior with three dimensional aluminum nanobridges, Appl. Phys. Lett. 96, 223112 (2010).
+ [14] M. Kjaergaard, M. E. Schwartz, J. Braumüller, P. Krantz, J. I.-J. Wang, S. Gustavsson, and W. D. Oliver, Superconducting qubits: Current state of play, Annu. Rev. Condens. Matter Phys. 11, 369 (2020).
+ [15] K. Oto, S. Takaoka, K. Murase, and S. Ishida, Superconductivity in PtSi ultrathin films, J. Appl. Phys. 76, 5339 (1994).
+
PdE3T4oBgHgl3EQfCglF/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,477 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf,len=476
2
+ page_content='Performance Analysis of Superconductor-constriction-Superconductor Transmon Qubits Mingzhao Liu∗ and Charles T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
3
+ page_content=' Black† Center for Functional Nanomaterials, Brookhaven National Laboratory, Upton, NY 11973, USA This work presents a computational analysis of a superconducting transmon qubit design, in which the superconductor-insulator-superconductor (SIS) Josephson junction is replaced by a co-planar, superconductor-constriction-superconductor (ScS) junction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
4
+ page_content=' For short junctions having a Kulik- Omelyanchuk current-phase relationship, we find that the ScS transmon has an improved charge dispersion compared to the SIS transmon, with a tradeoff of 50% smaller anharmonicity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
5
+ page_content=' These calculations provide a framework for estimating the superconductor material properties and junction dimensions needed to provide proper ScS transmon operation at typical gigahertz frequencies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
6
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
7
+ page_content=' INTRODUCTION The transmon has become an enabling superconduct- ing qubit device architecture, with primary advantages of immunity to charge noise and longer coherence lifetimes achieved by designing the device to have Josephson en- ergy far exceeding the charging energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
8
+ page_content=' Similar to other superconducting qubit architectures, the transmon core consists of one or more Josephson junctions (JJs), which are exclusively superconductor-insulator-superconductor tunnel junctions (SIS) — typically a thin film sand- wich structure of aluminum/aluminum oxide/aluminum (Al/AlOx/Al), in which AlOx is the tunnel barrier (Fig- ure 1a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
9
+ page_content=' Fabrication of Al/AlOx/Al SIS JJs typically involves physical vapor deposition of the top and bottom Al lay- ers from two different angles relative to the substrate, through a common mask [1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
10
+ page_content=' After depositing the first Al layer, the sample is exposed to a controlled level of oxygen to form the thin AlOx barrier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
11
+ page_content=' This ingenious fabrication method has been refined over many years but will nevertheless be highly challenging to implement at the manufacturing scale required for larger-scale quan- tum computers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
12
+ page_content=' The exponential dependence of the JJ critical supercurrent (Ic) on tunnel barrier width also re- sults in a typical few percent variation in Ic across devices Al Al AlOx Superconductor Constriction a b Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
13
+ page_content=' (a) Schematic of an Al/AlOx/Al superconductor- insulator-superconductor (SIS) Josephson junction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
14
+ page_content=' For clar- ity, the native oxide covering both Al electrodes is omitted.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
15
+ page_content=' (b) Schematic of a co-planar superconductor-constriction- superconductor (ScS) Josephson junction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
16
+ page_content=' ∗ mzliu@bnl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
17
+ page_content='gov † ctblack@bnl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
18
+ page_content='gov fabricated within a few centimeters, even when oxidation conditions are tightly controlled.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
19
+ page_content=' [2–5] Since the Joseph- son energy is directly proportional to Ic, this variation presents an additional design and manufacturing chal- lenge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
20
+ page_content=' In a transmon, the SIS JJ is shunted by a large capac- itor to minimize the charging energy and thus provide immunity to charge noise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
21
+ page_content=' Further, the qubit is coupled to a high-Q microwave resonator for readout.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
22
+ page_content=' Typically, the shunting capacitor and the resonator are fabricated separately from the SIS JJ, using a superconductor with higher Tc and better chemical robustness compared to Al (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
23
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
24
+ page_content=', niobium (Tc = 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
25
+ page_content='2 K)[6], tantalum (Tc = 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
26
+ page_content='4 K)[7], and titanium nitride (Tc = 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
27
+ page_content='6 K)[8]).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
28
+ page_content=' The improved ro- bustness allows post-fabrication chemical treatments to remove surface contaminants, which contribute to TLS loss.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
29
+ page_content=' However, most of these treatments are not possible after Al/AlOx/Al junction fabrication, due to the junc- tion’s fragile nature [9].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
30
+ page_content=' In this work we analyze the performance impact of replacing the transmon SIS tunnel junction with a co-planar superconductor-constriction-superconductor (ScS) Josephson junction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
31
+ page_content=' A ScS JJ is comprised of two superconductors separated by a thin neck of the same superconductor (Figure 1b), with the constriction estab- lishing the superconducting phase difference that enables Josephson behavior.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
32
+ page_content=' ScS JJs are co-planar (unlike SIS tunnel junctions) and can be fabricated using conven- tional lithography and metallization.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
33
+ page_content=' Here, we follow the formalism established by Koch et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
34
+ page_content=' in [10] to deter- mine the electrical properties of ScS transmons, which are shown to be different from SIS transmons, stemming from a different ScS JJ current-phase relationship (CPR or CΦR) compared to that of a SIS JJ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
35
+ page_content='[11–13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
36
+ page_content=' Compar- ing the two device architectures, we show that the ScS transmon has 50% less anharmonicity than the SIS trans- mon, for devices with the same Josephson energy and capacitive energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
37
+ page_content=' However, the smaller anharmonicity is accompanied by a significantly smaller charge disper- sion, giving the ScS transmon stronger immunity against charge noise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
38
+ page_content=' arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
39
+ page_content='04276v1 [cond-mat.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
40
+ page_content='supr-con] 11 Jan 2023 2 II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
41
+ page_content=' RESULTS AND DISCUSSION II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
42
+ page_content='1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
43
+ page_content=' Current-phase relation of a short ScS junction Consider a ScS Josephson junction comprised of two large superconductors connected by a diffusive quasi-one- dimensional wire with length d ≪ √ξ0l and width w ≪ d, where ξ0 is the Pippard superconducting coherence length, and l ≪ ξ0 is the dirty-limit electron mean free path.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
44
+ page_content=' In this case, Kulik and Omelyanchuk showed that the CPR for the ScS junction (KO-1) at T = 0 K is: IScS(ϕ) = π∆ eRn cos ϕ 2 tanh−1 � sin ϕ 2 � , (1) in which ∆ is the superconducting energy gap and Rn is the normal state resistance of the junction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
45
+ page_content=' [11, 12] The junction critical current Ic,ScS = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
46
+ page_content='662π∆/(eRn) is achieved at ϕ = (2k ± 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
47
+ page_content='627)π to satisfy dI(ϕ)/dϕ ∝ 1 − sin(ϕ/2) tanh−1 [sin(ϕ/2)] = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
48
+ page_content=' Given the Maclau- rin series tanh−1(x) = x + x3/3 + O(x5), Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
49
+ page_content=' 1 may be rewritten to a form that resembles the CPR of a SIS Josephson junction, as IScS(ϕ) = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
50
+ page_content='755Ic,ScS sin ϕ � 1 + 1 3 sin2 ϕ 2 + O � sin4 ϕ 2 � � , which shows that the CPR of a ScS junction distorts from the conventional sinusoidal form, but still bears odd parity and a 2π periodicity (Figure 2a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
51
+ page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
52
+ page_content='2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
53
+ page_content=' Josephson energy of a ScS transmon The potential energy of a Josephson junction is given by the integral EJ(ϕ) = � IJV dt = � IJ Φ0 2π dϕ dt dt = � IJ Φ0 2π dϕ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
54
+ page_content=' (2) −2 −1 0 1 2 φ/π −1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
55
+ page_content='0 −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
56
+ page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
57
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
58
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
59
+ page_content='0 I(φ)/Ic ScS SIS −1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
60
+ page_content='0 −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
61
+ page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
62
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
63
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
64
+ page_content='0 φ/π 0 1 2 3 EJ(φ)/EJ ScS SIS φ2/2 a b Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
65
+ page_content=' (a) The CPR of a ScS Josephson junction in the KO-1 limit (solid red) is distorted from the sinusoidal form of a SIS junction (dashed black).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
66
+ page_content=' (b) The Josephson energy of a ScS transmon (solid red) deviates from the cosine form of a SIS transmon (dashed black) and has 50% smaller an- harmonicity at its lowest order (ϕ4).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
67
+ page_content=' A harmonic parabola, ϕ2/2, is displayed (dotted cyan) for reference.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
68
+ page_content=' −1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
69
+ page_content='0 −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
70
+ page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
71
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
72
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
73
+ page_content='0 φ/π 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
74
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
75
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
76
+ page_content='0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
77
+ page_content='5 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
78
+ page_content='0 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
79
+ page_content='5 EJ(φ)/EJ −1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
80
+ page_content='0 −0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
81
+ page_content='5 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
82
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
83
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
84
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
85
+ page_content='0 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
86
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
87
+ page_content='0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
88
+ page_content='5 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
89
+ page_content='0 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
90
+ page_content='5 φ/π EJ(φ)/EJ a b SIS ScS 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
91
+ page_content='3031 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
92
+ page_content='8800 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
93
+ page_content='3993 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
94
+ page_content='7751 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
95
+ page_content='3096 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
96
+ page_content='9145 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
97
+ page_content='4896 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
98
+ page_content='0077 Figure 3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
99
+ page_content=' The eigenenergies (blue lines and numbers) and the probability densities (∥Ψ∥2) of the first 4 eigenstates of (a) a SIS transmon and (b) a ScS transmon, both with EJ/EC = 20 and ng = 1/2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
100
+ page_content=' The corresponding potential energies, normal- ized by EJ, are plotted in red lines for both transmons.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
101
+ page_content=' For a KO-1 junction defined by Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
102
+ page_content=' 1, the integral in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
103
+ page_content=' 2 leads to EJ,ScS(ϕ) = ∆Φ0 2eRn � ln � cos2 ϕ 2 � + 2 sin ϕ 2 tanh−1 � sin ϕ 2 � � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
104
+ page_content=' (3) Although this form appears very different from the po- tential energy of a SIS junction, EJ,SIS(ϕ) = EJ,SIS(1 − cos ϕ), with EJ,SIS = Ic,SISΦ0/2π, Maclaurin expansions of EJ,ScS and EJ,SIS make their similarities apparent: EJ,ScS(ϕ) = ∆Φ0 4eRn �1 2ϕ2 − 1 48ϕ4 + O(ϕ6) � EJ,SIS(ϕ) = EJ,SIS �1 2ϕ2 − 1 24ϕ4 + O(ϕ6) � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
105
+ page_content=' (4) Comparing the coefficients of the harmonic (ϕ2) term, we observe that the Josephson energy of a ScS transmon can be defined as: EJ,ScS = ∆Φ0 4eRn = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
106
+ page_content='755Ic,ScSΦ0/(2π) , (5) where the last equality recognizes that Ic,ScS = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
107
+ page_content='662π∆/(eRn).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
108
+ page_content=' Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
109
+ page_content=' 5 shows that both potential ener- gies contain anharmonicity led by a ϕ4 term, from which we estimate that the anharmonicity of a ScS transmon is about one half that of a SIS transmon, for devices with the same EJ.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
110
+ page_content=' This difference is clear when compar- ing normalized EJ(ϕ) of ScS and SIS transmons with a harmonic parabolic potential ϕ2/2 (Figure 2b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
111
+ page_content=' A more precise evaluation of the anharmonicity is given in II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
112
+ page_content='3, by computing the ScS transmon eigenenergies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
113
+ page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
114
+ page_content='3.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
115
+ page_content=' Eigenenergies and eigenstates of a ScS transmon A conventional SIS transmon has a Hamiltonian of the form: ˆHSIS = 4Ec(ˆn − ng)2 + EJ(1 − cos ˆϕ), (6) 3 0 20 40 60 80 100 0 20 40 60 80 E0m/EC 0 20 40 60 80 100 −1 0 1 2 3 4 5 (E12 - E01)/EC ScS SIS a b EJ/EC EJ/EC ScS SIS m = 0 1 2 3 0 20 40 60 80 100 100 τp (ns) SIS ScS EJ/EC 10-1 10-2 c Figure 4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
116
+ page_content=' (a) Transition energy E0m = Em − E0 at ng = 1/2 and (b) oscillator anharmonicity (E12 − E01) at ng = 1/2, as functions of EJ/EC for ScS transmon (solid lines) and SIS transmon (dashed lines).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
117
+ page_content=' (c) The minimal pulse duration (τp) of ScS (solid line) and SIS transmons (dashed line) vs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
118
+ page_content=' EJ/EC, all operated at ω01 = 2π × 10 GHz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
119
+ page_content=' where ng is the offset charge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
120
+ page_content=' The wave equation for a SIS transmon can be solved analytically.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
121
+ page_content=' [10] In the ScS transmon, the potential energy is given by Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
122
+ page_content=' 3, so that the Hamiltonian becomes, ˆHScS = 4EC(ˆn − ng)2 + EJ � 2 ln � cos2 ˆϕ 2 � + 4 sin ˆϕ 2 tanh−1 � sin ˆϕ 2 � � .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
123
+ page_content=' (7) The wave equation of a ScS transmon can be solved nu- merically using the finite difference method, in which the Hamiltonian is expressed in a discretized space of phase ϕ ∈ [−π, π), with the periodic boundary condition ap- plied to both ends.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
124
+ page_content=' The validity of the computation is confirmed by comparing a similar numerical solution of the wave equation for a SIS transmon with the analytical solutions presented by Koch et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
125
+ page_content=' [10] Figure 3 compares the first 4 eigenstates of a SIS transmon and a ScS trans- mon, both with EJ/EC = 20 and ng = 1/2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
126
+ page_content=' Although the lower level eigenenergies and eigenfunctions are sim- ilar, the differences become more apparent at higher en- ergies.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
127
+ page_content=' This trend is more clearly observed for the transi- tion energies E0m = Em − E0 calculated for both trans- mon types, across a range of EJ/EC from 1 and 100 (Figure 4a).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
128
+ page_content=' This difference reflects the smaller anhar- monicity of the ScS transmon, compared to the SIS trans- mon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
129
+ page_content=' By treating the leading anharmonic term (−ϕ4/24) in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
130
+ page_content=' 4 as a perturbation to the harmonic potential and applying the first-order perturbation theory, the mth eigenenergy of a SIS transmon is approximated by [10] Em,SIS ≈ ℏωp � m + 1 2 � − EC 4 � 2m2 + 2m + 1 � , (8) in which ℏωp = √8EJEC is the Josephson plasma energy.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
131
+ page_content=' The transition energy between the (m − 1)th and mth levels is therefore Em−1,m,SIS ≈ ℏωp − mEC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
132
+ page_content=' (9) From Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
133
+ page_content=' 9, we find that the anharmonicity of SIS transmon αSIS ≡ E12,SIS−E01,SIS is approximately −EC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
134
+ page_content=' By applying the same first-order perturbation theory cal- culation but realizing that the perturbation term is half as a SIS transmon (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
135
+ page_content=' 4), the mth eigenenergy of a ScS transmon can be approximated by Em,ScS ≈ ℏωp � m + 1 2 � − EC 8 � 2m2 + 2m + 1 � , (10) so that its anharmonicity, αScS, is approximately −EC/2, or half the anharmonicity of a SIS transmon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
136
+ page_content=' We can vi- sualize this finding in a plot of numerical results, looking at transmons with EJ/EC ≥ 20 (Figure 4b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
137
+ page_content=' The smaller anharmonicity of a ScS transmon means that the transitions E01 and E12 lie closer in energy, so that a longer RF pulse is needed to correctly excite the desired transition E01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
138
+ page_content=' The minimal pulse duration can be estimated as τp ≈ ℏ|α|−1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
139
+ page_content=' As shown in Figure 4c, despite its lower anharmonicity, τp of the ScS transmon remains below 1 ns even for EJ/EC = 100, when the qubit operates at 10 GHz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
140
+ page_content=' Because typical qubit pulse durations are ∼10 ns, we may conclude that the lower anharmonicity will not inhibit normal operation of a ScS transmon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
141
+ page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
142
+ page_content='4.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
143
+ page_content=' Charge dispersion of a ScS transmon A primary benefit of the transmon architecture is its relative immunity to charge noise, when designed to op- erate in the regime of EJ ≫ EC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
144
+ page_content=' In a SIS transmon, the charge dispersion of the mth level decreases exponentially with � 8EJ/EC, following [10] ϵm ≡ Em(ng = 1/2) − Em(ng = 0) ≈ EC 24m+5 (−1)mm!' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
145
+ page_content=' � 2 π � EJ 2EC � m 2 + 3 4 e−√ 8EJ/EC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
146
+ page_content=' (11) Intuitively, the charge dispersion is related to the tunneling probability between neighboring potential en- ergy valleys (Figure 3), e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
147
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
148
+ page_content=', when ϕ makes a full 2π 4 −2 −1 0 1 2 ng 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
149
+ page_content='5 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
150
+ page_content='0 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
151
+ page_content='5 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
152
+ page_content='0 Em/EJ SIS ScS SIS ScS |ϵm|/E01(ng = 1/2) 101 10-3 10-7 10-1 10-5 10-9 10-11 1 2 3 4 5 6 7 8 9 10 (EJ/EC)1/2 10 20 30 40 50 60 70 80 90 100 EJ/EC a b m = 0 1 2 m = 0 1 2 0 20 40 60 80 100 SIS ScS T2 (ns) EJ/EC 100 10-2 104 102 106 108 c Figure 5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
153
+ page_content=' (a) The eigenenergies Em of the lowest 3 eigenstates (m = 0, 1, 2) of a ScS transmon (solid line) and a SIS transmon (dashed lines), both with EJ/EC = 10, as functions of the offset charge ng.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
154
+ page_content=' (b) The charge dispersion ϵm of the lowest 3 eigenstates of a ScS transmon (solid line) and a SIS transmon (dashed lines), as functions of EJ/EC.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
155
+ page_content=' (c) The dephasing time T2 of ScS (solid line) and SIS transmons (dashed line) vs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
156
+ page_content=' EJ/EC, all operated at ω01 = 2π × 10 GHz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
157
+ page_content=' rotation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
158
+ page_content=' [10] By this reasoning, we may expect the higher barrier height of a ScS transmon (∼ 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
159
+ page_content='8EJ vs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
160
+ page_content=' 2EJ) to better suppress the tunneling probability and provide lower charge dispersion, compared to a SIS transmon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
161
+ page_content=' Figure 5a plots the first three eigenenergies Em (m = 0, 1, 2) versus the effective offset charge ng for both SIS (dashed) and ScS (solid) transmons, with EJ/EC = 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
162
+ page_content=' Clearly, the ScS transmon eigenenergies are more weakly perturbed by ng.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
163
+ page_content=' Calculations of the charge dispersion, ϵm = Em(ng = 1/2) − Em(ng = 0), across a wide range of 1 ≤ EJ/EC ≤ 100 show that suppression of charge dispersion in the ScS transmon becomes more effective for larger EJ/EC ratios (Figure 5b).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
164
+ page_content=' When EJ/EC = 100, the charge dispersion of the first excited state of a ScS transmon, ϵ1,ScS, is over one order of magnitude smaller than that of the corresponding SIS transmon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
165
+ page_content=' It is noted that the computation for the SIS transmon matches the analytical result very well [10], again demonstrating the high numerical precision of our finite-difference computation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
166
+ page_content=' Nevertheless, the computational error becomes significant as the normalized charge dispersion, |ϵm|/E01, approaches 10−11 and smaller.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
167
+ page_content=' This is due to the accumulation of floating-point error that eventually shows up when evaluating the vanishing difference between the two eigenenergies at ng = 0 and 1/2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
168
+ page_content=' In Figure 5b, the y-axis is presented on a logarithmic scale and the x-axis on a √(EJ/EC) scale, so that all curves approach a linear form at large EJ/EC values.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
169
+ page_content=' For the SIS transmon, the slope matches the expected exp(−√(8EJ/EC)) dependence in Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
170
+ page_content=' 11.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
171
+ page_content=' For the ScS transmon, the slope is larger, and is best described by ϵm ∝ exp(−√(1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
172
+ page_content='16 × 8EJ/EC)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
173
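As context for the charge-dispersion curves discussed above, the sketch below shows one common way to obtain such numbers for the standard SIS cosine potential: diagonalize H = 4EC(n − ng)² − EJ cos φ in the charge basis and take ϵ1 = E1(ng = 1/2) − E1(ng = 0). This is only an illustrative reconstruction of the SIS reference curve, not the authors' finite-difference ScS computation; the charge cutoff and the choice of units are assumptions made here.

```python
# Minimal sketch: charge dispersion of a standard SIS transmon by
# diagonalizing H = 4*EC*(n - ng)^2 - EJ*cos(phi) in the charge basis.
# This is NOT the paper's finite-difference ScS computation; it only
# reproduces the well-known SIS reference curve (Koch et al., Ref. [10]).
import numpy as np

def sis_levels(EJ, EC, ng, ncut=40):
    """Lowest eigenenergies of the SIS transmon at offset charge ng."""
    n = np.arange(-ncut, ncut + 1)
    H = np.diag(4.0 * EC * (n - ng) ** 2)      # charging term, diagonal
    off = -EJ / 2.0 * np.ones(2 * ncut)        # -EJ*cos(phi) couples n and n+1
    H += np.diag(off, 1) + np.diag(off, -1)
    return np.linalg.eigvalsh(H)               # sorted ascending

def charge_dispersion(EJ, EC, m=1):
    """epsilon_m = E_m(ng = 1/2) - E_m(ng = 0)."""
    return sis_levels(EJ, EC, 0.5)[m] - sis_levels(EJ, EC, 0.0)[m]

if __name__ == "__main__":
    EC = 1.0  # energies measured in units of EC
    for ratio in (10, 20, 50, 100):
        eps1 = charge_dispersion(ratio * EC, EC, m=1)
        levels = sis_levels(ratio * EC, EC, 0.5)
        e01 = levels[1] - levels[0]
        print(f"EJ/EC = {ratio:3d}:  |eps1|/E01 = {abs(eps1) / e01:.3e}")
```

Plotting |ϵ1|/E01 against √(EJ/EC) from this sketch reproduces the familiar straight line on a log scale, which is the SIS trend the ScS curve is compared against.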
+ page_content=' The improved charge dispersion makes the ScS transmon less sensitive to charge noise and, in turn, gives it a longer dephasing time T2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
174
+ page_content=' For dephasing caused by slow charge fluctuations of large amplitude, Koch et al. [10] found an upper limit on T2 given by T2 ≈ 4ℏ/(e²π|ϵ1|).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
175
+ page_content=' Using this relation, we compare T2 for both SIS and ScS transmons for EJ/EC between 1 and 100 (Figure 5c).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
176
+ page_content=' The ScS transmon improves T2 across the entire range of EJ/EC and especially at higher ratios.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
177
+ page_content=' At EJ/EC = 100, the SIS transmon has a T2 ceiling of about 3 ms, compared to about 50 ms for the ScS transmon, an over 10-fold increase.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
178
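The conversion from a charge dispersion ϵ1 to the T2 ceiling quoted above is simple arithmetic. The snippet below uses the bound as reconstructed here (T2 ≈ 4ℏ/(e²π|ϵ1|), with e Euler's number) and an assumed round value of |ϵ1|/h; it is illustrative only and does not reproduce the paper's Figure 5c data.

```python
# Illustrative arithmetic: charge dispersion -> dephasing ceiling, using the
# bound as reconstructed above, T2 ~ 4*hbar / (e^2 * pi * |eps1|).
# The eps1 value below is an assumed round number, not data from the paper.
import math

hbar = 1.054571817e-34  # J s
h = 6.62607015e-34      # J s

eps1_over_h = 10.0                 # assumed |eps1|/h in Hz (illustrative)
eps1 = eps1_over_h * h             # J
T2 = 4.0 * hbar / (math.e ** 2 * math.pi * eps1)
print(f"|eps1|/h = {eps1_over_h} Hz  ->  T2 ceiling ~ {T2 * 1e3:.1f} ms")
```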
+ page_content=' At present, because the T1 lifetime of SIS transmon qubits has yet to exceed 1 ms and is not limited by charge noise, this advantage of the ScS transmon architecture offers little immediate performance gain.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
179
+ page_content=' However, because we expect the lifetimes of superconducting qubits to continue improving (Schoelkopf’s Law) [14], we anticipate a point when charge noise dephasing becomes a bottleneck, and the ScS transmon architecture can offer effective mitigation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
180
+ page_content=' II.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
181
+ page_content='5.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
182
+ page_content=' ScS transmon design parameters The operational behavior of a ScS transmon is determined by its EJ and EC, which define the operating frequency ω01, the relative immunity to charge noise (ϵ1), and the minimum excitation pulse duration (τp).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
183
+ page_content=' Because these three quantities are determined by EJ and EC, they are not independent.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
184
+ page_content=' We can visualize this interdependence with three sets of contour lines plotted in the plane of EJ versus EC in Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
185
+ page_content=' These contours represent: (1) a transmon operating frequency (ω01/(2π)) between 1 and 10 GHz (set of red, descending diagonal lines), (2) ratios of EJ/EC of 10, 100, and 1000 (set of blue, ascending diagonal lines), and (3) the minimum excitation pulse duration τp between 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
186
+ page_content='32 and 10 ns (set of dashed, predominantly vertical lines).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
187
+ page_content=' Selecting two of these defines the third one.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
188
+ page_content=' For example, a ScS transmon designed to operate at ω01/(2π) = 5 GHz and with' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
189
+ page_content=' [Figure 6: contour plot in the plane of EJ/(2πħ) (GHz) versus EC/(2πħ) (GHz), with secondary axes CΣ (fF) and Rn/Tc (Ω K-1); contours shown for ω01/(2π) = 1 to 10 GHz, τp = 0.32 to 10 ns, and EJ/EC = 10, 100, and 1000.]' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
190
+ page_content=' Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
204
+ page_content=' A graphical guide for designing a ScS transmon with the required EJ and EC to match a desired transmon frequency ω01 and minimum pulse duration τp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
205
+ page_content=' The red lines are contour lines for transmon frequencies set at values between 1 and 10 GHz.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
206
+ page_content=' The dashed black lines are contour lines for τp set at a few values between 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
207
+ page_content='32 and 10 ns.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
208
+ page_content=' The blue lines are contour lines for EJ/EC ratios set at 10, 100, and 1000.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
209
+ page_content=' A second x-axis that is parallel to EC is presented for CΣ, following CΣ = e²/(2EC).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
210
+ page_content=' Similarly, a second y-axis that is parallel to EJ is presented for Rn/Tc, following Rn/Tc = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
211
+ page_content='76kBΦ0/(4eEJ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
212
+ page_content=' an excitation pulse of τp = 4 ns (green dot in Figure 6) will have an EJ/EC ratio of about 600.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
213
+ page_content=' Instead, a shorter excitation pulse of τp = 1 ns (purple dot in Figure 6) requires a tradeoff of smaller EJ/EC ≈ 40, and thus less immunity against charge noise.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
214
+ page_content=' Importantly, EJ and EC of a ScS transmon are set by the physical device dimensions and fundamental properties of the materials composing it.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
215
+ page_content=' EJ is determined by the superconducting energy gap of the material (∆) and the normal state resistance of the junction (Rn) (Eq.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
216
+ page_content=' 5).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
217
+ page_content=' For a BCS superconductor where ∆ = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
218
+ page_content='76kBTc, we can express EJ in terms of the material properties Rn/Tc = 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
219
+ page_content='76kBΦ0/(4eEJ,ScS), which is shown as the second (right) y-axis in Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
220
+ page_content=' Similarly, because EC is set by the total capacitance (CΣ = e²/(2EC,ScS)), which depends on device geometry and dielectric properties, we can express EC as a capacitance, shown as a second (top) x-axis in Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
221
+ page_content=' Returning to the example, we can now see from Figure 6 that designing a ScS transmon with ω01/(2π) = 5 GHz, τp = 4 ns, and an EJ/EC ratio of about 600 (green dot) requires a junction with Rn/Tc ≈ 3 kΩ · K−1 and a capacitor with CΣ ≈ 250 fF.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
222
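These two design values can be cross-checked by inverting the relations given above, CΣ = e²/(2EC) and Rn/Tc = 1.76kBΦ0/(4eEJ), once EC is estimated from the target frequency. The sketch below assumes the standard SIS-style estimate ℏω01 ≈ √(8EJEC) − EC; the ScS relation may differ slightly, so the numbers are approximate.

```python
# Rough design-point check for the "green dot" example (assumptions noted):
#   target w01/2pi = 5 GHz, EJ/EC = 600, and the SIS-style estimate
#   hbar*w01 ~ sqrt(8*EJ*EC) - EC (the ScS relation may differ slightly).
import math

e = 1.602176634e-19      # C
h = 6.62607015e-34       # J s
kB = 1.380649e-23        # J/K
Phi0 = 2.067833848e-15   # Wb (magnetic flux quantum)

f01 = 5e9                # Hz
ratio = 600.0            # EJ/EC

# h*f01 ~ (sqrt(8*ratio) - 1) * EC
EC = h * f01 / (math.sqrt(8.0 * ratio) - 1.0)
EJ = ratio * EC

C_sigma = e ** 2 / (2.0 * EC)                    # from EC = e^2 / (2*C_sigma)
Rn_over_Tc = 1.76 * kB * Phi0 / (4.0 * e * EJ)   # from the relation above

print(f"C_sigma ~ {C_sigma * 1e15:.0f} fF")        # ~260 fF (paper: ~250 fF)
print(f"Rn/Tc  ~ {Rn_over_Tc / 1e3:.1f} kOhm/K")   # ~2.7 kOhm/K (paper: ~3)
```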
+ page_content=' These properties can be realized by a constriction junction fabricated from a thin-film superconductor that has both a relatively high normal-state resistivity and a long superconducting coherence length.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
223
+ page_content=' As one example, a 10-nm-thick PtSi film was reported to have a normal-state sheet resistance Rs = 67 Ω/□, superconducting Tc = 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
224
+ page_content='63 K, and Pippard coherence length ξ = 440 nm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
225
+ page_content=' [15] Using these material parameters, we can meet the ScS transmon design criteria using a constriction junction with a physical length of 440 nm and a width of 16 nm.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
226
+ page_content=' The qubit capacitor physical dimensions should be designed for CΣ ≈ 250 fF, according to Figure 6.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
227
+ page_content=' If one instead desires the shorter excitation pulse duration of τp = 1 ns (purple dot), the constriction must have Rn/Tc ≈ 10 kΩ · K−1 and CΣ ≈ 70 fF.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
228
+ page_content=' For the same PtSi superconductor, these values can be met with a physical length of 440 nm and a width of 5 nm,' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
229
+ page_content=' which are more challenging dimensions to fabricate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
230
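The quoted constriction widths follow from simple square-counting with the PtSi parameters of Ref. [15]: the junction needs Rn/Rs squares, and with its length fixed at the coherence length ξ the width is ξRs/Rn. A minimal check, assuming Rn = (Rn/Tc) × Tc with the two design targets above:

```python
# Square-counting check of the quoted constriction dimensions for the
# 10-nm PtSi film (Rs = 67 Ohm/sq, Tc = 0.63 K, xi = 440 nm, Ref. [15]).
Rs = 67.0          # Ohm per square
Tc = 0.63          # K
xi_nm = 440.0      # constriction length set to the coherence length, in nm

for Rn_over_Tc in (3e3, 10e3):          # Ohm/K, the two design points above
    Rn = Rn_over_Tc * Tc                # required normal-state resistance, Ohm
    squares = Rn / Rs                   # length/width in units of squares
    width_nm = xi_nm / squares
    print(f"Rn/Tc = {Rn_over_Tc / 1e3:.0f} kOhm/K -> Rn = {Rn / 1e3:.1f} kOhm, "
          f"width ~ {width_nm:.0f} nm")
```

This reproduces the ~16 nm and ~5 nm widths quoted in the text for the 4 ns and 1 ns design points, respectively.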
+ page_content=' III.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
231
+ page_content=' CONCLUSION In summary, we have demonstrated through computation that a short ScS Josephson junction can be used as a drop-in replacement for the SIS tunnel junction in a transmon qubit.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
232
+ page_content=' In the transmon regime (EJ ≫ EC), a ScS transmon has 50% smaller anharmonicity than a SIS transmon, but this is compensated by its appreciably lower charge dispersion, which provides a significantly higher T2 ceiling.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
233
+ page_content=' Using this analysis, we estimate that high-performance ScS transmons can be achieved with constrictions having a normal-state resistance of a few kiloohms, which can be made from a thin nanobridge formed in low-Tc superconductors using conventional, high-resolution nanofabrication techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
234
+ page_content=' The ScS transmon design allows all components, including constriction junction, capacitor, and resonator, to be fabricated in a single lithography step.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
235
+ page_content=' This is a significant simplification compared to multistep SIS transmon fabrication, and also provides a robust architecture amenable to device post-processing, cleaning, and encapsulation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
236
+ page_content=' ACKNOWLEDGMENTS This material is based upon work supported by the U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
237
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
238
+ page_content=' Department of Energy, Office of Science, National Quantum Information Science Research Centers, Co- design Center for Quantum Advantage (C2QA) under contract number DE-SC0012704.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
239
+ page_content=' This research used computational resources of the Center for Functional Nanomaterials (CFN), which is a U.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
240
+ page_content='S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
241
+ page_content=' Department of Energy Office of Science User Facility, at Brookhaven Na- tional Laboratory under Contract No.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
242
+ page_content=' DE-SC0012704.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
243
+ page_content=' [1] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
244
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
245
+ page_content=' Dolan, Offset masks for lift-off photoprocessing, Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
246
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
247
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
248
+ page_content=' 31, 337 (1977).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
249
+ page_content=' [2] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
250
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
251
+ page_content=' Kreikebaum, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
252
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
253
+ page_content=' O’Brien, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
254
+ page_content=' Morvan, and I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
255
+ page_content=' Sid- diqi, Improving wafer-scale Josephson junction resistance variation in superconducting quantum coherent circuits, Supercond.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
256
+ page_content=' Sci.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
257
+ page_content=' Technol.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
258
+ page_content=' 33, 06LT02 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
259
+ page_content=' [3] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
260
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
261
+ page_content=' Hertzberg, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
262
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
263
+ page_content=' Zhang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
264
+ page_content=' Rosenblatt, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
265
+ page_content=' Mage- san, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
266
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
267
+ page_content=' Smolin, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
268
+ page_content='-B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
269
+ page_content=' Yau, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
270
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
271
+ page_content=' Adiga, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
272
+ page_content=' Sandberg, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
273
+ page_content=' Brink, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
274
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
275
+ page_content=' Chow, and J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
276
+ page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
277
+ page_content=' Orcutt, Laser-annealing Josephson junctions for yielding scaled-up superconduct- ing quantum processors, npj Quantum Inf.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
278
+ page_content=' 7, 129 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
279
+ page_content=' [4] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
280
+ page_content=' Osman, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
281
+ page_content=' Simon, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
282
+ page_content=' Bengtsson, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
283
+ page_content=' Kosen, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
284
+ page_content=' Krantz, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
285
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
286
+ page_content=' Lozano, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
287
+ page_content=' Scigliuzzo, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
288
+ page_content=' Delsing, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
289
+ page_content=' Bylander, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
290
+ page_content=' Fadavi Roudsari, Simplified Josephson-junction fabri- cation process for reproducibly high-performance super- conducting qubits, Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
291
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
292
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
293
+ page_content=' 118, 064002 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
294
+ page_content=' [5] H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
295
+ page_content=' Kim, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
296
+ page_content=' Jünger, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
297
+ page_content=' Morvan, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
298
+ page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
299
+ page_content=' Barnard, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
300
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
301
+ page_content=' Livingston, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
302
+ page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
303
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
304
+ page_content=' Altoé, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
305
+ page_content=' Kim, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
306
+ page_content=' Song, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
307
+ page_content=' Chen, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
308
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
309
+ page_content=' Kreikebaum, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
310
+ page_content=' F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
311
+ page_content=' Ogletree, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
312
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
313
+ page_content=' Santiago, and I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
314
+ page_content=' Siddiqi, Effects of laser-annealing on fixed-frequency superconducting qubits, Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
315
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
316
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
317
+ page_content=' 121, 142601 (2022).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
318
+ page_content=' [6] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
319
+ page_content=' Premkumar, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
320
+ page_content=' Weiland, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
321
+ page_content=' Hwang, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
322
+ page_content=' Jäck, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
323
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
324
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
325
+ page_content=' Place, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
326
+ page_content=' Waluyo, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
327
+ page_content=' Hunt, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
328
+ page_content=' Bisogni, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
329
+ page_content=' Pelliciari, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
330
+ page_content=' Barbour, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
331
+ page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
332
+ page_content=' Miller, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
333
+ page_content=' Russo, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
334
+ page_content=' Camino, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
335
+ page_content=' Kisslinger, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
336
+ page_content=' Tong, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
337
+ page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
338
+ page_content=' Hybertsen, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
339
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
340
+ page_content=' Houck, and I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
341
+ page_content=' Jarrige, Microscopic relaxation channels in mate- rials for superconducting qubits, Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
342
+ page_content=' Mater.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
343
+ page_content=' 2, 72 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
344
+ page_content=' [7] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
345
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
346
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
347
+ page_content=' Place, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
348
+ page_content=' V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
349
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
350
+ page_content=' Rodgers, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
351
+ page_content=' Mundada, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
352
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
353
+ page_content=' Smitham, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
354
+ page_content=' Fitzpatrick, Z.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
355
+ page_content=' Leng, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
356
+ page_content=' Premkumar, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
357
+ page_content=' Bryon, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
358
+ page_content=' Vrajitoarea, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
359
+ page_content=' Sussman, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
360
+ page_content=' Cheng, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
361
+ page_content=' Mad- havan, H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
362
+ page_content=' K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
363
+ page_content=' Babla, X.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
364
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
365
+ page_content=' Le, Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
366
+ page_content=' Gang, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
367
+ page_content=' Jäck, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
368
+ page_content=' Gye- nis, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
369
+ page_content=' Yao, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
370
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
371
+ page_content=' Cava, N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
372
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
373
+ page_content=' de Leon, and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
374
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
375
+ page_content=' Houck, New material platform for superconducting transmon qubits with coherence times exceeding 0.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
376
+ page_content='3 milliseconds, Nature Commun.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
377
+ page_content=' 12, 1779 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
378
+ page_content=' [8] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
379
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
380
+ page_content=' Chang, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
381
+ page_content=' R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
382
+ page_content=' Vissers, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
383
+ page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
384
+ page_content=' Córcoles, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
385
+ page_content=' Sandberg, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
386
+ page_content=' Gao, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
387
+ page_content=' W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
388
+ page_content=' Abraham, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
389
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
390
+ page_content=' Chow, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
391
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
392
+ page_content=' Gambetta, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
393
+ page_content=' B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
394
+ page_content=' Rothwell, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
395
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
396
+ page_content=' Keefe, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
397
+ page_content=' Steffen, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
398
+ page_content=' P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
399
+ page_content=' Pap- pas, Improved superconducting qubit coherence using ti- tanium nitride, Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
400
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
401
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
402
+ page_content=' 103, 012602 (2013).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
403
+ page_content=' [9] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
404
+ page_content=' Williams, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
405
+ page_content=' Gupta, and M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
406
+ page_content=' Wasilik, Etch rates for micromachining processing-part ii, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
407
+ page_content=' Microelectromech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
408
+ page_content=' Syst.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
409
+ page_content=' 12, 761 (2003).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
410
+ page_content=' [10] J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
411
+ page_content=' Koch, T.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
412
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
413
+ page_content=' Yu, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
414
+ page_content=' Gambetta, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
415
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
416
+ page_content=' Houck, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
417
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
418
+ page_content=' Schuster, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
419
+ page_content=' Majer, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
420
+ page_content=' Blais, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
421
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
422
+ page_content=' Devoret, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
423
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
424
+ page_content=' Girvin, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
425
+ page_content=' J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
426
+ page_content=' Schoelkopf, Charge-insensitive qubit design de- rived from the Cooper pair box, Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
427
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
428
+ page_content=' A 76, 042319 (2007).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
429
+ page_content=' [11] I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
430
+ page_content=' O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
431
+ page_content=' Kulik and A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
432
+ page_content=' N.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
433
+ page_content=' Omel’yanchuk, Contribution to the microscopic theory of the Josephson effect in supercon- ducting bridges, JETP Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
434
+ page_content=' 21, 96 (1975).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
435
+ page_content=' [12] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
436
+ page_content=' A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
437
+ page_content=' Golubov, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
438
+ page_content=' Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
439
+ page_content=' Kupriyanov, and E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
440
+ page_content=' Il’ichev, The current-phase relation in Josephson junctions, Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
441
+ page_content=' Mod.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
442
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
443
+ page_content=' 76, 411 (2004).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
444
+ page_content=' [13] R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
445
+ page_content=' Vijay, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
446
+ page_content=' M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
447
+ page_content=' Levenson-Falk, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
448
+ page_content=' H.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
449
+ page_content=' Slichter, and I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
450
+ page_content=' Sid- diqi, Approaching ideal weak link behavior with three di- mensional aluminum nanobridges, Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
451
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
452
+ page_content=' Lett.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
453
+ page_content=' 96, 223112 (2010).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
454
+ page_content=' [14] M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
455
+ page_content=' Kjaergaard, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
456
+ page_content=' E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
457
+ page_content=' Schwartz, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
458
+ page_content=' Braumüller, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
459
+ page_content=' Krantz, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
460
+ page_content=' I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
461
+ page_content='-J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
462
+ page_content=' Wang, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
463
+ page_content=' Gustavsson, and W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
464
+ page_content=' D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
465
+ page_content=' Oliver, Superconducting qubits: Current state of play, Annu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
466
+ page_content=' Rev.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
467
+ page_content=' Condens.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
468
+ page_content=' Matter Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
469
+ page_content=' 11, 369 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
470
+ page_content=' [15] K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
471
+ page_content=' Oto, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
472
+ page_content=' Takaoka, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
473
+ page_content=' Murase, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
474
+ page_content=' Ishida, Supercon- ductivity in PtSi ultrathin films, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
475
+ page_content=' Appl.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
476
+ page_content=' Phys.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
477
+ page_content=' 76, 5339 (1994).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/PdE3T4oBgHgl3EQfCglF/content/2301.04276v1.pdf'}
PdFKT4oBgHgl3EQfgi6G/content/tmp_files/2301.11834v1.pdf.txt ADDED
@@ -0,0 +1,1274 @@
1
+ arXiv:2301.11834v1 [cond-mat.soft] 27 Jan 2023
2
+ Gas Diffusion in Cement Pastes: An Analysis using a
3
+ Fluctuating Diffusivity Model
4
+ Fumiaki Nakai1, Takato Ishida1,∗
5
+ Department of Materials Physics, Graduate School of Engineering, Nagoya University, Furo-cho, Chikusa,
6
+ Nagoya 464-8603, Japan
7
+ Abstract
8
+ This work proposes an application of the concept of fluctuating diffusivity to the dif-
9
+ fusion of gas molecules in cementitious materials, particularly through a two-state
10
+ fluctuating diffusivity (2SFD) model. The 2SFD model is utilized to investigate the
11
+ diffusion of oxygen in cement pastes. The analysis provides a reasonable description
12
+ of the diffusion coefficient of oxygen in cement pastes, and highlights the presence of
13
+ non-Gaussian diffusion, which can be attributed to the heterogeneous microstructure.
14
+ The presence of non-Gaussianity in the probability density of the molecule’s displace-
15
+ ment, characterized by heavier tails than those of the Gaussian distribution, may have
16
+ a significant impact on the durability assessments of concrete structures.
17
+ 1. Introduction
18
+ Since the invention of Portland cement by Joseph Aspdin in 1824, cementitious ma-
19
+ terials have been widely utilized in the construction of infrastructure. In recent decades,
20
+ there has been a growing emphasis on assessing the long-term performance of rein-
21
+ forced concrete structures, with a focus on reducing carbon emissions and preserving
22
+ resources. The durability of concrete structures can be compromised by the penetration
23
+ of aggressive lightweight molecules (causing chemical degradation [1] such as carbon-
24
+ ation [2, 3, 4], corrosion [5, 6], sulphate attack [7], calcium leaching [8, 9]), making
25
+ the examination of transport phenomena in cementitious materials a vital subject in the
26
+ field of cement and concrete research. It is an undeniable fact that cementitious ma-
27
+ terials are inherently porous in nature, possessing pores of various scales. Diffusion,
28
+ the primary mode of mass transport, has been comprehended by devising effective dif-
29
+ fusion coefficients which properly reflect the characteristics of the pore network struc-
30
+ ture (tortuosity, connectivity, constrictivity, formation factor [10, 11, 12, 13, 14, 15])
31
+ and by utilizing them to solve the diffusion equation. It is obvious that the probabilis-
32
+ tic displacement distribution is Gaussian when the conventional diffusion equation is
+ solved [16, 17].
+ ∗Corresponding author
+ Email addresses: nakai.fumiaki.c7@s.mail.nagoya-u.ac.jp (Fumiaki Nakai),
+ ishida@mp.pse.nagoya-u.ac.jp (Takato Ishida)
+ However, recent research in the field of theoretical physics has high-
38
+ lighted the existence of cases in which the displacement distribution deviates from a
39
+ Gaussian distribution, depending on the spatio-temporal scale of interest. Such non-
40
+ Gaussianity may have a significant impact on the long-term reliability probability as-
41
+ sessment of reinforced concrete structures. We have effectively formulated the concept
42
+ in a form that is applicable to diffusion in cementitious materials.
43
+ A microstructure of cementitious materials inherently exhibits a heterogeneous
44
+ composition, which can result in the non-Gaussian diffusion of gases. To effectively
45
+ describe the diffusion in heterogeneous material, the concept of fluctuating diffusivity
46
+ (FD) [18, 19, 20, 21, 22, 23, 24] has been demonstrated to be useful, as evidenced by
47
+ the studies for the glass forming liquid [25], colloidal suspensions [26, 27], and bio-
48
+ logical systems [28, 29]. The diffusion of a free molecule with fluctuating diffusivity
49
+ is described by the equation
50
+ \frac{\partial G(x; t)}{\partial t} = D(t) \nabla^2 G(x; t)    (1)
54
+ where t denotes the time, x represents the displacement vector of the particle, G(x; t)
55
+ is the probability density of x at time t, and D(t) represents the fluctuating diffusivity
56
+ and is subject to a stochastic process. By providing a simple and physically reasonable
57
+ rule for D(t), it is possible to theoretically analyze the dynamics of the diffusing par-
58
+ ticles. The fluctuating diffusivity is based on the idea that the diffusion environment
59
+ experienced by the particle changes in time, either as a result of a temporal alteration
60
+ in the environment or due to the migration of particles to a distinct milieu. Upon ini-
61
+ tial inspection, one may think that the fluctuating diffusivity approach, expressed as
62
+ Eq. (1), is similar to the time-dependent diffusivity models taking into account the
63
+ long-term effects of changing diffusion media, such as prolonged hydration reactions
64
+ and accumulated damages [30, 31, 32]. However, it is important to note that these
65
+ two approaches are fundamentally distinct in terms of their concepts and underlying
66
+ motivations. The fluctuating diffusivity approach posits that the diffusion coefficient
67
+ changes stochastically over time, reflecting the temporal and spatial heterogeneity of
68
+ the matrix. In contrast, the time-dependent diffusion coefficient varies deterministi-
69
+ cally, reflecting the time evolution of internal microstructures caused by the long-term
70
+ effects. In this paper, the latter approach, which is characterized by the deterministic
71
+ variation of the diffusion coefficient, is referred to as deterministic drifting diffusiv-
72
+ ity (DDD), and is distinguished from the fluctuating diffusivity. It is undeniable that
73
+ the extensive research conducted on DDD has greatly enhanced our understanding of
74
+ transport phenomena in cementitious materials and continues to be applied effectively
75
+ in current studies. It is important to note that fluctuating diffusivity does not aim to
76
+ replace or update DDD, but rather it takes a distinct physical perspective. In fact, the
77
+ target timescale is significantly different between the fluctuating diffusivity and DDD
78
+ approaches. Typically, the FD analyzes the particle diffusion on a timescale where the
79
+ particle diffuses over the characteristic length of the heterogeneous environment, while
80
+ the DDD approach focuses on the timescale where the state of the diffusion medium
81
+ changes over a prolonged period. Here, it is important to note that some studies have
82
+ employed DDD approach [30], which does not treat temporal and spatial fluctuations
83
+ and is inadequate in describing diffusion in heterogeneous environments. The appli-
84
86
+ cation of the fluctuating diffusivity framework allows for an effective analysis of the
87
+ phenomena of small molecule diffusion in cementitious materials, where the diffu-
88
+ sivity may fluctuate spatio-temporally in response to the heterogeneous nature of the
89
+ diffusion medium. In the context of diffusion in cementitious materials, it should be ef-
90
+ fortless for researchers in the field of cement materials to envision diffusion phenomena
91
+ that fall within the scope of such a framework, such as gas diffusion in a depercolated
92
+ capillary pore network, cases of diffusion coupling with adsorption on the pore wall or
93
+ dissolution in the pore solution. Additionally, phenomena such as the consumption of
94
+ CO2 by carbonation and the immobilization of chloride ions through Friedel’s salt and
95
+ calcium oxychlorides formation [33, 34], may also fall within the scope of this frame-
96
+ work if these phenomena are regarded as trapping states with quite long time constants.
97
+ When the timescale of observation is comparable to a timescale where the molecules
98
+ diffuse over the characteristic length of the heterogeneous environment, non-Gaussian
99
+ behavior of the displacement distribution is exhibited (i.e., the tails of the displacement
100
+ distribution tend to be heavy).
101
+ Let us herein present several sophisticated approaches for investigating diffusion
102
+ in cementitious materials. There are two primary existing methods for understanding
103
+ the diffusion phenomena of small molecules in cementitious materials: (i) numerical
104
+ diffusion simulations on virtual microstructures that replicate the microstructural char-
105
+ acteristics of cementitious materials, and (ii) empirical or semi-empirical modeling of
106
+ effective diffusion coefficients through a process of homogenization. In recent years,
107
+ the former approach of numerical diffusion simulations on virtual microstructures has
108
+ made significant progress, successfully simulating the diffusion of various diffusants
109
+ in cementitious materials of various types and compositions, both with and without
110
+ interfacial transition zones (ITZs) [35, 36, 6, 37, 38, 39, 40, 41, 42, 43]. A particularly
111
+ successful recent approach within this model has been the implementation of numerical
112
+ diffusion models, such as those based on the Lattice Boltzmann method [37, 40, 41, 42],
113
+ random walk method [6, 39, 44, 43], and finite element method [35], utilizing virtual
114
+ 3D microstructures generated by hydration models. Several hydration models have
115
+ been previously proposed, such as CHEMHYD3D [45, 46, 47], HYMOSTRUC3D
116
+ [48], THAMES [49, 50], DuCOM [51], IPKM [52], µic [53], which are widely used
117
+ in the field of cement and concrete research. In such microstructure-guided diffusion
118
+ models, the CHEMHYD3D model (a voxel-based approach) devised by Bentz and Gar-
119
+ boczi [45, 46, 47] and the HYMOSTRUC3D (a vector-based approach) developed by
120
+ van Breugel [48], are commonly utilized [35, 54, 40, 41]. Both CHEMHYD3D and
121
+ HYMOSTRUC3D are founded upon Jennings’s colloidal model of Calcium-Silicate-
122
+ Hydrates (CSH) morphology [55]. Recently, advancements in the force field of molec-
123
+ ular dynamics in cementitious materials is becoming quite well-developed [56, 57, 58].
124
+ Zhang et al. reported the modeling of diffusion simulations using the random walk
125
+ method on structures generated by molecular dynamics [43]. The latter approach en-
126
+ tails describing mass diffusion phenomena through empirically or semi-empirically
127
+ modeling the effective diffusion coefficient in heterogeneous media and solving the
128
+ standard diffusion equations utilizing that effective diffusion coefficient. The effec-
129
+ tive diffusion coefficients are modeled in accordance with homogenization procedures
130
+ commonly utilized in the field of composite materials, and are inferred to be in agree-
131
+ ment with experimental observations and structural insights garnered from hydration
132
134
+ models [59, 60, 61, 62, 63, 14, 64, 65]. In the realm of finite element-based analysis
135
+ utilizing representative elementary volume (REV) meshes (where the discretizing mesh
136
+ size is generally greater than the discretization scale in microstructure-guided models),
137
+ the identical homogenization procedure is applied to assign an effective diffusion co-
138
+ efficient to each REV mesh [66, 15]. The empirical relationship linking the parameters
139
+ of capillary pore and the effective diffusion coefficient is well organized in a critical
140
+ review article by Patel et al [11]. When the porosity is known, the primary strategy is
141
+ to attempt to express the effective diffusion coefficient through Archie’s law [67], and
142
+ when porosity data is unavailable, the effective diffusion coefficient is frequently de-
143
+ rived via the Powers model [68], which can link the hydration degree and water-cement
144
+ ratio (w/c) to the capillary porosity. Yamaguchi et al. refined the empirical relationship
145
+ by assessing the accessible capillary pores, and demonstrated that the modified model
146
+ is efficacious in describing the effective diffusion coefficient of tritiated water [69].
147
+ Furthermore, the empirical effective diffusive coefficient has been adapted to include
148
+ semi-empirical parameters that characterize the morphology of the pore network (tor-
149
+ tuosity, connectivity, constrictivity, formation factor) [10, 11, 12, 13, 14, 15]. There has
150
+ been extensive research aimed at relating these parameters to the actual pore topology
151
+ obtained from imaging techniques, rather than simply adjusting bulk diffusion coef-
152
+ ficients to effective diffusion coefficients [70, 71, 10, 66, 72, 13, 15]. Recently, an
153
+ attempt has been reported to construct a regression model for the diffusion of chloride
154
+ ions in concrete using machine learning techniques [73]. It is important to note that
155
+ none of the models presented in this paragraph, which express diffusion coefficients,
156
+ can be considered universally applicable. For instance, the microstructure-based dif-
157
+ fusion model in dry cement paste established by Liu et al., despite taking into account
158
+ various factors related multi-scale properties, cannot perfectly explain the diffusion co-
159
+ efficient in low w/c mixing cement pastes [40]. This discrepancy may be attributed to
160
+ the structural fluctuations of the generated virtual microstructures, which have a greater
161
+ impact on the apparent diffusivity in the low w/c regime. Additionally, the
162
+ empirical model also appears to exhibit a somewhat greater discrepancy between its
163
+ predicted diffusion coefficients and those observed in the low w/c regime [11]. In this
164
+ work, we introduce an up-to-date concept of theoretical physics, “fluctuating diffusiv-
165
+ ity”, to the cement and concrete field. The proposed framework enables the incor-
166
+ poration of morphological features of heterogeneous media and the consideration of
167
+ several types of diffusion as stochastic processes, without the requirement for detailed
168
+ structural information or multiple empirical parameters.
169
+ The paper is structured as follows: In Section 2.1, we present a comprehensive for-
170
+ mulation of the fluctuating diffusivity using a general discretized state. In Section 2.2,
171
+ we delve into a simplified two-state fluctuating diffusivity (2SFD) model, following
172
+ the work by Uneyama et al [21] and Miyaguchi et al [74]. We analytically calculate
173
+ the self-part of the intermediate scattering function and the second and fourth moments
174
+ of the probability density of particle displacement, which are integral components for
175
+ discussing the probability density of the displacement within the model. In Section 2.3,
176
+ we apply the 2SFD model to a fundamental system, specifically the diffusion of O2 in
177
+ cement pastes under standard temperatures and pressures as a preliminary test case.
178
+ The subsequent Section 3 discusses the distinctions of the proposed model in com-
179
+ parison to existing models, its scope of applicability and limitations, its potential for
180
182
+ generalization to cementitious systems, and the potential impact of the derived diffuse
183
+ displacement distribution on the long-term durability assessment of future structures.
184
+ The conclusion is provided in section 4.
185
+ 2. Theory
186
+ 2.1. Fluctuating diffusivity with n-states
187
+ The fluctuating diffusivity can be represented by the diffusion equation, which in-
188
+ cludes a fluctuating diffusivity term, D(t), as
189
+ \frac{\partial G(x; t)}{\partial t} = D(t) \nabla^2 G(x; t)    (2)
193
+ where x represents the tracer position, t denotes the time, G(x; t) is the probabil-
194
+ ity density of x for a given t, and D(t) is the time-dependent fluctuating diffusivity.
195
+ While this work analyzes the 2SFD model in the following subsections, the calcula-
196
+ tion method is not restricted to the two-state. Thus, we here calculate for the general
197
+ n-states case as
198
+ D(t) = D⊤ξ(t)
199
+ (3)
200
+ where D⊤ = (D1, D2, · · · , Dn) is the vector of the diffusion coefficients and its
201
+ component Di denotes the diffusion coefficient of the i-th state. ξ(t) indicates the state
202
+ of the diffusivity at time t; when the diffusivity is in the i-th state, ξi(t) = 1 and the other components are zero.
+ We here describe the probability vector whose i-th component is the probability that the particle is in the i-th state at
204
+ time t as P (t), and its stochastic process is described as:
205
+ \frac{\partial P(t)}{\partial t} = R P(t)    (4)
209
+ where R represents the transition matrix. From this expression, we can formally ex-
210
+ press the probability density of P (t+ ∆) with the infinitesimal time step ∆ for a given
211
+ P (t) as
212
+ P (t + ∆) = exp (∆R) P (t)
213
+ (5)
214
+ From this expression, the transition probability where the state changes from ξ(t) to
215
+ ξ(t + ∆) is
216
+ P(ξ(t + ∆); ξ(t)) = ξ⊤(t + ∆) exp (∆R) ξ(t)
217
+ (6)
218
+ To proceed with the calculation of Eq. (2), the intermediate scattering function:
219
+ F(k, t) = \int dx\, e^{-i k \cdot x} G(x; t) is useful. By taking the Fourier transform of Eq. (2), we
+ obtain the differential equation for F(k, t) as follows.
+ \frac{\partial F(k; t)}{\partial t} = -D(t) k^2 F(k; t).    (7)
+ This differential equation is formally solved as [22, 24]
+ F(k; t) = \left\langle \exp\!\left( -k^2 \int_0^t D(t')\, dt' \right) \right\rangle_D    (8)
241
+ where ⟨· · · ⟩D denotes the ensemble average for D(t). Formally, Eq. (8) can be de-
242
+ scribed as a discretized form as
243
+ F(k; t) = \sum_{\{\xi(j\Delta)\}} \exp\!\Big( -\sum_{j=0}^{t/\Delta-1} \Delta k^2 D^{\top}\xi(j\Delta) \Big) \prod_{j=0}^{t/\Delta-1} \big[ P(\xi((j+1)\Delta); \xi(j\Delta)) \big]\, \xi^{\top}(0) P(0)
+          = \sum_{\{\xi(j\Delta)\}} \prod_{j=0}^{t/\Delta-1} \Big[ \exp\!\big( -\Delta k^2 D^{\top}\xi(j\Delta) \big)\, \xi^{\top}((j+1)\Delta) \exp(\Delta R)\, \xi(j\Delta) \Big]\, \xi^{\top}(0) P(0)    (9)
+ This equation is akin to that of the partition function of the Ising model under an exter-
+ nal field. Then, we define the transfer matrix as
+ \xi^{\top} T\, \xi(t) = \xi^{\top}(t+\Delta) \exp\!\Big( \Delta R - \frac{\Delta k^2 D^{\top}[\xi(t+\Delta)+\xi(t)]}{2} \Big)\, \xi(t)    (10)
+ Since ∆ is an infinitesimal quantity, the elements of the transfer matrix can be ex-
+ pressed as:
+ T_{ij} = \exp(\Delta R)_{ij} \exp(-\Delta k^2 D_j \delta_{ij}) = \delta_{ij} + \Delta (R_{ij} - k^2 D_j \delta_{ij})    (11)
+ For the sake of brevity, we also define the matrix Q_{ij} as:
+ T_{ij} = \delta_{ij} + \Delta Q_{ij}    (12)
+ By utilizing the transfer matrix, Eq. (9) can be reduced to
+ F(k; t) = \sum_{\{\xi(j\Delta)\}} e^{\Delta k^2 D^{\top}\xi(t)/2} \prod_{j=0}^{t/\Delta-1} \xi^{\top}((j+1)\Delta)\, T\, \xi(j\Delta)\; e^{-\Delta k^2 D^{\top}\xi(0)/2}\, \xi^{\top}(0) P(0)
+          = \sum_{\{\xi(j\Delta)\}} \prod_{j=0}^{t/\Delta-1} \xi^{\top}((j+1)\Delta)\, T\, \xi(j\Delta)\, \xi^{\top}(0) P(0)
+          = \sum_{\xi(t)} \xi^{\top}(t)\, T^{t/\Delta} P(0) = \sum_{\xi(t)} \xi^{\top}(t)\, e^{tQ} P(0)    (13)
315
+ This equation can be calculated when the initial probability density P (0), the i-th state
316
+ diffusivity coefficient from Eq.(3), and the transition probability R from Eq.(4) are
317
+ provided.
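Eq. (13) reduces the n-state problem to a matrix exponential, which is convenient to evaluate numerically. The snippet below is a minimal sketch of such an evaluation (it is not part of the original paper); the state diffusivities, rate matrix and initial probabilities are illustrative placeholders, and numpy/scipy are assumed to be available.

```python
# Minimal numerical sketch of Eq. (13): F(k; t) = sum_xi xi^T exp(tQ) P(0),
# with Q = R - k^2 diag(D_1, ..., D_n) for a general n-state fluctuating diffusivity.
# All parameter values below are illustrative placeholders.
import numpy as np
from scipy.linalg import expm

def scattering_function(k, t, D, R, P0):
    """Self-part of the intermediate scattering function of the n-state model."""
    Q = R - k**2 * np.diag(D)                 # Eqs. (11)-(12): Q_ij = R_ij - k^2 D_j delta_ij
    return float(np.sum(expm(t * Q) @ P0))    # summing components = (1,...,1) exp(tQ) P(0)

# Example: a hypothetical 3-state model (diffusivities in m^2/s, rates in 1/s).
D  = np.array([2.0e-6, 1.0e-8, 0.0])
R  = np.array([[-1.0e8,  5.0e7,  1.0e6],
               [ 1.0e8, -6.0e7,  0.0  ],
               [ 0.0,    1.0e7, -1.0e6]])     # columns sum to zero (probability conservation)
P0 = np.array([0.5, 0.4, 0.1])                # initial state probabilities
print(scattering_function(k=1.0e7, t=1.0e-8, D=D, R=R, P0=P0))
```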
318
320
+ 2.2. 2SFD model
321
+ We here consider the two-state fluctuating diffusivity (2SFD) model following the
322
+ literature by Uneyama et al [21] and Miyaguchi et al [74], which serves as a mathemati-
323
+ cally tractable model. The diffusivity of the particle in the 2SFD model is characterized
324
+ by distinct variables, D⊤ = (Df, Ds), and the transition probability matrix, R, which
325
+ is represented as
326
+ R = \begin{pmatrix} -r_f & r_s \\ r_f & -r_s \end{pmatrix}    (14)
+ In the equilibrium state, the initial probability density is given by
+ P(0) = \frac{1}{r_f + r_s} \begin{pmatrix} r_s \\ r_f \end{pmatrix}    (15)
+ Then, the matrix Q in Eq. (13) is presented as
+ Q = \begin{pmatrix} -r_f - k^2 D_f & r_s \\ r_f & -r_s - k^2 D_s \end{pmatrix}    (16)
+ For this Q, the eigenvalues and the corresponding eigenvectors are respectively given by:
+ \lambda_{\pm} = \frac{ -\big( r_f + k^2 D_f + r_s + k^2 D_s \big) \pm \sqrt{ (r_f + k^2 D_f - r_s - k^2 D_s)^2 + 4 r_f r_s } }{2}    (17)
+ v_{\pm} = \begin{pmatrix} \dfrac{ r_f + k^2 D_f - r_s - k^2 D_s \pm \sqrt{ (r_f + k^2 D_f - r_s - k^2 D_s)^2 + 4 r_f r_s } }{2 r_f} \\ 1 \end{pmatrix}    (18)
+ Using \lambda_{\pm} and v_{\pm}, the matrix Q can be described as
+ Q = (v_+, v_-) \begin{pmatrix} \lambda_+ & 0 \\ 0 & \lambda_- \end{pmatrix} (v_+, v_-)^{-1}    (19)
+ Combining Eqs. (13) and (19), we obtain
+ F(k; t) = (1, 1)(v_+, v_-) \begin{pmatrix} e^{\lambda_+ t} & 0 \\ 0 & e^{\lambda_- t} \end{pmatrix} (v_+, v_-)^{-1} \frac{1}{r_f + r_s} \begin{pmatrix} r_s \\ r_f \end{pmatrix} = \chi_+ e^{\lambda_+ t} + \chi_- e^{\lambda_- t}    (20)
+ where we defined \chi_{\pm} as
+ \chi_{\pm} = \frac{1}{2} \left[ 1 \pm \frac{ (k^2 D_f - k^2 D_s)(r_f - r_s) + (r_s + r_f)^2 }{ (\lambda_+ - \lambda_-)(r_f + r_s) } \right]    (21)
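For concreteness, the closed-form 2SFD result of Eqs. (17), (20) and (21) can be evaluated directly. The short sketch below is not from the paper; the parameter values are placeholders, and the final line simply checks the normalization F(k; 0) = 1.

```python
# Sketch of the closed-form 2SFD intermediate scattering function,
# F(k; t) = chi_+ e^{lambda_+ t} + chi_- e^{lambda_- t} (Eqs. (17), (20), (21)).
import numpy as np

def f_2sfd(k, t, Df, Ds, rf, rs):
    a = rf + k**2 * Df                      # decay rate attached to the fast state
    b = rs + k**2 * Ds                      # decay rate attached to the slow state
    root = np.sqrt((a - b)**2 + 4.0 * rf * rs)
    lam_p, lam_m = (-(a + b) + root) / 2.0, (-(a + b) - root) / 2.0   # Eq. (17)
    chi_p = 0.5 * (1.0 + (k**2 * (Df - Ds) * (rf - rs) + (rf + rs)**2)
                   / ((lam_p - lam_m) * (rf + rs)))                   # Eq. (21)
    chi_m = 1.0 - chi_p                     # chi_+ + chi_- = 1, so F(k; 0) = 1
    return chi_p * np.exp(lam_p * t) + chi_m * np.exp(lam_m * t)

# placeholder parameters (m^2/s, 1/s); F(k; 0) should return 1
print(f_2sfd(k=1.0e7, t=0.0, Df=2.0e-6, Ds=1.0e-8, rf=5.0e7, rs=1.0e8))
```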
400
+ Eq. (20) includes all information for the probability density function G(x, t). From
401
+ Eq. (20), we can calculate all moments of the probability density such as second and
402
+ fourth moments (⟨x2(t)⟩ and ⟨x4(t)⟩), respectively, where the bracket ⟨· · · ⟩ denotes
403
+ the statistical average. The utilization of higher moments serves to quantify the devi-
404
+ ation of G(r; t) from the Gaussian distribution, as will be discussed subsequently. As
405
407
+ per the definition of the self-part of the intermediate scattering function, these moments
408
+ are formally obtained in the isotropic system as
409
+ \langle x^2(t) \rangle = -\left. \frac{\partial^2}{\partial k^2} F(k, t) \right|_{k=0}    (22)
+ \langle x^4(t) \rangle = \left. \frac{\partial^2}{\partial k^2} \frac{\partial^2 F(k, t)}{\partial k^2} \right|_{k=0}    (23)
+ Substituting Eq. (20) into Eq. (22), we obtain
+ \langle x^2(t) \rangle = 6\, \frac{D_f r_s + D_s r_f}{r_f + r_s}\, t,    (24)
+ Using this relation, the average diffusion coefficient D can be determined through the
+ relation \langle x^2(t) \rangle = 6 D t in a three-dimensional system as
+ D = \frac{D_f r_s + D_s r_f}{r_f + r_s}    (25)
428
+ This outcome indicates that the average diffusion coefficient in the present 2SFD model
429
+ is the weighted average of Df and Ds with the transition rates rf and rs. Furthermore,
430
+ by utilizing Eq.(23), we can obtain an analytical expression for the fourth moment of
431
+ G(r; t) as
432
+ \langle x^4(t) \rangle = 120 \left[ \frac{(D_f r_s + D_s r_f)^2}{2 (r_f + r_s)^2}\, t^2 - \frac{(D_f - D_s)^2 r_f r_s}{(r_f + r_s)^4} \left( 1 - (r_f + r_s) t - e^{-(r_f + r_s) t} \right) \right],    (26)
442
+ which is used later.
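The moments of Eqs. (24)-(26) and the averaged diffusivity of Eq. (25) translate directly into a few lines of code. The following sketch is not part of the paper and uses placeholder parameter values for illustration.

```python
# Sketch of Eqs. (24)-(26): averaged diffusivity and the second/fourth moments
# of the displacement in the 2SFD model. Parameter values are placeholders.
import numpy as np

def mean_diffusivity(Df, Ds, rf, rs):
    return (Df * rs + Ds * rf) / (rf + rs)                      # Eq. (25)

def second_moment(t, Df, Ds, rf, rs):
    return 6.0 * mean_diffusivity(Df, Ds, rf, rs) * t           # Eq. (24)

def fourth_moment(t, Df, Ds, rf, rs):
    r = rf + rs
    term1 = (Df * rs + Ds * rf)**2 / (2.0 * r**2) * t**2
    term2 = (Df - Ds)**2 * rf * rs / r**4 * (1.0 - r * t - np.exp(-r * t))
    return 120.0 * (term1 - term2)                              # Eq. (26)

print(second_moment(1e-8, 2e-6, 1e-8, 5e7, 1e8),
      fourth_moment(1e-8, 2e-6, 1e-8, 5e7, 1e8))
```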
443
+ 2.3. Application of 2SFD model to gas O2 in cement paste
444
+ In this study, we address the fundamental problem of O2 diffusion, which is known
445
+ to be one of the basic aggressive gases that can affect the long-term performance of
446
+ reinforced concrete structures [75]. The diffusion of oxygen in dry cement paste (i.e.,
447
+ the absence of free water in capillary pores), is chosen as the primary case study. This
448
+ system was selected as it presents a relatively simple diffusion medium of cementi-
449
+ tious materials, yet offers some degree of heterogeneity. We here focus on the O2 diffusion
450
+ in dry cement paste consisting of the capillary pore phase and the colloidal CSH [55]
451
+ phase under ambient temperature and pressure conditions T = 298 K, P = 1 atm.
452
+ As depicted in Figure 1, the colloidal CSH consists of two different density phases in
453
+ proximity to the surface and the hydration front, which are classified as LD-CSH (low-
454
+ density CSH) and HD-CSH (high-density CSH), respectively [76, 77]. For simplicity,
455
+ this study treats the capillary pore phase and the LD-CSH phase as diffusive,
+ while the HD-CSH and unhydrated clinker regions are treated as non-diffusive phases.
457
+ Given the non-negligible difference in density between the LD-CSH and HD-CSH, we
458
+ tentatively assumed that O2 molecules cannot penetrate into the HD-CSH
459
+ [Figure 1 labels: capillary pore (fast diffusive phase, Df); LD-CSH (slow diffusion phase, Ds); HD-CSH & clinker (non-diffusive); O2 molecule]
+ Figure 1: Schematic diagram of O2 diffusion in a cement paste, consisting of three phases: capillary
+ pore, low-density CSH (diffusive) phase and non-diffusive phase (high-density CSH and unhydrated cement
+ clinker).
+ [Figure 2 labels: diffusion in capillary pores (fast); trapping at CSH phase (slow)]
+ Figure 2: Schematic illustration of the transitional process of the diffusivity D(t). Df corresponds to the diffusivity of
+ the fast diffusion in capillary pores, and Ds corresponds to the diffusivity of the slow diffusion in the CSH phase.
478
+ phase through the LD-CSH phase. This study regards the diffusion in the capillary
479
+ void as a rapid diffusion process (diffusion coefficient Df), comprising both molecu-
480
+ lar diffusion and Knudsen diffusion, while diffusion in the LD-CSH is considered as
481
+ a slow diffusion process (diffusion coefficient Ds). They are used as the inputs for
482
+ the 2SFD model, as illustrated in Figure 2. Note that the following analyses derive
483
+ all characteristic values of the heterogeneous diffusion media through physically rea-
484
+ sonable estimations. In our system, at ambient temperature and pressure, the impact
485
+ of surface diffusion on the overall diffusion characteristics is possibly negligible (the
+ coverage of the O2 molecule is approximately 0.01 or less, as can be estimated in a
+ similar way to Ref. [40]).
488
+ From this point on, the system setup is described in detail. The size of the colloidal
489
+ CSH is assumed to be l = 50 nm, which is determined based on the size of the globule
490
+ floc in the CM-II model proposed by Jennings [78]. In this study, the thickness of the
491
+ LD-CSH on colloidal CSH, which is treated as the diffusive phase, is assumed as 10 nm
492
+ from the surface, in accordance with the value utilized in the previous microstructure-
493
+ guided model [40]. The porosity is represented by φ, and the number density of the
494
+ colloidal CSH is denoted by ρ. For simplicity, we assume that the colloidal CSH is
495
+ spherical, and then the relation between φ and ρ is described as
496
+ 1 - \phi = \frac{\rho \pi l^3}{6}    (27)
499
+ With the parameters specified above, we describe the four input parameters, namely
500
502
+ Df, Ds, rf, and rs, in the 2SFD model. In the present model, the diffusion coefficient
503
+ of the fast state, Df, can be considered as the harmonic average of the molecular and
504
+ Knudsen diffusion coefficients, DM and DK, as follows:
505
+ Df =
506
+ DMDK
507
+ DM + DK
508
+ (28)
509
+ In the ordinary pressure and temperature conditions, DM is estimated as [79]
510
+ DM = 3kBT
511
+ 8Pσ2
512
+
513
+ kBT
514
+ πm
515
+ (29)
516
+ where kB denotes the Boltzmann constant. σ and m represent the diameter and mass
517
+ of the Oxygen, respectively. They are effectively given as σ = 3.46×10−10m [80, 81]
518
+ and m = 5.31 × 10−26kg. From these variables, DM is estimated as DM = 1.99 ×
519
+ 10−5m2s−1. In a complex system such as cement materials, the estimation of DK
520
+ is difficult. We roughly estimate DK by approximating the target cement system as a
521
+ Lorentz gas, i.e. a single mobile particle in fixed spherical obstacles. An analogous
522
+ postulation was utilized in the research examining gas diffusion in cement paste by Liu
523
+ et al [40]. Under this assumption, the diffusion coefficient is determined as [82]
524
+ D_K = \frac{\bar{v}^2 \tau}{3}    (30)
527
+ where ¯v denotes the mean speed of the oxygen molecule, given as ¯v = √(8kBT/(πm)), and τ
530
+ represents the mean free time. The estimation of τ is a challenging task, however,
531
+ it has been roughly estimated from the mean pore size [40]. In this study, a rough
532
+ approximation of τ is made by considering the gas kinetics. When the colloidal CSH
533
+ is dilute, the mean free time can be expressed as τ = 4/ρπl2¯v, where it is assumed
534
+ that the interaction distance between O2 and the colloidal CSH is approximated as (l +
535
+ σ)/2 ≃ l/2. This estimated τ is not adequate for the low porosity regime, for instance,
536
+ τ should be 0 for φ = 0. To account for the case of small φ, a phenomenological
537
+ description of τ as depicted in previous literature [83] is employed:
538
+ \tau = \left( 1 - \frac{\rho \pi l^3}{6} \right) \frac{4}{\rho \pi l^2 \bar{v}}    (31)
546
+ Combining Eqs. (27), (30), and (31) we obtain
547
+ D_K = \frac{4 l \phi}{9 (1 - \phi)} \sqrt{\frac{2 k_B T}{\pi m}}    (32)
554
+ In this expression, DK becomes 0 for φ = 0 and diverges for φ = 1; this is in agree-
555
+ ment with the intuitive representation of Knudsen diffusion. The slow diffusion state
556
+ pertains to diffusion within the LD-CSH phase. The determination of diffusivity is
557
+ not straightforward as the handling of diffusion within the LD-CSH phase is complex.
558
+ Though this estimation remains an open problem, prior investigations suggest that there
559
+ may exist two possible approaches, (i) consider it as surface diffusion and determining
560
562
+ the diffusion coefficient through Wu’s empirical equation [84] and the model of Chen
563
+ and Yang [85], which are commonly employed in the context of shale gas, or (ii) by
564
+ utilizing effective medium theory as demonstrated by Patel et al [61]. Here, we ten-
565
+ tatively assume the slow diffusion coefficient as Ds = 10−8m2s−1. This value does
566
+ not contradict with both estimations introduced above. The first approach necessitates
567
+ the isosteric adsorption heat (∆H) as an input for Wu’s empirical equation [84]. If
568
+ we adopt the isosteric adsorption heat of CO2 on the CSH surface, ∆H ∼ 10 kJ/mol
569
+ is tentatively applied to O2 as the same procedure conducted by Liu et al. [40], the
570
+ Ds would be of the order of 10−8m2s−1. Furthermore, Patel et al. also reported that
571
+ the C-S-H diffusivity is three orders of magnitude lower than the bulk diffusivity for
572
+ various diffusants [61]. Subsequently, the transition rates rf and rs are determined
573
+ consistently with information on the pore structure. The rf corresponds to the transi-
574
+ tion rate from the fast diffusion state at capillary pores to the slow diffusion state in
575
+ the LD-CSH phase. In the dilute limit of the volume fraction of the CSH phase, the
576
+ average capillary pore size, L, can be roughly approximated to be ρ−1/3. When the
577
+ volume fraction of the CSH phase is not dilute, the effect of excluded volume must be
578
+ taken into account, which can be phenomenologically estimated. As L approaches 0
579
+ when the space is entirely occupied by the CSH phase and diverges when the CSH is
580
+ absent, a possible relation between L and the CSH number density is
581
+ L =
582
+
583
+ ρ
584
+ (1 − ρπl3/6)
585
+ �−1/3
586
+ = l
587
+
588
+ πφ
589
+ 6(1 − φ)
590
+ �1/3
591
+ (33)
592
+ If it is assumed that rf represents the rate of contact with the colloidal CSH from the
593
+ capillary pore phase, rf can be estimated as
594
+ Df = L2rf
595
+ (34)
596
+ The rs represents the transition rate from the slow diffusion state in the LD-CSH phase
597
+ back to the fast diffusion state at capillary pores. Considering that the thickness of the
598
+ LD-CSH phase is about lLD = 10 nm and that the molecule can escape from the surface by moving
+ about 10 nm, the following estimation can be obtained.
600
+ D_s = l_{LD}^2 r_s    (35)
603
+ By utilizing the relations of Df, Ds, rf, and rs as stated above, we can calculate the
604
+ dynamics of the O2 in the 2SFD model.
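Putting the above estimates together, the porosity dependence of the averaged diffusion coefficient (cf. Figure 4) follows from Eqs. (25), (28), (29) and (32)-(35). The sketch below is one possible implementation under the assumptions stated in the text (l = 50 nm, l_LD = 10 nm, Ds = 10^-8 m^2/s, T = 298 K, P = 1 atm); it is illustrative rather than the authors' own code.

```python
# Sketch of the Section 2.3 input-parameter pipeline for O2 in dry cement paste.
import numpy as np

kB, T, P = 1.380649e-23, 298.0, 101325.0     # SI units
sigma, m = 3.46e-10, 5.31e-26                # O2 diameter [m] and mass [kg]
l, l_LD, Ds = 50e-9, 10e-9, 1e-8             # colloid size, LD-CSH thickness, slow diffusivity

def mean_diffusivity_vs_porosity(phi):
    DM = 3.0 * kB * T / (8.0 * P * sigma**2) * np.sqrt(kB * T / (np.pi * m))        # Eq. (29)
    DK = 4.0 * l * phi / (9.0 * (1.0 - phi)) * np.sqrt(2.0 * kB * T / (np.pi * m))  # Eq. (32)
    Df = DM * DK / (DM + DK)                                                        # Eq. (28)
    L  = l * (np.pi * phi / (6.0 * (1.0 - phi)))**(1.0 / 3.0)                       # Eq. (33)
    rf = Df / L**2                                                                  # Eq. (34)
    rs = Ds / l_LD**2                                                               # Eq. (35)
    return (Df * rs + Ds * rf) / (rf + rs)                                          # Eq. (25)

for phi in (0.1, 0.2, 0.3, 0.4, 0.5):
    print(phi, mean_diffusivity_vs_porosity(phi))   # rough curve in the spirit of Fig. 4
```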
605
+ The representation of the trajectory may prove informative in providing an intu-
606
+ itive understanding of the 2SFD model. Subsequently, utilizing a kinetic Monte Carlo
607
+ scheme [86, 87] based on Equations (2) and (4), we numerically calculate the trajectory.
608
+ Figure 3 illustrates a representative trajectory of an O2 molecule in the 2SFD model,
609
+ depicted by the red curve with black dots indicating a time interval of (rf + rs)−1.
610
+ For comparative purposes, the trajectory of an O2 molecule moving without fluctuat-
611
+ ing diffusivity (diffusion coefficient kept constant as per Equation (25)) is presented
612
+ by the yellow curve with black dots plotted at every time interval (rf + rs)−1. The
613
+ red curve effectively captures the heterogeneous diffusivity, which can be interpreted
614
+ as a reflection of the heterogeneous nature of the cement paste. In contrast, the yellow
615
+ curve does not exhibit heterogeneity, of course.
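A two-state trajectory of this kind can be generated with a simple stochastic scheme: Brownian displacements drawn with the diffusivity of the current state, and state switches drawn with the rates rf and rs. The following sketch is a rough stand-in for the kinetic Monte Carlo calculation described above (it uses a fixed small time step instead of exact event sampling, and the parameters are placeholders).

```python
# Rough sketch of a two-state trajectory in the spirit of Eqs. (2), (4) and Figure 3.
import numpy as np

def two_state_trajectory(Df, Ds, rf, rs, dt, nsteps, seed=0):
    rng = np.random.default_rng(seed)
    pos = np.zeros((nsteps + 1, 3))
    state_fast = True                                   # start in the fast state
    for i in range(nsteps):
        D = Df if state_fast else Ds
        pos[i + 1] = pos[i] + rng.normal(0.0, np.sqrt(2.0 * D * dt), size=3)
        rate = rf if state_fast else rs                 # switching rate of current state
        if rng.random() < rate * dt:                    # valid only if rate*dt << 1
            state_fast = not state_fast
    return pos

traj = two_state_trajectory(Df=2e-6, Ds=1e-8, rf=5e7, rs=1e8, dt=1e-10, nsteps=100000)
```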
616
+ [Figure 3 axes: x, y; legend: 2SFD model, constant diffusion coefficient; scale bar: 100 nm]
623
+ Figure 3: The trajectory of O2 over the observed time duration 1000/(rf + rs) at a porosity φ = 0.5 is
624
+ represented by the pink curve. For comparison, the trajectory of particle diffusion with a constant diffusion
625
+ coefficient, as represented by equation (25), is depicted by the blue curve. Closed circle symbols are also
626
+ displayed at the same time intervals of (rf + rs)−1.
627
629
+ From Equation (25), we depict the diffusion coefficient D as a function of porosity
630
+ φ in Figure (4). For comparative purposes, data obtained in previous studies by Yio
631
+ et al. [13], Boumaaza et al. [88], and Houst and Wittmann [89] are represented by
632
+ blue, purple, and green symbols, respectively. Table 1 summarizes the detailed condi-
633
+ tions of previous works that measured oxygen diffusion coefficients in cement pastes.
634
+ In this study, the ideal comparison for the measured O2 diffusivity in cement pastes
635
+ would have been to data obtained from completely dry cement pastes, as reported by
636
+ Boumaaza et al. [88]. However, to the best of our knowledge, such data is quite scarce.
637
+ Therefore, in order to provide a reasonable comparison, we have elected to include
638
+ the results of previous studies that have measured O2 diffusivity in cement pastes un-
639
+ der conditions of relatively low humidity, as suggested by the findings of Houst and
640
+ Wittmann [89] that the effect of relative humidity on diffusivity is minimal below 55%.
641
+ Specifically, we have included the results of Yio et al. [13], Houst and Wittmann [89]
642
+ as comparable data for O2 diffusivity in cement pastes. However, we have not included
643
+ the part of the results in Yio et al. where the hydration reaction was not fully completed
644
+ in the comparison data. The size of the colloidal CSH changes as the hydration reaction
645
+ progresses, which affects the estimated diffusion coefficient in this 2SFD model, as we
646
+ will discuss later in the study (See discussion section). Our theoretical results exhibit
647
+ a qualitative agreement with the data presented in these prior works. It is important to
648
+ note that the four inputs, Df, Ds, rf, and rs, are derived from system parameters and
649
+ can be determined through physical considerations.
650
+ [Figure 4 axes: porosity (0.1–0.5) vs. D [m²/s]; legend: Yio et al. (2019), Boumaaza et al. (2018), Houst and Wittmann (1994), 2SFD model]
+ Figure 4: Diffusion coefficient of the molecule O2 against the porosity φ.
668
+ Gas diffusion in cementitious materials is often characterized by the diffusion coef-
669
+ ficient; however, this alone may be insufficient to fully explore the corrosion of reinforcement.
670
+ The tail of the probability density of the displacement G(r; t) should also be taken into
671
+ account. We herein analyze the probability density of the displacement for a single de-
672
+ Table 1: Detailed information on previous gas diffusion datasets.
+ Ref.                    | w/c ratio | Curing                       | Drying method         | Porosity            | Conditions
+ Yio et al. [13]         | 0.30      | Cured at 100% RH, 293 K      | Kept in 55% RH, 293 K | 0.133               | 0.5–2.5 atm and
+                         | 0.45      | for 90 days                  |                       | 0.194               | room temperature
+ Boumaaza et al. [88]    | 0.50      | Cured at 100% RH for 1 day,  | Oven-dried            | 0.492, 0.455, 0.417 | 1 atm and 293 K
+                         | 0.60      | 2 months and 8 months        |                       | 0.483, 0.454        |
+ Houst and Wittmann [89] | 0.40      | Immersed in lime water       | Oven-dried            | 0.110               | 1 atm, room temperature
+                         | 0.80      | for 6 months or more         |                       | 0.390               | and 47% RH
715
+ gree of freedom, x, G(x; t), which is derived from the inverse Fourier transform of the
716
+ self-part of the intermediate scattering function as G(x; t) =
717
+
718
+ eikxxF(kx; t). F(kx; t)
719
+ can be computed from Eq.(2) by substituting x with x; as a result, Eq.(20) where k is
720
+ substituted with kx is obtained. As the analytical calculation of the inverse transfor-
721
+ mation of F(kx; t) is difficult, we perform the numerical integration. Fig. 5 illustrates
722
+ the probability density of the O2 displacement for various time durations t at a typical
723
+ porosity φ = 0.5. We scale the horizontal and vertical axes by the standard deviation of
724
+ the displacement √(2Dt) and display the Gaussian distribution function as a reference.
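One possible way to carry out this numerical inverse Fourier transform is a direct quadrature over a truncated k_x grid, exploiting the fact that F(k_x; t) is real and even in k_x. The sketch below reuses the f_2sfd helper from the sketch after Eq. (21); the grid cutoff and spacing are illustrative choices, not values taken from the paper.

```python
# Sketch of the numerical inverse Fourier transform used to obtain G(x; t) (cf. Fig. 5).
import numpy as np

def displacement_pdf(x, t, Df, Ds, rf, rs, k_max=5e9, n_k=20001):
    k = np.linspace(-k_max, k_max, n_k)
    F = np.array([f_2sfd(abs(ki), t, Df, Ds, rf, rs) for ki in k])
    # G(x; t) = (1/2pi) \int dk_x e^{i k_x x} F(k_x; t); only the cosine part survives
    return np.trapz(F * np.cos(k * x), k) / (2.0 * np.pi)

x = 1e-8   # a displacement of 10 nm, for illustration
print(displacement_pdf(x, t=1e-8, Df=2e-6, Ds=1e-8, rf=5e7, rs=1e8))
```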
727
+ In the short time scale t ≤ 10−8s, clear deviations of G(x; t) from the Gaussian distri-
728
+ bution are observed. These deviations gradually diminish with increasing observation
729
+ time t. t ∼ 10−8s is comparable to the timescales of the inverse of rf or rs. This result
730
+ suggests that the Gaussian approximation for G(x; t) may not be appropriate for the
731
+ timescale over which O2 diffuses the lengths of colloidal CSH or the capillary pore.
732
+ Our result is reasonable since non-Gaussian distributions, in fact, have been frequently
733
+ observed at the microscopic scale in various heterogeneous systems such as confined
734
+ water in CSH [90], glass-forming liquids [25], or colloidal suspensions [26, 27].
735
+ To characterize non-Gaussian diffusion, the non-Gaussian parameter α is often em-
736
+ ployed [91, 92, 93] and defined in three-dimensional systems as [94]
737
+ \alpha(t) = \frac{3 \langle r^4(t) \rangle}{5 \langle r^2(t) \rangle^2} - 1    (36)
740
+ where brackets denote the statistical average. α is equal to zero when the stochastic
741
+ process of displacement conforms to a Gaussian distribution; if the dynamics of the
742
+ particle can be described by the conventional diffusion equation with constant diffusiv-
743
+ ity, α is equal to zero. Empirically, non-Gaussianity cannot be neglected for α > 0.1.
744
+ We have obtained the second moment ⟨r2(t)⟩ and the fourth moment ⟨r4(t)⟩ as repre-
745
+ sented by Equations (24) and (26), respectively. Consequently, we can determine α as
746
+ the following expression:
747
+ \alpha(t) = \frac{2 (D_f - D_s)^2 r_f r_s}{(D_f r_s + D_s r_f)^2 (r_f + r_s)^2} \cdot \frac{e^{-(r_f + r_s) t} + (r_f + r_s) t - 1}{t^2}    (37)
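Eq. (37) is straightforward to evaluate numerically; a brief sketch (placeholder parameters, not the authors' code) is:

```python
# Sketch of the non-Gaussian parameter of Eq. (37).
import numpy as np

def alpha(t, Df, Ds, rf, rs):
    r = rf + rs
    prefactor = 2.0 * (Df - Ds)**2 * rf * rs / ((Df * rs + Ds * rf)**2 * r**2)
    return prefactor * (np.exp(-r * t) + r * t - 1.0) / t**2

t = np.logspace(-11, -6, 200)                       # time axis similar to Fig. 6
print(alpha(t, Df=2e-6, Ds=1e-8, rf=5e7, rs=1e8)[:3])
```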
753
+ Fig. 6 displays α against time t with various porosity φ.
754
+ α exhibits strong non-
755
+ Gaussianity for the small-time regime t ≪ 10−9, and it non-monotonically changes
756
+ with increasing porosity φ. This result may be reasonable since the heterogeneity of
757
+ 14
758
+
759
+
760
+
761
+
762
+
763
+
764
+ 0
765
+ 1
766
+ 2
767
+ 3
768
+ 4
769
+ -1
770
+ -2
771
+ -3
772
+ -4
773
+ 10
774
+ -1
775
+ 10
776
+ 0
777
+ 10
778
+ -2
779
+ 10
780
+ -3
781
+ =10
782
+ -9
783
+ =10
784
+ -8
785
+ =10
786
+ -7
787
+ =10
788
+ -6
789
+ Gaussian
790
+ Figure 5: Probability density of O2 displacement for various time duration t at the porosity φ = 0.5. The
791
+ horizontal and vertical axes are normalized by the standard deviation
792
+
793
+ 2Dt. For comparison, the Gaussian
794
+ distribution is also presented with the black curve.
795
+ the diffusivity will disappear for φ = 0 and φ = 1. Additionally, the non-Gaussian
796
+ parameter α decreases over the range 10−9 s < t < 10−8 s, indicating that diffusion in
797
+ the timescale of t < 10−8 cannot be described by a Gaussian process or a conventional
798
+ diffusion equation with constant diffusivity.
799
+ 3. Discussion
800
+ In this study, we employed the 2SFD model for O2 diffusion in cement pastes,
801
+ which constitutes a stochastic diffusion model comprising parameters that can be phys-
802
+ ically inferred from an abundance of experimental studies on gas diffusivity in cemen-
803
+ titious materials, while incorporating some crucial aspects of microstructures. This
804
+ model effectively addresses stochastic processes involving transitions between multi-
805
+ ple diffuse states, and is capable of analytically determining the probabilistic displace-
806
+ ment distribution including the non-Gaussian parameter. Therefore, we posit that it
807
+ constitutes a highly flexible framework that can be easily modified as long as the tran-
808
+ sition rates between multiple diffuse states including additional states can be effectively
809
+ assessed.
810
+ In the above analysis, we tentatively assumed a colloidal CSH dimension of 50 nm.
811
+ This is possibly acceptable since the Jennings’s CSH morphological model of CM-II
812
+ [78], suggests that the size of globule flocs within the ranges from 30 to 60 nm. The
813
+ estimated net diffusion coefficient, as one of the outputs of the 2SFD model increases
814
+ as the assumed colloidal CSH size increases, as shown in Figure 7. This behavior is
815
+ consistent with the experimental results reported by Bentz et al. [95] that the diffusion
816
+ [Figure 6 axes: α vs. time (10^-11 to 10^-8 s); legend: φ = 0.005, 0.250, 0.450, 0.650, 0.850]
+ Figure 6: Non-Gaussian parameter α against time t with various porosity φ.
837
+ coefficient increases with the size of the cement particles used in the cement paste in
838
+ the high-porosity region, while remaining largely independent of cement particle size
839
+ in the low-porosity region.
840
+ In this model, we have demonstrated that the assumed size of the colloidal CSH
841
+ not only has an impact on the diffusion coefficient, but also influences the higher-
842
+ order moments of the probability distribution of displacement, such as the shape of
843
+ the distribution function and the non-Gaussian parameter. Besides the size effect of the
844
+ colloidal CSH, the examination of the influence of the shape of the assumed CSH shape
845
+ may also be significant. Zhang et al. [96] recently revealed that the effect of the shape
846
+ of cement particles (elliptical or spherical) on the chloride diffusion behavior in cement
847
+ paste is limited. However, it is feasible that the shape of cement particles might have
848
+ an effect on the shape of the probabilistic distribution of diffusion displacement, even
849
+ in the system studied by Zhang et al. If we adopt the Jennings’s CM-II model for the
850
+ CSH morphology in modeling, the CSH should possess the shape of an ellipsoid, rather
851
+ than perfectly spherical. This point is worth further investigation. Hence, it
852
+ can be inferred that some of the elements of the microstructure have an impact on the
853
+ shape of the probabilistic distribution of diffusional displacement, despite their limited
854
+ influence on the diffusion coefficient. Since the tail of the diffusion distribution has
855
+ a significant influence on the reliability (durability) assessment of reinforced concrete
856
+ structures, it should be quite important to consider not only the diffusion coefficient,
857
+ but also the shape of the displacement distribution in any theoretical, numerical, or
858
+ empirical approaches.
859
+ Liu, Liu, and Zhang [40] conducted a study investigating the dynamics of CO2, O2,
860
+ and H2 in dry cement paste using the lattice Boltzmann method. In their research, a
861
+ heterogeneous structure of cement paste was phenomenologically constructed, within
862
+ which gas diffused. They determined the diffusion coefficient as a function of porosity,
863
+ however, the probability density of displacement G(x; t) or the non-Gaussian parame-
864
+ [Figure 7 axes: colloidal CSH size l (30–60 nm) vs. D [m²/s]; legend: φ = 0.10, 0.20, 0.30, 0.40, 0.50]
+ Figure 7: Diffusion coefficient D against the colloidal CSH size l with various porosity φ.
884
+ ter α were not examined. The theoretical model presented here is akin to their system,
885
+ thus their system would exhibit similar non-Gaussianity of G(x; t) and a non-negligible
886
+ non-Gaussian parameter. In other words, their simulation methodology could verify
887
+ our theoretical results.
888
+ The current study addresses O2 diffusion as a case in point, however, since oxygen
889
+ is not highly soluble in water, the estimates can be readily extrapolated even if the ce-
+ ment paste is in a humid environment (i.e., non-negligible amount of free or physically
891
+ adsorbed water in capillary pores). The correction could be accomplished by incorpo-
892
+ rating the relationship between relative humidity and water adsorption layer thickness
893
+ [97], and incorporating it into the estimation of the diffusion coefficient of Knudsen
894
+ diffusion. However, to extend the 2SFD model to CO2 diffusion (another crucial ag-
895
+ gressive gas species), a slight alteration of the model may be necessary. Due to its high
896
+ solubility in water, CO2 must be addressed as a diffusion phenomenon in conjunction
897
+ with local solubility equilibrium, or it may be immobilized through an in-situ carbon-
898
+ ation reaction that occurs in the pore solution and/or inside the CSH gel. To take this
899
+ into account, it is essential to additionally incorporate as a third state a stochastic pro-
900
+ cess that transits on a time scale so long in comparison to the observation time that
901
+ the diffusion coefficient is virtually zero, but the trapped time can be considered effec-
902
+ tively infinite. A comparable methodology should be necessary when addressing the
903
+ diffusion problem of chloride ions as it also necessitates consideration of the effects
904
+ of chloride binding. Even when it is expanded to a three-state (even for the extension
905
+ for a multi-state model), as long as the eigenvalues and eigenvectors of the matrix Q
906
+ in Eq. (13) (case of the 2SFD model in Eq. (16))) are obtained, all other calculations
907
+ can always be performed. The simplicity of its mathematical structure is also another
908
+ benefit of this model. The application to the diffusion phenomenon of chemical species
909
+ (CO2, Cl –), which is more critical and reactive for the durability of concrete structures,
910
912
+ will be discussed in a future publication as ongoing research in the near future.
913
+ 4. Conclusion
914
+ In conclusion, this work presents an application of the analytic method of fluc-
915
+ tuating diffusivity to the study of gas diffusion in cementitious materials. Note that
916
+ the concept of fluctuating diffusivity is not in opposition to the time-dependent diffu-
917
+ sivity approach reflecting the long-term effects of changing diffusion media, such as
918
+ prolonged hydration reactions, pore closure due to carbonation, crackings, but rather
919
+ the target timescale is significantly different between the two approaches. The fluctu-
920
+ ating diffusivity framework effectively analyzes the diffusion of small molecules in
921
+ cementitious materials, where the diffusivity may fluctuate spatio-temporally due to
922
+ the heterogeneous nature of the diffusion medium, and has potential applicability to
923
+ various diffusion phenomena in these materials. Our theoretical results of the 2SFD
924
+ model provide a reasonable description of the diffusion coefficient of O2 in colloidal
925
+ CSH, as measured in previous studies, by estimating input parameters from the vari-
926
+ ables in the target systems. Furthermore, the 2SFD model highlights the presence of
927
+ non-Gaussian diffusion, which can be attributed to the heterogeneous microstructure of
928
+ cement pastes. The presence of non-Gaussianity in the displacement distribution, char-
929
+ acterized by heavier tails than those of the Gaussian distribution, is quite critical for
930
+ the accurate evaluation of the long-term reliability probability of reinforced concrete
931
+ structures. The deviation in the shape of the tail of the Gaussian distribution obtained
932
+ when solving the diffusion equation using a comparable diffusion coefficient in the
933
+ 2SFD model may lead to an underestimation of the conventional method’s reliability.
934
+ In addition, while some numerical approaches utilize the lattice Boltzmann meth-
935
+ ods and/or random walk methods on virtual microstructures generated by previously
936
+ established hydration models, it is important to acknowledge that there is still ample
937
+ scope for improvement. In this regard, the development of a more conceptual stochas-
938
+ tic model, such as the 2SFD model rooted in statistical physics, for the examination of
939
+ diffusion phenomena in cementitious materials from a micro-perspective, and which
940
+ can be solved analytically owing to its straightforward theoretical framework, in addi-
941
+ tion to the structure-based model, would be of great significance to the field of cement
942
+ and concrete materials research. We are convinced that this work contributes novel in-
943
+ sights into the comprehension of diffusion of small molecules in cement and concrete
944
+ materials, and has potential for further applications in the field of cement and concrete
945
+ research.
946
+ References
947
+ [1] Fredrik P Glasser, Jacques Marchand, and Eric Samson. Durability of concrete —
948
+ Degradation phenomena involving detrimental chemical reactions. Cement and
949
+ Concrete Research, 38(2):226–246, 2008.
950
+ [2] Cheng-Feng Chang and Jing-Wen Chen. The experimental investigation of con-
951
+ crete carbonation depth. Cement and Concrete Research, 36(9):1760–1767,2006.
952
954
+ [3] B Bary and A Sellier. Coupled moisture—carbon dioxide–calcium transfer model
955
+ for carbonation of concrete. Cement and Concrete Research, 34(10):1859–1872,
956
+ 2004.
957
+ [4] A Morandeau, M Thi´ery, and P Dangla. Investigation of the carbonation mecha-
958
+ nism of CH and C-S-H in terms of kinetics, microstructure changes and moisture
959
+ properties. Cement and Concrete Research, 56:153–170, 2014.
960
+ [5] Ueli Angst, Bernhard Elsener, Claus K Larsen, and Øystein Vennesland. Criti-
961
+ cal chloride content in reinforced concrete — A review. Cement and Concrete
962
+ Research, 39(12):1122–1138, 2009.
963
+ [6] Hongyan Ma, Dongshuai Hou, and Zongjin Li. Two-scale modeling of transport
964
+ properties of cement paste: Formation factor, electrical conductivity and chloride
965
+ diffusivity. Computational Materials Science, 110:270–280, 2015.
966
+ [7] Adam Neville. The confused world of sulfate attack on concrete. Cement and
967
+ Concrete Research, 34(8):1275–1296, 2004.
968
+ [8] Christophe Carde, Raoul Franc¸ois, and Jean-Michel Torrenti. Leaching of both
969
+ calcium hydroxide and C-S-H from cement paste: Modeling the mechanical be-
970
+ havior. Cement and Concrete Research, 26(8):1257–1268, 1996.
971
+ [9] Christophe Carde and Raoul Franc¸ois. Effect of the leaching of calcium hydrox-
972
+ ide from cement paste on mechanical and physical properties. Cement and Con-
973
+ crete Research, 27(4):539–550, 1997.
974
+ [10] M A B Promentilla, T Sugiyama, T Hitomi, and N Takeda. Quantification of
975
+ tortuosity in hardened cement pastes using synchrotron-based X-ray computed
976
+ microtomography. Cement and Concrete Research, 39(6):548–557, 2009.
977
+ [11] Ravi A Patel, Quoc Tri Phung, Suresh C Seetharam, Janez Perko, Diederik
978
+ Jacques, Norbert Maes, Geert De Schutter, Guang Ye, and Klaas Van Breugel.
979
+ Diffusivity of saturated ordinary Portland cement-based materials: A critical re-
980
+ view of experimental and analytical modelling approaches. Cement and Concrete
981
+ Research, 90:52–72, 2016.
982
+ [12] Akira Hatanaka, Yogarajah Elakneswaran, Kiyofumi Kurumisawa, and Toyoharu
983
+ Nawa. The Impact of Tortuosity on Chloride Ion Diffusion in Slag-Blended Ce-
984
+ mentitious Materials. Journal of Advanced Concrete Technology, 15(8):426–439,
985
+ 2017.
986
+ [13] M H N Yio, H S Wong, and N R Buenfeld. 3D pore structure and mass transport
987
+ properties of blended cementitious materials. Cement and Concrete Research,
988
+ 117:23–37, 2019.
989
+ [14] Jinbo Yang and Peng Zhang. A concise pore structure model for predicting the
990
+ effective ion diffusion coefficients of cementitious materials. Construction and
991
+ Building Materials, 265:120321, 2020.
992
+ 19
993
+
994
+ [15] Yun Gao, Kai Wu, Zhidan Rong, and Qiang Yuan. A hybrid analytical-numerical
995
+ algorithm based general modeling framework of molecular diffusivity in cement
996
+ paste. International Journal of Heat and Mass Transfer, 180:121774, 2021.
997
+ [16] Robert Zwanzig. Nonequilibrium Statistical Mechanics. Oxford university press,
998
+ 2001.
999
+ [17] E Nelson. Dynamical Theories of Brownian Motion. Princeton University Press,
1000
+ 2020.
1001
+ [18] Ralf Metzler. Superstatistics and non-gaussian diffusion. The European Physical
1002
+ Journal Special Topics, 229(5):711–728, 2020.
1003
+ [19] Aleksei V Chechkin, Flavio Seno, Ralf Metzler, and Igor M Sokolov. Brown-
1004
+ ian yet non-gaussian diffusion: from superstatistics to subordination of diffusing
1005
+ diffusivities. Physical Review X, 7(2):021002, 2017.
1006
+ [20] Mykyta V Chubynsky and Gary W Slater. Diffusing diffusivity: a model for
1007
+ anomalous, yet brownian, diffusion.
1008
+ Physical review letters, 113(9):098302,
1009
+ 2014.
1010
+ [21] Takashi Uneyama, Tomoshige Miyaguchi, and Takuma Akimoto.
1011
+ Relaxation
1012
+ functions of the ornstein-uhlenbeck process with fluctuating diffusivity. Physi-
1013
+ cal Review E, 99(3):032127, 2019.
1014
+ [22] Takashi Uneyama, Tomoshige Miyaguchi, and Takuma Akimoto.
1015
+ Fluctu-
1016
+ ation analysis of time-averaged mean-square displacement for the langevin
1017
+ equation with time-dependent and fluctuating diffusivity.
1018
+ Physical Review E,
1019
+ 92(3):032140, 2015.
1020
+ [23] Tomoshige Miyaguchi, Takuma Akimoto, and Eiji Yamamoto. Langevin equation
1021
+ with fluctuating diffusivity: A two-state model. Physical Review E, 94(1):012109,
1022
+ 2016.
1023
+ [24] Tomoshige Miyaguchi. Elucidating fluctuating diffusivity in center-of-mass mo-
1024
+ tion of polymer models with time-averaged mean-square-displacement tensor.
1025
+ Physical Review E, 96(4):042501, 2017.
1026
+ [25] Francesco Rusciano,
1027
+ Raffaele Pastore,
1028
+ and Francesco Greco.
1029
+ Fickian
1030
+ non-gaussian diffusion in glass-forming liquids.
1031
+ Physical Review Letters,
1032
+ 128(16):168001, 2022.
1033
+ [26] Raffaele Pastore, Antonio Ciarlo, Giuseppe Pesce, Francesco Greco, and Anto-
1034
+ nio Sasso. Rapid fickian yet non-gaussian diffusion after subdiffusion. Physical
1035
+ Review Letters, 126(15):158003, 2021.
1036
+ [27] Jeongmin Kim, Chanjoong Kim, and Bong June Sung. Simulation study of seem-
1037
+ ingly fickian but heterogeneous dynamics of two dimensional colloids. Physical
1038
+ review letters, 110(4):047801, 2013.
1039
1041
+ [28] Jae-Hyung Jeon, Matti Javanainen, Hector Martinez-Seara, Ralf Metzler, and
1042
+ Ilpo Vattulainen. Protein crowding in lipid bilayers gives rise to non-gaussian
1043
+ anomalous lateral diffusion of phospholipids and proteins. Physical Review X,
1044
+ 6(2):021006, 2016.
1045
+ [29] Bo Wang, Stephen M Anthony, Sung Chul Bae, and Steve Granick. Anomalous
1046
+ yet brownian. Proceedings of the National Academy of Sciences, 106(36):15160–
1047
+ 15164, 2009.
1048
+ [30] Jordan Hristov. On the diffusion with decaying time-dependent diffusivity: For-
1049
+ mulations and approximate solutions pertinent to diffusion in concretes. Methods
1050
+ of Mathematical Modelling and Computation for Complex Systems, pages 1–44,
1051
+ 2022.
1052
+ [31] Lan-zhen Yu and Jian-kang Chen. A new evolution model of concrete porosity
1053
+ under continuous hydration. International Journal of Modelling, Identification
1054
+ and Control, 26(4):345–352, jan 2016.
1055
+ [32] Hui Xu and Jian-kang Chen. Coupling effect of corrosion damage on chloride
1056
+ ions diffusion in cement based materials. Construction and Building Materials,
1057
+ 243:118225, 2020.
1058
+ [33] A K Suryavanshi, J D Scantlebury, and S B Lyon. Mechanism of Friedel’s salt for-
1059
+ mation in cements rich in tri-calcium aluminate. Cement and Concrete Research,
1060
+ 26(5):717–727, 1996.
1061
+ [34] Paul Brown and James Bothe. The system CaO-Al2O3-CaCl2-H2O at 23±2 °C
1062
+ and the mechanisms of chloride binding in concrete. Cement and Concrete Re-
1063
+ search, 34(9):1549–1553, 2004.
1064
+ [35] Mingzhong Zhang, Guang Ye, and Klaas van Breugel.
1065
+ Microstructure-based
1066
+ modeling of water diffusivity in cement paste. Construction and Building Ma-
1067
+ terials, 25(4):2046–2052, 2011.
1068
+ [36] Qing-feng Liu, Jian Yang, Jin Xia, Dave Easterbrook, Long-yuan Li, and Xian-
1069
+ Yang Lu.
1070
+ A numerical study on chloride migration in cracked concrete us-
1071
+ ing multi-component ionic transport models. Computational Materials Science,
1072
+ 99:396–416, 2015.
1073
+ [37] E Walther, M Bogdan, R Bennacer, and C De Sa. Cement paste morphologies
1074
+ and effective diffusivity: using the Lattice Boltzmann method. European Journal
1075
+ of Environmental and Civil Engineering, 20(6):667–679, jul 2016.
1076
+ [38] Qing-feng Liu, Gan-lin Feng, Jin Xia, Jian Yang, and Long-yuan Li. Ionic trans-
1077
+ port features in concrete composites containing various shaped aggregates: a nu-
1078
+ merical study. Composite Structures, 183:371–380, 2018.
1079
+ [39] Cheng Liu, Chen Qian, Rusheng Qian, Zhiyong Liu, Hongxia Qiao, and Yun-
1080
+ sheng Zhang. Numerical prediction of effective diffusivity in hardened cement
1081
+ paste between aggregates using different shapes of cement powder. Construction
1082
+ and Building Materials, 223:806–816, 2019.
1083
+ 21
1084
+
1085
+ [40] Cheng Liu, Zhiyong Liu, and Yunsheng Zhang. A multi-scale framework for
1086
+ modelling effective gas diffusivity in dry cement paste: Combined effects of
1087
+ surface, Knudsen and molecular diffusion.
1088
+ Cement and Concrete Research,
1089
+ 131:106035, 2020.
1090
+ [41] Cheng Liu, Fazhou Wang, and Mingzhong Zhang. Modelling of 3D microstruc-
1091
+ ture and effective diffusivity of fly ash blended cement paste. Cement and Con-
1092
+ crete Composites, 110:103586, 2020.
1093
+ [42] Cheng Liu, Beatrice Baudet, and Mingzhong Zhang. Lattice Boltzmann mod-
1094
+ elling of ionic diffusivity in non-saturated limestone blended cement paste. Con-
1095
+ struction and Building Materials, 316:126060, 2022.
1096
+ [43] Wei Zhang, Dongshuai Hou, and Hongyan Ma.
1097
+ Multi-scale study water and
1098
+ ions transport in the cement-based materials:from molecular dynamics to random
1099
+ walk. Microporous and Mesoporous Materials, 325:111330, 2021.
1100
+ [44] Wang Hailong, Chen Zhiwei, Zhang Jian, Zheng Jianjun, Sun Xiaoyan, and
1101
+ Li Jianhua. Numerical Scheme for Predicting Chloride Diffusivity of Concrete.
1102
+ Journal of Materials in Civil Engineering, 33(9):4021237, sep 2021.
1103
+ [45] Percolation of phases in a three-dimensional cement paste microstructural model.
1104
+ Cement and Concrete Research, 21(2):325–344, 1991.
1105
+ [46] E J Garboczi and D P Bentz. Computer simulation of the diffusivity of cement-
1106
+ based materials. Journal of Materials Science, 27(8):2083–2092, 1992.
1107
+ [47] Dale Bentz. CEMHYD3D: A Three-Dimensional Cement Hydration and Mi-
1108
+ crostructure Development Modeling Package. Version 3.0., 2005.
1109
+ [48] K van Breugel. Numerical simulation of hydration and microstructural devel-
1110
+ opment in hardening cement-based materials (I) theory. Cement and Concrete
1111
+ Research, 25(2):319–331, 1995.
1112
+ [49] Jeffrey W Bullard, Barbara Lothenbach, Paul E Stutzman, and Kenneth A Sny-
1113
+ der. Coupling thermodynamics and digital image models to simulate hydration
1114
+ and microstructure development of portland cement pastes. Journal of Materials
1115
+ Research, 26(4):609–622, 2011.
1116
+ [50] Pan Feng, Changwen Miao, and Jeffrey W Bullard. A model of phase stability,
1117
+ microstructure and properties during leaching of portland cement binders. Cement
1118
+ and Concrete Composites, 49:9–19, 2014.
1119
+ [51] Koichi Maekawa, Tetsuya Ishida, and Toshiharu Kishi. Multi-scale Modeling of
1120
+ Concrete Performance. Journal of Advanced Concrete Technology, 1(2):91–126,
1121
+ 2003.
1122
+ [52] C Pignat, P Navi, and K Scrivener. Simulation of cement paste microstructure
1123
+ hydration, pore space characterization and permeability determination. Materials
1124
+ and Structures, 38(4):459–466, 2005.
1125
+ 22
1126
+
1127
+ [53] Shashank Bishnoi and Karen L Scrivener. µic: A new platform for modelling the
1128
+ hydration of cements. Cement and Concrete Research, 39(4):266–274, 2009.
1129
+ [54] Mingzhong Zhang, Guang Ye, and Klaas van Breugel. Modeling of ionic diffu-
1130
+ sivity in non-saturated cement-based materials using lattice Boltzmann method.
1131
+ Cement and Concrete Research, 42(11):1524–1533, 2012.
1132
+ [55] Hamlin M Jennings. A model for the microstructure of calcium silicate hydrate
1133
+ in cement paste. Cement and Concrete Research, 30(1):101–116, 2000.
1134
+ [56] Roland J.-M. Pellenq, Akihiro Kushima, Rouzbeh Shahsavari, Krystyn J Van
1135
+ Vliet, Markus J Buehler, Sidney Yip, and Franz-Josef Ulm. A realistic molecular
1136
+ model of cement hydrates. Proceedings of the National Academy of Sciences,
1137
+ 106(38):16102–16107, sep 2009.
1138
+ [57] Ratan K Mishra, Aslam Kunhi Mohamed, David Geissbühler, Hegoi Manzano,
1139
+ Tariq Jamil, Rouzbeh Shahsavari, Andrey G Kalinichev, Sandra Galmarini, Lei
1140
+ Tao, Hendrik Heinz, Roland Pellenq, Adri C T van Duin, Stephen C Parker,
1141
+ Robert J Flatt, and Paul Bowen. cemff: A force field database for cementitious
1142
+ materials including validations, applications and opportunities. Cement and Con-
1143
+ crete Research, 102:68–89, 2017.
1144
+ [58] Masood Valavi, Ziga Casar, Aslam Kunhi Mohamed, Paul Bowen, and San-
1145
+ dra Galmarini. Molecular dynamic simulations of cementitious systems using
1146
+ a newly developed force field suite ERICA FF. Cement and Concrete Research,
1147
+ 154:106712, 2022.
1148
+ [59] Byung Hwan Oh and Seung Yup Jang. Prediction of diffusivity of concrete based
1149
+ on simple analytic equations. Cement and Concrete Research, 34(3):463–480,
1150
+ 2004.
1151
+ [60] Nattapong Damrongwiriyanupap, Stefan Scheiner, Bernhard Pichler, and Chris-
1152
+ tian Hellmich. Self-Consistent Channel Approach for Upscaling Chloride Diffu-
1153
+ sivity in Cement Pastes. Transport in Porous Media, 118(3):495–518, 2017.
1154
+ [61] Ravi A Patel, Janez Perko, Diederik Jacques, Geert De Schutter, Guang Ye, and
1155
+ Klaas Van Bruegel. Effective diffusivity of cement pastes from virtual microstruc-
1156
+ tures: Role of gel porosity and capillary pore percolation.
1157
+ Construction and
1158
+ Building Materials, 165:833–845, 2018.
1159
+ [62] S E Chidiac and M Shafikhani. Phenomenological model for quantifying concrete
1160
+ chloride diffusion coefficient. Construction and Building Materials, 224:773–
1161
+ 784, 2019.
1162
+ [63] Mohamad Achour, François Bignonnet, Jean-François Barthélemy, Emmanuel
1163
+ Rozière, and Ouali Amiri. Multi-scale modeling of the chloride diffusivity and
1164
+ the elasticity of Portland cement paste. Construction and Building Materials,
1165
+ 234:117124, 2020.
1166
+ 23
1167
+
1168
+ [64] M Shafikhani and S E Chidiac. A holistic model for cement paste and concrete
1169
+ chloride diffusion coefficient. Cement and Concrete Research, 133:106049, 2020.
1170
+ [65] Yushan Gu, Benoît Bary, Alisa Machner, Klaartje De Weerdt, Gerd Bolte, and
1171
+ Mohsen Ben Haha. Multi-scale strategy to estimate the mechanical and diffusive
1172
+ properties of cementitious materials prepared with CEM II/C-M. Cement and
1173
+ Concrete Composites, 131:104537, 2022.
1174
+ [66] N Ukrainczyk and E A B Koenders. Representative elementary volumes for 3D
1175
+ modeling of mass transport in cementitious materials. Modelling and Simulation
1176
+ in Materials Science and Engineering, 22(3):35001, 2014.
1177
+ [67] Gustave E Archie. The electrical resistivity log as an aid in determining some
1178
+ reservoir characteristics. Transactions of the AIME, 146(01):54–62, 1942.
1179
+ [68] T C Powers and T L Brownyard. Studies of the Physical Properties of Hardened
1180
+ Portland Cement Paste. ACI Journal Proceedings, 43(9), 1946.
1181
+ [69] Tetsuji Yamaguchi, Kumi Negishi, Seiichi Hoshino, and Tadao Tanaka. Modeling
1182
+ of diffusive mass transport in micropores in cement based materials. Cement and
1183
+ Concrete Research, 39(12):1149–1155, 2009.
1184
+ [70] S Lu, E N Landis, and D T Keane. X-ray microtomographic studies of pore
1185
+ structure and permeability in Portland cement concrete. Materials and Structures,
1186
+ 39(6):611–620, 2006.
1187
+ [71] H S Wong and N R Buenfeld. Patch microstructure in cement-based materials:
1188
+ Fact or artefact? Cement and Concrete Research, 36(5):990–997, 2006.
1189
+ [72] M H N Yio, H S Wong, and N R Buenfeld. Representative elementary volume
1190
+ (REV) of cementitious materials from three-dimensional pore structure analysis.
1191
+ Cement and Concrete Research, 102:187–202, 2017.
1192
+ [73] Qing-feng Liu, Muhammad Farjad Iqbal, Jian Yang, Xian-yang Lu, Peng Zhang,
1193
+ and Momina Rauf. Prediction of chloride diffusivity in concrete using artificial
1194
+ neural network: Modelling and performance evaluation. Construction and Build-
1195
+ ing Materials, 268:121082, 2021.
1196
+ [74] Tomoshige Miyaguchi, Takashi Uneyama, and Takuma Akimoto. Brownian mo-
1197
+ tion with alternately fluctuating diffusivity: Stretched-exponential and power-law
1198
+ relaxation. Physical Review E, 100(1):12116, jul 2019.
1199
+ [75] CD Lawrence. Transport of oxygen through concrete. In Proceedings of Concrete
1200
+ Society, Meeting on Chemistry and Chemically-Related Properties of Cement,
1201
+ Imperial College, London, pages 277–293, 1984.
1202
+ [76] Paul D Tennis and Hamlin M Jennings. A model for two types of calcium silicate
1203
+ hydrate in the microstructure of Portland cement pastes. Cement and Concrete
1204
+ Research, 30(6):855–863, 2000.
1205
+ 24
1206
+
1207
+ [77] Hamlin M Jennings, Jeffrey J Thomas, Julia S Gevrenov, Georgios Constan-
1208
+ tinides, and Franz-Josef Ulm. A multi-technique investigation of the nanoporosity
1209
+ of cement paste. Cement and Concrete Research, 37(3):329–336, 2007.
1210
+ [78] Hamlin M Jennings. Refinements to colloid model of C-S-H in cement: CM-II.
1211
+ Cement and Concrete Research, 38(3):275–289, 2008.
1212
+ [79] Sydney Chapman and Thomas George Cowling. The Mathematical Theory of
1213
+ Non-uniform Gases: an Account of the Kinetic Theory of Viscosity, Thermal Con-
1214
+ duction and Diffusion in Gases. Cambridge University Press, 3rd edition, 1990.
1215
+ [80] Won-Tae Koo, Shaopeng Qiao, Alana F Ogata, Gaurav Jha, Ji-Soo Jang, Vivian T
1216
+ Chen, Il-Doo Kim, and Reginald M Penner. Accelerating palladium nanowire h2
1217
+ sensors using engineered nanofiltration. ACS nano, 11(9):9276–9285, 2017.
1218
+ [81] Shuzhen Lv, Kangyao Zhang, Ling Zhu, Dianping Tang, Reinhard Niessner, and
1219
+ Dietmar Knopp. H2-based electrochemical biosensor with pd nanowires@ zif-67
1220
+ molecular sieve bilayered sensing interface for immunoassay. Analytical Chem-
1221
+ istry, 91(18):12055–12062, 2019.
1222
+ [82] J. R. Dorfman, Henk van Beijeren, and T. R. Kirkpatrick. Contemporary Kinetic
1223
+ Theory of Matter. Cambridge University Press, 2021.
1224
+ [83] Christoph Dellago, Henk v Beijeren, Debabrata Panja, and JR Dorfman. Field-
1225
+ dependent collision frequency of the two-dimensional driven random lorentz gas.
1226
+ Physical Review E, 64(3):036217, 2001.
1227
+ [84] Keliu Wu, Xiangfang Li, Chenchen Wang, Wei Yu, and Zhangxin Chen. Model
1228
+ for Surface Diffusion of Adsorbed Gas in Nanopores of Shale Gas Reservoirs.
1229
+ Industrial & Engineering Chemistry Research, 54(12):3225–3236, apr 2015.
1230
+ [85] Y D Chen and R T Yang. Concentration dependence of surface diffusion and
1231
+ zeolitic diffusion. AIChE Journal, 37(10):1579–1582, oct 1991.
1232
+ [86] Daniel T Gillespie. A general method for numerically simulating the stochastic
1233
+ time evolution of coupled chemical reactions. J. Comput. Phys., 22(4):403–434,
1234
+ 1976.
1235
+ [87] Alfred B Bortz, Malvin H Kalos, and Joel L Lebowitz. A new algorithm for monte
1236
+ carlo simulation of ising spin systems. J. Comput. Phys., 17(1):10–18, 1975.
1237
+ [88] M Boumaaza, B Huet, G Pham, P Turcry, A Aït-Mokhtar, and C Gehlen. A
1238
+ new test method to determine the gaseous oxygen diffusion coefficient of cement
1239
+ pastes as a function of hydration duration, microstructure, and relative humidity.
1240
+ Materials and Structures, 51(2):51, 2018.
1241
+ [89] Yves F Houst and Folker H Wittmann. Influence of porosity and water content
1242
+ on the diffusivity of CO2 and O2 through hydrated cement paste. Cement and
1243
+ Concrete Research, 24(6):1165–1176, 1994.
1244
+ 25
1245
+
1246
+ [90] Mohammad Javad Abdolhosseini Qomi, Mathieu Bauchy, Franz-Josef Ulm, and
1247
+ Roland J.-M. Pellenq. Anomalous composition-dependent dynamics of nanocon-
1248
+ fined water in the interlayer of disordered calcium-silicates. The Journal of Chem-
1249
+ ical Physics, 140(5):54515, feb 2014.
1250
+ [91] Walter Kob, Claudio Donati, Steven J Plimpton, Peter H Poole, and Sharon C
1251
+ Glotzer. Dynamical heterogeneities in a supercooled lennard-jones liquid. Phys-
1252
+ ical review letters, 79(15):2827, 1997.
1253
+ [92] Bart Vorselaars, Alexey V Lyulin, K Karatasos, and MAJ Michels.
1254
+ Non-
1255
+ gaussian nature of glassy dynamics by cage to cage motion. Physical Review
1256
+ E, 75(1):011504, 2007.
1257
+ [93] Fumiaki Nakai, Yuichi Masubuchi, Yuya Doi, Takato Ishida, and Takashi Un-
1258
+ eyama. Fluctuating diffusivity emerges even in binary gas mixtures. Physical
1259
+ Review E, 107(1):014605, 2023.
1260
+ [94] Aneesur Rahman. Correlations in the motion of atoms in liquid argon. Physical
1261
+ review, 136(2A):A405, 1964.
1262
+ [95] Dale P Bentz, Edward J Garboczi, Claus J Haecker, and Ole M Jensen. Effects of
1263
+ cement particle size distribution on performance properties of Portland cement-
1264
+ based materials. Cement and Concrete Research, 29(10):1663–1671, 1999.
1265
+ [96] Jian Zhang, Hai-long Wang, Zhi-wei Chen, Qing-feng Liu, Xiao-yan Sun, and
1266
+ Jian-jun Zheng. Experimental investigation and numerical simulation for chloride
1267
+ diffusivity of cement paste with elliptical cement particles. Construction and
1268
+ Building Materials, 337:127616, 2022.
1269
+ [97] Dale P Bentz, Daniel A Quenard, Veronique Baroghel-Bouny, Edward J Gar-
1270
+ boczi, and Hamlin M Jennings. Modelling drying shrinkage of cement paste and
1271
+ mortar Part 1. Structural models from nanometres to millimetres. Materials and
1272
+ Structures, 28(8):450–458, 1995.
1273
+ 26
1274
+
PdFKT4oBgHgl3EQfgi6G/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
QNE0T4oBgHgl3EQfkQEN/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53859644ddcc4a399461b85c92aa960eba61e120300b097ddeaf1ad57f4703e0
3
+ size 1245229
RtE2T4oBgHgl3EQfWQe7/content/tmp_files/2301.03832v1.pdf.txt ADDED
@@ -0,0 +1,1216 @@
1
+ Video Semantic Segmentation with Inter-Frame Feature
2
+ Fusion and Inner-Frame Feature Refinement
3
+ Jiafan Zhuang, Zilei Wang∗, Junjie Li
4
+ National Engineering Laboratory for Brain-inspired Intelligence Technology and
5
+ Application, University of Science and Technology of China, Hefei 230027, China
6
+ Abstract
7
+ Video semantic segmentation aims to generate accurate semantic maps for each
8
+ video frame. To this end, many works are dedicated to integrating diverse information
9
+ from consecutive frames to enhance the features for prediction, where a feature
10
+ alignment procedure via estimated optical flow is usually required. However, the
11
+ optical flow would inevitably suffer from inaccuracy, and then introduce noises
12
+ in feature fusion and further result in unsatisfactory segmentation results. In
13
+ this paper, to tackle the misalignment issue, we propose a spatial-temporal fu-
14
+ sion (STF) module to model dense pairwise relationships among multi-frame
15
+ features. Different from previous methods, STF uniformly and adaptively fuses
16
+ features at different spatial and temporal positions, and avoids error-prone opti-
17
+ cal flow estimation. Besides, we further exploit feature refinement within a sin-
18
+ gle frame and propose a novel memory-augmented refinement (MAR) module to
19
+ tackle difficult predictions among semantic boundaries. Specifically, MAR can
20
+ store the boundary features and prototypes extracted from the training samples,
21
+ which together form the task-specific memory, and then use them to refine the
22
+ features during inference. Essentially, MAR can move the hard features closer to
23
+ the most likely category and thus make them more discriminative. We conduct
24
+ extensive experiments on Cityscapes and CamVid, and the results show that
25
+ our proposed methods significantly outperform previous methods and achieve
26
+ ∗Corresponding author.
27
+ Email addresses: jfzhuang@mail.ustc.edu.cn (Jiafan Zhuang), zlwang@ustc.edu.cn
28
+ (Zilei Wang), hnljj@mail.ustc.edu.cn (Junjie Li)
29
+ Preprint submitted to Pattern Recognition
30
+ January 11, 2023
31
+ arXiv:2301.03832v1 [cs.CV] 10 Jan 2023
32
+
33
+ the state-of-the-art performance. Code and pretrained models are available at
34
+ https://github.com/jfzhuang/ST Memory.
35
+ Keywords:
36
+ Video semantic segmentation, Spatial-temporal feature fusion,
37
+ Memory mechanism, Feature refinement.
38
+ 1. Introduction
39
+ Semantic segmentation aims to assign each pixel in scene images a se-
40
+ mantic class, which is one of the fundamental tasks in computer vision.
41
+ In
42
+ recent years, image semantic segmentation has achieved unprecedented per-
43
+ formance benefiting from the great progress of deep convolutional neural net-
44
+ work (DCNN) [1] and the construction of various datasets (e.g., Cityscapes [2] and
45
+ CamVid [3]). However, many real-world applications have strong demands for
46
+ accurate video semantic segmentation, e.g., robotics, autonomous driving, and
47
+ video surveillance. Actually, video data offer richer information than static im-
48
+ ages, e.g., diverse presentations from multiple frames and temporal consistency
49
+ prior. Thus video can provide good potential to achieve more accurate seman-
50
+ tic segmentation. The key is how to produce more discriminative features by
51
+ exploiting the characteristics of videos.
52
+ A natural way to enhance video features is to integrate the diverse informa-
53
+ tion of consecutive frames [4, 5]. Specifically, the feature alignment is commonly
54
+ performed via the optical flow based feature warping, which ensures that pixel-
55
+ level features at the same spatial position represent the identical object, and
56
+ then the temporal feature fusion is conducted for each pixel. Evidently, the
57
+ accurate optical flow is critical for feature fusion. However, the optical flow
58
+ estimation inevitably suffers from inaccuracy in the boundary areas due to ob-
59
+ ject occlusion and plain texture [6, 7]. If the features are not well-aligned, the
60
+ noise would be introduced. Consequently, the quality of fused features would
61
+ be reduced and the segmentation performance would be deteriorated.
62
+ Besides, after aggregating information from consecutive frames, can we fur-
63
+ ther refine the fused feature? Different from inter-frame feature fusion in video
64
+ 2
65
+
66
+ [Figure 1 schematic omitted from the text extraction: panels (a), (b) and (c) composed of Backbone, Classifier, Feature Fusion, Feature Refinement, Inter-Frame Feature Fusion and Inner-Frame Feature Refinement blocks acting on frame features f_{t-1}, f_t, f_{t+1}.]
92
+ Figure 1: Architecture illustrations for different methods. (a) Feature fusion in video
93
+ segmentation methods (e.g., NetWarp [4] and GRFP [5]). (b) Feature refinement in image
94
+ segmentation methods (e.g., DenseCRF [8] and SegFix [9]). (c) Our proposed method. Best
95
+ viewed in color.
96
+ segmentation methods, some image-based methods adopt the post-processing
97
+ techniques to optimize the features for prediction. For example, DenseCRF [8]
98
+ uses a graph structure to model pairwise potentials on all pixels and iteratively
99
+ adjusts the feature by optimizing an energy function. Essentially, it uses simi-
100
+ lar features to mutually enhance themselves. SegFix [9] proposes to replace the
101
+ difficult boundary features with some better ones, whose locations are predicted
102
+ by a network and often lie around the boundary areas in practice.
103
+ Actually, feature fusion is proposed to aggregate useful information from
104
+ different frames while feature refinement is designed for correcting error-prone
105
+ features, which are potentially complementary. Based on this motivation, in
106
+ this paper, we aim to improve the accuracy of video semantic segmentation by
107
+ simultaneously considering inter-frame feature fusion and inner-frame feature
108
+ refinement, as shown in Figure 1. For the inter-frame fusion, we need to tackle
109
+ the feature misalignment issue.
110
+ To this end, we propose a spatial-temporal
111
+ fusion (STF) module that uniformly fuses the features at different spatial and
112
+ temporal positions and does not require explicit feature alignment via error-
113
+ prone optical flows. Here the transformer [10] is particularly adopted due to
114
+ the power to model long-range dependencies.
115
+ To be specific, the encoder is
116
+ fed with the features extracted from consecutive frames, and the decoder is
117
+ used to generate the prediction features by retrieving the current frame from
118
+ 3
119
+
120
+ the encoded features. In particular, we utilize the self-attention mechanism in
121
+ transformer to guide the feature fusion in latent space, in which more similar
122
+ features are supposed to be more likely to represent the same object. For an
123
+ image pixel, hence, STF would integrate multiple similar features at different
124
+ temporal and spatial positions, rather than only the temporally-aligned features
125
+ in the previous works [4, 5].
126
+ In addition, an image with the resolution of (1024, 2048) would typically
127
+ produce the features with the resolution of (128, 256). The transformer taking
128
+ three frames needs to process 3 × 128 × 256 = 98304 pixel-level features, which
129
+ results in unacceptable computation and memory cost with O(N^2) complexity
130
+ when computing affinity matrix. Inspired by a recent work [11], we propose an
131
+ interlaced cross-self attention (ICSA) mechanism to divide the dense
132
+ affinity matrix computation in transformer as the product of a long-range cross-
133
+ attention and a short-range self-attention, which can greatly reduce the memory
134
+ consumption.
135
+ On the other hand, we propose inner-frame feature refinement to further ad-
136
+ just the fused features for better prediction without devising more complicated
137
+ network structure. In this work, we focus on refining the hard features that are
138
+ error-prone and always lie in the boundary areas of different classes [9]. To this
139
+ end, we propose a novel memory-augmented refinement (MAR) module that
140
+ uses the stored features in memory to augment the hard features. Actually, this
141
+ is motivated by an intuitive observation that humans would retrieve memory to
142
+ enhance the judgement when facing semantically ambiguous contents. Here the
143
+ memory represents the experience from the training samples. For each semantic
144
+ category, we particularly store the hard features and their corresponding class
145
+ prototypes (refer to the mean feature representing a single category), which to-
146
+ gether form a key-value memory bank. During inference, a hard feature would
147
+ be refined by the class prototypes, where the weights of different classes are
148
+ computed by comparing it with the stored hard features in the memory. In this
149
+ way, the discriminativeness of boundary features would be enhanced since MAR
150
+ would make them move closer to the most likely category. Evidently, MAR has
151
+ 4
152
+
153
+ good interpretability and can be conveniently inserted into different models as
154
+ an independent module.
155
+ We experimentally evaluate the proposed method on the Cityscapes and
156
+ CamVid datasets. The results validate the effectiveness of our STF and MAR
157
+ to improve the quality of features, and their combination can achieve the state-
158
+ of-the-art segmentation performance.
159
+ The contributions of this work are summarized as
160
+ • We design a novel video semantic segmentation framework by simultane-
161
+ ously considering inter-frame feature fusion and inner-frame feature re-
162
+ finement, which can take advantage of both feature enhancement
163
+ techniques and effectively improve segmentation accuracy.
164
+ • We propose an effective spatial-temporal fusion module based on the trans-
165
+ former, which can uniformly aggregate the features at different spatial and
166
+ temporal positions and avoid error-prone temporal feature alignment.
167
+ • We propose a novel memory-augmented refinement module to particularly
168
+ refine hard features using the experience from training samples. In par-
169
+ ticular, the key-value memory is stored to refine the hard features closer
170
+ to the most likely category.
171
+ • We experimentally evaluate the effectiveness of our proposed methods,
172
+ and the results on Cityscapes and CamVid demonstrate the superiority of
173
+ our methods to previous state-of-the-art methods.
174
+ The rest of this paper is organized as follows. We review the related works on
175
+ image and video semantic segmentation, transformer and memory mechanism
176
+ in Section 2.
177
+ Section 3 provides the details of our approach, and Section 4
178
+ experimentally evaluates the proposed method. Finally, we conclude the work
179
+ in Section 5.
180
+ 5
181
+
182
+ 2. Related Work
183
+ 2.1. Image Semantic Segmentation
184
+ With the development of DCNN, more semantic segmentation networks
185
+ spring up. Specifically, fully convolutional networks (FCNs) [1] first use
186
+ the convolutional layers to replace fully-connected layers and can achieve better
187
+ performance. Inspired by FCN, many extensions have been proposed to ad-
188
+ vance image semantic segmentation. The dilated layers [12] are used to replace
189
+ the pooling layers, which can better balance the computational cost and size
190
+ of receptive fields. To further improve segmentation accuracy, spatial pyramid
191
+ pooling and atrous spatial pyramid pooling (ASPP) are used in PSPNet [13]
192
+ and DeepLab [12] to capture multi-scale contextual information.
193
+ Motivated
194
+ by ASPP, Peng et al. [14] proposes a stride spatial pyramid pooling (SSPP)
195
+ to capture multiscale semantic information from the high-level feature map,
196
+ while Lian et al. [15] proposes a cascaded hierarchical atrous pyramid pooling
197
+ module to simultaneously extract rich local detail characteristics and impor-
198
+ tant global contextual information. CENet [16] aggregates contextual cues via
199
+ densely upsampling the convolutional features of deep layer to the shallow decon-
200
+ volutional layers, which can fully explore multiple scale contextual information.
201
+ GPNet [17] densely captures and filters the multi-scale information in a gated
202
+ and pair-wise manner with a gated pyramid module and a cross-layer attention
203
+ module. Marin et al. [18] propose a novel architecture based on shared pyrami-
204
+ dal representation and fusion of heterogeneous features along the upsampling
205
+ path, which is effective for dense inference in images with large scale. Different
206
+ from enhancing features, EFNet [19] proposes to produce multiple enhanced im-
207
+ ages and fuses them to yield one new image, which can encourage the model to
208
+ exploit complementary information.
209
+ Differently, our proposed methods focus on exploiting both spatial and tem-
210
+ poral contexts to further improve the performance and can build upon any
211
+ existing image segmentation models.
212
+ 6
213
+
214
+ 2.2. Video Semantic Segmentation
215
+ Different from static images, videos embody rich temporal information that
216
+ can be exploited to improve the semantic segmentation performance. Existing
217
+ video semantic segmentation methods mainly fall into two categories. The first
218
+ category aims to accelerate inference speed by reusing the features in previous
219
+ frames. DFF [20] estimates the optical flow fields [21] from the key frame to
220
+ other frames and then propagates the high-level features using the predicted
221
+ optical flows.
222
+ Accel [22] proposes a reference branch to extract high-quality
223
+ segmentation from the key frames and an update branch to efficiently extract
224
+ low-quality segmentation from the current frames, and then fuses them to improve
225
+ the segmentation accuracy. DAVSS [7] designs a feature correction mechanism
226
+ to tackle distorted features after propagation due to inaccurate optical flows.
227
+ LERNet [23] proposes to propagate multi-level features from the key frame via a
228
+ temporal holistic attention module. TDNet [24] distributes several sub-networks
229
+ over sequential frames and then recomposes the extracted features for segmen-
230
+ tation via an attention propagation module. Differently, Liu et al. [25] designs
231
+ a new temporal knowledge distillation method to narrow the performance gap
232
+ between compact models and large models.
233
+ Another category focuses on improving segmentation accuracy by modeling
234
+ cross-frame relations to integrate information from consecutive frames. V2V [26]
235
+ utilizes a 3D CNN to perform a voxel-level prediction. STFCN [27] utilizes a
236
+ spatial-temporal LSTM over per-frame CNN features. However, these methods
237
+ cannot achieve high performance due to rough processing of different frames.
238
+ HDCNN [28] proposes a transition layer structure to make the pixel-wise label
239
+ prediction consistent with adjacent pixels across space and time domains. Recently,
240
+ some works [4, 5] propose to fuse features from multiple frames to produce the
241
+ better features for prediction. They usually adopt the optical flow to model
242
+ cross-frame relationships and perform temporal alignment by warping features.
243
+ In particular, NetWarp [4] uses a set of learnable weights to fuse multiple fea-
244
+ tures, and GRFP [5] proposes the gated recurrent units STGRU to estimate the
245
+ uncertainty of warped features and then conducts feature fusion on the areas
246
+ 7
247
+
248
+ with high reliability. Obviously, the optical flow is critical for feature align-
249
+ ment and would affect the final accuracy. However, the optical flow estimation
250
+ inevitably suffers from inaccuracy, especially for the occlusion areas and small
251
+ objects (e.g., pedestrian, pole) [7]. In this work, we follow the route of sec-
252
+ ond category and focus on improving segmentation accuracy. Different from
253
+ previous works, we propose to simultaneously model the spatial-temporal re-
254
+ lationship without feature alignment, which can avoid error-prone optical-flow
255
+ estimation. Furthermore, we propose to use memory to refine the prediction
256
+ features.
257
+ 2.3. Transformer
258
+ Transformer is originally proposed for the sequence-to-sequence machine
259
+ translation [10], and currently has dominated various NLP tasks. As the core
260
+ component of transformer, the self-attention is particularly suitable for mod-
261
+ eling long-range dependencies. Due to the success of transformer in the NLP
262
+ field, some works attempt to explore the benefits of transformer in computer
263
+ vision. DETR [29] first builds an object detection system based on transformer,
264
+ which can reason about relationships between objects and global context and
265
+ directly output the final set of predictions. Swin Transformer [30] designs a
266
+ novel shifted windowing scheme, which can limit attention computation to local
267
+ windows while also allowing for cross-window connection. It achieves an impressive
268
+ performance on a broad range of vision tasks. In this paper, we propose STF by
269
+ using transformer to model the spatial-temporal relationship among pixel-wise
270
+ features extracted from consecutive frames. To the best of our knowledge, this is the
271
+ first attempt to exploit the transformer in video semantic segmentation.
272
+ Recently, Action Transformer [31] and Actor Transformer [32] also adopt
273
+ transformer to model spatial-temporal relationship in action detection and group
274
+ action recognition tasks, which are closely related to our proposed STF. They
275
+ naturally adopt transformer for modeling proposal-context and proposal-proposal
276
+ relationship. But our proposed STF is different from these two works. STF is
277
+ designed for modeling pixel-wise relationship, which would involve huge memory
278
+ 8
279
+
280
+ and computation overhead issues. In this work, we propose interlaced cross-self
281
+ attention (ICSA) mechanism to tackle these issues and achieve efficient global
282
+ relationship modeling.
283
+ 2.4. External Memory
284
+ In DCNN, external memory is generally used to enhance feature represen-
285
+ tations by storing history data, which is especially useful for the tasks without
286
+ enough samples, e.g., life-long learning [33] and few-shot learning [34, 35]. For
287
+ example, MM-Net [35] proposes to store the representative features in the sup-
288
+ port set for one-shot learning, and then use them to predict the parameters of
289
+ feature extraction network on query images. Actually, this can make the query
290
+ features more relevant to the support features. In recent years, the memory
291
+ mechanism is also exploited to store long-range temporal contexts for video
292
+ tasks during inference. In video object detection, [36] proposes to store pixel-
293
+ level and instance-level features extracted from previous frames and then use
294
+ them to enhance the current frame.
295
+ LFB [37] proposes a long-term feature
296
+ bank for action localization to store supportive information extracted over the
297
+ entire span of a video, and then uses them to enhance the short-term features
298
+ extracted from short video clips. Different from the previous works that store
299
+ temporal [36, 37] or sample [35] contexts, in this paper we propose to store
300
+ the hard features and class prototypes from the training samples to form a
301
+ task-specific memory, and then use them to refine the boundary features during
302
+ inference.
303
+ 3. Our Approach
304
+ In this work, we aim to boost the accuracy of video semantic segmenta-
305
+ tion by enhancing the features for prediction. To this end, we first propose a
306
+ spatial-temporal fusion (STF) module to perform inter-frame feature fusion at
307
+ different spatial and temporal positions, which can avoid error-prone optical flow
308
+ estimation. Then we propose a memory-augmented refinement (MAR) module
309
+ 9
310
+
311
+ [Figure 2 schematic omitted from the text extraction: frames I_{T-1}, I_T, I_{T+1} pass through the Backbone to give f_{T-1}, f_T, f_{T+1}, which enter the Spatial-Temporal Fusion Module and then the Memory-Augmented Refinement Module (with its Feature Memory) before the Classifier outputs S_T; the video feature enhancement runs in a sliding-window manner. The embedded Chinese annotations read: 1. features of adjacent frames are fused by the spatial-temporal fusion module; 2. the fused feature is further refined by the memory-augmented refinement module.]
340
+ Figure 2: The framework of our proposed approach. First, the feature is extracted by
341
+ an image segmentation model for each frame. Then the features of consecutive frames are fed
342
+ into our proposed STF module to perform feature fusion. After that, the fused feature f
343
+
344
+ T
345
+ is further refined by our proposed MAR module, resulting in �fT . Finally, the segmentation
346
+ result is obtained by applying the classifier on �fT . Best viewed in color.
347
+ to further refine the boundary features during inference, which is essential to
348
+ utilize the stored experience from training samples. In the following, we first
349
+ introduce the framework of our proposed approach, and then elaborate on two
350
+ key modules, namely, STF and MAR.
351
+ 3.1. Framework
352
+ Our proposed video semantic segmentation framework is illustrated in Fig-
353
+ ure. 2. Formally, given a sequence of n video frames denoted by {I1, I2, · · · ,
354
+ In}, our purpose is to get the accurate semantic segmentation maps for every
355
+ video frame, denoted by {S1, S2, · · · , Sn}. Specifically, we first extract features
356
+ from each frame image using an off-the-shelf segmentation model. Then we con-
357
+ duct video feature enhancement for the current timestamp T with a sequence
358
+ of three-frame features {fT −1, fT , fT +1}, resulting in �fT for final prediction.
359
+ Finally, we apply the classifier on �fT to produce the segmentation result ST .
360
+ Since such a procedure can be performed in a sliding-window manner, we can
361
+ obtain the corresponding segmentation sequence.
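As a rough, hypothetical sketch of this sliding-window pipeline (the names backbone, stf, mar and classifier are placeholders for the modules described above, not the authors' released code):

def segment_video(frames, backbone, stf, mar, classifier):
    # frames: list of image tensors [1, 3, H, W]; the four modules are assumed
    # to be pre-trained nn.Modules following the pipeline of Figure 2.
    feats = [backbone(f) for f in frames]                  # per-frame features f_t
    results = []
    for t in range(1, len(frames) - 1):                    # sliding window of three frames
        fused = stf(feats[t - 1], feats[t], feats[t + 1])  # inter-frame fusion (STF)
        refined = mar(fused)                               # inner-frame refinement (MAR)
        results.append(classifier(refined).argmax(dim=1))  # segmentation map S_t
    return results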
362
+ In this work, we dedicate ourselves to enhancing video features to improve the segmen-
363
+ tation performance. To be specific, we first feed the sequence of frame features
364
+ 10
365
+
366
+ [Figure 3 schematic omitted from the text extraction: encoder and decoder stacks built from Interlaced Cross-Self Attention, Add & Norm and FFN blocks; the encoder consumes the concatenated f_{T-1}, f_T, f_{T+1}, and the decoder queries the encoded feature f'_Enc with f_T.]
394
+ Figure 3: Illustration of transformer based spatial-temporal fusion module. STF
395
+ consists of an encoder for modeling spatial-temporal relationships and feature encoding, and
396
+ a decoder for retrieving the feature of current frame from the encoded feature f
397
+
398
+ Enc. Best
399
+ viewed in color.
400
+ into our proposed STF module to capture spatial-temporal dependencies and
401
+ complete pixel-wise feature fusion, resulting in the fused feature f
402
+
403
+ T . After that,
404
+ our proposed MAR module further refines f
405
+
406
+ T into �fT by exploiting the stored
407
+ feature memory to enhance the discriminativeness of the boundary features. Ev-
408
+ idently, STF and MAR are the key components of our method that determine
409
+ the performance of video semantic segmentation.
410
+ 3.2. Spatial-Temporal Fusion
411
+ In this work, we propose a spatial-temporal fusion module to effectively
412
+ integrate the features of consecutive frames. Here it is expected that the spatial-
413
+ temporal relationship among consecutive frames is well modeled and the optical
414
+ flow estimation is avoided. In particular, we use the transformer [10] to perform
415
+ inter-frame fusion, which recently achieves the amazing performance in both
416
+ NLP and CV areas. Thus our STF consists of an encoder and a decoder, as
417
+ shown in Figure. 3.
418
+ 11
419
+
420
+ Encoder. In STF, the encoder is used to capture the spatial-temporal relation-
421
+ ships of pixel-level features. To this end, we concatenate the 2D features of
422
+ multiple frames {fT −1, fT , fT +1} to obtain a 3D feature fEnc ∈ Rd×3×H×W ,
423
+ where d is the dimension of pixel-level features, H and W represent the spatial
424
+ size of frame features. That is, there are 3HW features in total for processing in
425
+ the encoder. We first pass fEnc into our proposed interlaced cross-self attention
426
+ (ICSA) module to model dense spatial-temporal relationships, and the features
427
+ are adjusted by weighting on all features. Then we feed the new features into
428
+ feed-forward network (FFN) to perform feature transformation. Similar to [10],
429
+ we employ the residual connections for the attention module and FFN followed
430
+ by layer normalization. Finally, we obtain the encoded features f
431
+
432
+ Enc. Com-
433
+ pared with the previous optical flow based methods [4, 5], our proposed STF
434
+ uniformly aggregates all features at different spatial and temporal positions, and
435
+ no explicit feature alignment is required. Essentially, a single feature in STF
436
+ is implicitly aligned with multiple similar features by attention other than the
437
+ temporally-aligned ones. This is reasonable since the purpose of feature fusion
438
+ is to mutually enhance the features belonging to the same semantic class.
439
+ Decoder. In STF, the decoder is used to get the prediction features of the current
440
+ frame. To this end, we use the original feature of current frame to retrieve from
441
+ the encoded features f
442
+
443
+ Enc. To be specific, we first feed the feature of current
444
+ frame into an ICSA module to enhance the features, similarly to the encoder. Then
445
+ we pass the enhanced features together with f
446
+
447
+ Enc into another ICSA module
448
+ for cross attention and produce the features f
449
+
450
+ T with FFN. Different from the
451
+ previous one, here the enhanced fT serves as the query and f
452
+
453
+ Enc serves as the
454
+ key and value. Intuitively, we retrieve the encoded features from f
455
+
456
+ Enc for each
457
+ pixel-level feature in fT , and consequently the f
458
+
459
+ T would contain rich information
460
+ from other spatial and temporal positions.
461
+ Interlaced Cross-Self Attention. In the original transformer, the attention op-
462
+ eration would involve O(N 2) complexity given an input of size N (e.g., here
463
+ N = 3HW in our case), which is impractical to the video semantic segmen-
464
+ 12
465
+
466
+ query
467
+ value
468
+ key
469
+ permute
470
+ permute
471
+ permute
472
+ query
473
+ value
474
+ key
475
+ Positional
476
+ Encoding
477
+ Positional
478
+ Encoding
479
+ BWA
480
+ permute
481
+ output
482
+ output
483
+ query
484
+ value
485
+ key
486
+ BWA
487
+ Interlaced Cross-Self Attention (ICSA)
488
+ Interlaced Sparse Attention (ISA)
489
+ Positional
490
+ Encoding
491
+ Positional
492
+ Encoding
493
+ Long-range Cross-Attention
494
+ Short-range Self-Attention
495
+ permute
496
+ BWA
497
+ permute
498
+ BWA
499
+ Long-range Attention
500
+ Short-range Attention
501
+ Figure 4:
502
+ Illustration of differences between our interlaced cross-self attention
503
+ (ICSA) and interlaced sparse attention (ISA) [11]. ICSA takes query, key and
504
+ value separately for long-range cross-attention first and then conducts short-range self-attention
505
+ on the previous enhanced feature, which can be seamlessly integrated in the transformer
506
+ structure, especially for cross-attention module in the decoder. Besides, ICSA implements
507
+ necessary positional encoding and can deal with features from multiple frames directly, which
508
+ can uniformly model spatial-temporal relationships. Best viewed in color.
509
+ tation task since computation on pixel-level features would consume too much
510
+ memory. To tackle this issue, a recent work ISA [11] provides a successful so-
511
+ lution. It decomposes the whole attention calculation as the combination of
512
+ long-range and short-range sparse attention calculations, as shown in the upper
513
+ subplot in Figure. 4. In this way, it can retain the ability of modeling global
514
+ relationship while effectively reducing the memory consumption. However, ISA is
515
+ designed for self-attention mechanism like non-local [38], which is not well com-
516
+ patible with the transformer structure. Specifically, ISA takes a single feature
517
+ as input and performs enhancement by modeling inner relationship. Thus it
518
+ can not be directly integrated into cross attention in the transformer decoder.
519
+ Besides, how to insert necessary positional encoding and deal with features of
520
+ multiple frames are not considered by ISA.
521
+ In this work, we extend the original ISA into a more general form and propose
522
+ 13
523
+
524
+ [Figure 5 schematic omitted from the text extraction: the query, key and value features are divided and grouped into q, k, v blocks, and multi-head attention (MHA) is applied per block inside the Block-wise Attention Module.]
539
+ Figure 5: Illustration of block-wise attention (BWA). The input 3D features, i.e., query,
540
+ key and value, are spatially divided into patches with the same shape. Then we apply multi-
541
+ head attention (MHA) [10] operation on corresponding query, key and value patches indepen-
542
+ dently, and combine their results back to the entire one. Best viewed in color.
543
+ interlaced cross-self attention (ICSA), which can be seamlessly integrated into
544
+ transformer structure, as illustrated in the Figure. 4. Generally, we reorganize
545
+ ISA with long-range cross-attention and short-range self-attention operations.
546
+ First, we take query, key and value separately as inputs for cross-attention. Par-
547
+ ticularly, the query, key, and value are the same feature fEnc for the STF module
548
+ encoder, while the key and value are f
549
+
550
+ Enc and the query is the enhanced fT for
551
+ the STF module decoder. Here we directly take 3D features as input to uni-
552
+ formly model spatial-temporal relationships. For query and key, we supplement
553
+ the features with positional encoding. Particularly, we choose the learnable po-
554
+ sitional encoding by following [10]. In this work, we extend positional encoding
555
+ to the 3D version and they have the same shape as the corresponding input.
556
+ Following ISA, we divide features into k blocks with the same shape (e.g.,
557
+ k = 4 in Figure. 4 and Figure. 5). To model long-range cross-attention, we
558
+ harvest features with same spatial positions from different blocks in query, key
559
+ and value via permutation operation, respectively. Then we conduct block-wise
560
+ attention (BWA) operation for relationship modeling. As shown in Figure. 5, we
561
+ first divide input query, key and value features into pre-defined blocks. Then,
562
+ we apply multi-head attention (MHA) [10] on corresponding query, key and
563
+ 14
564
+
565
+ [Figure 6 schematic omitted from the text extraction: relationship computation between the test feature and the stored boundary features of class 0 and class 1, followed by feature refinement towards the corresponding class prototypes.]
571
+ Figure 6: Illustration of feature refinement. We first compute the relationships between
572
+ the stored boundary features and the test feature to estimate the class likelihoods. Then we
573
+ refine the test feature using the class prototypes, which essentially makes the feature move
574
+ closer to the most likely class. Best viewed in color.
575
+ value patches independently, and combine their results back to the entire one.
576
+ For short-range self-attention, we first permute the feature back to the original
577
+ positions and then regard it as query, key and value for the next attention cal-
578
+ culation. After adding positional encoding, we conduct BWA operation again
579
+ and obtain the final enhanced feature. With ICSA, STF can conveniently har-
580
+ vest global spatial-temporal information for feature enhancement while keeping
581
+ an efficient attention computation. Besides, if we take a single feature from one
582
+ frame as query, key and value, and remove positional encoding, ICSA would
583
+ degenerate into ISA. Evidently, ISA is a special case of ICSA.
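The sketch below illustrates the long-range/short-range factorization for the self-attention special case (i.e., ISA-like behaviour); positional encodings, the temporal axis and the cross-attention generalization of ICSA are omitted. mha_long and mha_short are assumed to be nn.MultiheadAttention modules with batch_first=True, and H, W must be divisible by the block counts Ph, Pw; all names are illustrative.

def _regroup(x, Ph, Pw, long_range):
    # x: [B, d, H, W] -> [groups, tokens, d] for block-wise attention.
    B, d, H, W = x.shape
    h, w = H // Ph, W // Pw                        # block size
    x = x.view(B, d, Ph, h, Pw, w)
    if long_range:   # tokens sharing the same in-block offset, gathered across blocks
        x = x.permute(0, 3, 5, 2, 4, 1).reshape(B * h * w, Ph * Pw, d)
    else:            # tokens inside the same block
        x = x.permute(0, 2, 4, 3, 5, 1).reshape(B * Ph * Pw, h * w, d)
    return x

def _ungroup(x, B, d, H, W, Ph, Pw, long_range):
    h, w = H // Ph, W // Pw
    if long_range:
        x = x.view(B, h, w, Ph, Pw, d).permute(0, 5, 3, 1, 4, 2)
    else:
        x = x.view(B, Ph, Pw, h, w, d).permute(0, 5, 1, 3, 2, 4)
    return x.reshape(B, d, H, W)

def interlaced_self_attention(x, mha_long, mha_short, Ph=4, Pw=4):
    # Long-range attention over far-apart pixels, then short-range attention
    # inside each block; together they approximate dense attention cheaply.
    B, d, H, W = x.shape
    g = _regroup(x, Ph, Pw, long_range=True)
    g = mha_long(g, g, g)[0]
    x = _ungroup(g, B, d, H, W, Ph, Pw, long_range=True)
    g = _regroup(x, Ph, Pw, long_range=False)
    g = mha_short(g, g, g)[0]
    return _ungroup(g, B, d, H, W, Ph, Pw, long_range=False)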
584
+ 3.3. Memory-Augmented Refinement
585
+ In this work, we propose a novel memory-augmented refinement module to
586
+ further refine the fused features. Different from previous works that explore
587
+ the relationship among the inference features [8, 39, 9], we focus on refining
588
+ the hard features (e.g., boundary features) using the memory from the training
589
+ samples.
590
+ The idea is illustrated in Figure. 6, and it is actually inspired by
591
+ an intuitive mechanism of humans to process semantically ambiguous contents.
592
+ Specifically, given a test feature during inference, it usually lies in the boundary
593
+ area of different classes in the feature space if it is hard to distinguish (e.g., with
594
+ 15
595
+
596
+ [Figure 7 schematic omitted from the text extraction: the input feature f is processed by scaled dot-product attention (SDA) against the key-value memory, followed by Add & Norm and a feed forward network (FFN), yielding f_R; the embedded formulas restate Eqs. (1)-(3) below, and the Chinese annotation reads "computation steps of SDA".]
617
+ Figure 7: Illustration of memory-augmented refinement module. The input feature
618
+ f is refined into fR using the key-value memory extracted from the training samples. Here
619
+ f serves as the query, the key is the stored boundary features, and the value is the class
620
+ prototypes.
621
+ Here ’SDA’ represents scaled dot-product attention and ’FFN’ represents feed
622
+ forward network.
623
+ low confidence score). To enhance its discriminativeness, we first estimate its
624
+ likelihoods to different classes by computing the similarities between the feature
625
+ and stored boundary features of each class. Then we use the class prototypes
626
+ to refine the feature according to the estimated likelihoods, where the class
627
+ prototype refers to the mean feature representing a category. Through this way,
628
+ the test feature would move closer to the most likely category.
629
+ Our proposed MAR module is used to implement such an idea and is illus-
630
+ trated in Figure. 7. Specifically, we build the key-value memory for each class
631
+ that stores two kinds of data from the training samples, namely, the bound-
632
+ ary features and class prototypes.
633
+ The boundary features serve as the keys
634
+ K ∈ Rd×CKL and the class prototypes serve as the values V ∈ Rd×C, where C
635
+ denotes the number of classes and KL is a hyper-parameter to control the size of
636
+ memory. In the MAR module, the input feature F is refined into FR using the
637
+ key-value memory. Inspired by the transformer, we use the scaled dot-product
638
+ attention (SDA) and FFN to construct the MAR block. To be specific, we take
639
+ the test feature as query Q ∈ Rd, and use the key-value in memory to refine it,
640
+ resulting in Q'. Formally,
+ s_i = \theta(Q)^{T} \phi(K_i),                            (1)
+ s_i = \frac{e^{s_i}}{\sum_{i=1}^{C K_L} e^{s_i}},         (2)
+ Q' = \sum_{i=1}^{C K_L} s_i V_j,                          (3)
+ 16
656
+ 16
657
+
658
+ [Figure 8 schematic omitted from the text extraction: from the training set, per-class features (e.g., sky, car) with the lowest scores form the keys and those with the highest scores are averaged for prototype generation, giving the key-value groups; K_H = 4, K_L = 4 and C = 2 are used for visualization.]
684
+ Figure 8: Illustration of the key-value memory.
685
+ From the extracted features on the
686
+ training set, we select KH ”good” features with the highest scores and KL ”hard” features
687
+ with the lowest scores per class.
688
+ Then we generate the class prototypes by averaging the
689
+ ”good” features, and organize them with ”hard” features to form the key-value memory. Here
690
+ KH = 4, KL = 4, and C = 2 are used for visualization.
691
+ where i ∈ [1, CKL] denotes the sample in memory and j is the class index cor-
692
+ responding to the i-th sample. Here θ(Q) = WθQ and φ(Ki) = WφKi, and Wθ
693
+ and Wφ are two learnable matrices. Notably, in Eq. (3), we index V by j rather
694
+ than i, which is different from the original self-attention calculation. We employ
695
+ the residual connections for SDA and FFN followed by layer normalization, like
696
+ in the original transformer [10].
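A minimal sketch of this refinement step, covering Eqs. (1)-(3) only (the residual connections, layer normalization and FFN of the full MAR block are left out); the class names and shapes are assumptions for illustration.

import torch
import torch.nn as nn

class MARSketch(nn.Module):
    # Refine test features towards the most likely class prototype using the
    # stored boundary features as keys and the prototypes as values.
    def __init__(self, d):
        super().__init__()
        self.W_theta = nn.Linear(d, d, bias=False)   # theta(.)
        self.W_phi = nn.Linear(d, d, bias=False)     # phi(.)

    def forward(self, q, keys, values, key_class):
        # q: [N, d] test features; keys: [C*K_L, d] stored boundary features;
        # values: [C, d] class prototypes; key_class: [C*K_L] class index j of each key.
        s = self.W_theta(q) @ self.W_phi(keys).t()   # Eq. (1): similarity to every key
        s = torch.softmax(s, dim=-1)                 # Eq. (2)
        return s @ values[key_class]                 # Eq. (3): weights applied to prototypes V_j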
697
+ Next we explain how to generate the key-value memory from the training
698
+ samples, which is shown in Figure. 8. We first train the segmentation network
699
+ without the MAR module. Using this model, we extract the features for all
700
+ training samples. Note that a feature would be discarded if it is misclassified by
701
+ the classifier. According to the ground truth, for each class, we select KL ”hard”
702
+ features with the lowest confidence scores and KH ”good” features with the
703
+ highest confidence scores. The former are considered to suffer from semantic
704
+ ambiguity while the latter are to accurately represent the semantic category.
705
+ After that, we compute the mean feature of the ”good” features for each class,
706
+ resulting in the class prototype. Finally, we store the ”hard” features as keys
707
+ and the corresponding class prototype as values in the memory, which essentially
708
+ represent the task-specific experience.
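A hedged sketch of this memory construction follows; feats, labels, preds and conf are assumed to be flattened per-pixel training statistics, the default values of K_L and K_H are placeholders, and each class is assumed to have at least K_L and K_H correctly classified samples.

import torch

def build_key_value_memory(feats, labels, preds, conf, num_classes, K_L=32, K_H=32):
    # feats: [M, d] features of training pixels; labels/preds: [M]; conf: [M]
    # classifier confidence. Misclassified features are discarded.
    keys, key_class, protos = [], [], []
    for c in range(num_classes):
        idx = ((labels == c) & (preds == c)).nonzero(as_tuple=True)[0]
        order = conf[idx].argsort()
        keys.append(feats[idx[order[:K_L]]])             # "hard" low-confidence features
        key_class.append(torch.full((K_L,), c, dtype=torch.long))
        protos.append(feats[idx[order[-K_H:]]].mean(0))  # prototype from "good" features
    return torch.cat(keys), torch.cat(key_class), torch.stack(protos)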
709
+ 17
710
+
711
+ 3.4. Training Strategy
712
+ Our proposed network consists of four main components, i.e., backbone, clas-
713
+ sifier, STF, and MAR. Here, we adopt a multi-stage training schedule, which is a
714
+ common strategy in advanced works, e.g., Faster RCNN and knowledge distilla-
715
+ tion. First, the backbone and classifier together are pretrained on ImageNet and
716
+ finetuned on a particular segmentation dataset (e.g., Cityscapes and CamVid).
717
+ The backbone would be kept fixed and the classifier would be re-initialized in the
718
+ following training procedures. Then, we train STF together with the backbone
719
+ and classifier, and use this model to generate the key-value memory. Finally,
720
+ we train the MAR and classifier by fixing STF. In the test phase, we perform
721
+ the end-to-end inference with STF and MAR.
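+ The schedule can be summarized with the following schematic sketch (module definitions, optimizers and learning rates are placeholders of our own, not the authors' settings):
+ import torch
+ import torch.nn as nn
+
+ backbone = nn.Conv2d(3, 64, 3, padding=1)     # placeholder sub-networks
+ classifier = nn.Conv2d(64, 19, 1)
+ stf = nn.Conv2d(64, 64, 3, padding=1)
+ mar = nn.Conv2d(64, 64, 1)
+
+ def freeze(m):
+     for p in m.parameters():
+         p.requires_grad = False
+
+ # Stage 1: backbone (+ classifier) pretrained on ImageNet and finetuned on the dataset;
+ # afterwards the backbone is kept fixed and the classifier is re-initialized.
+ freeze(backbone)
+
+ # Stage 2: train STF together with the frozen backbone and a fresh classifier,
+ # then use this model to generate the key-value memory.
+ opt_stage2 = torch.optim.SGD(list(stf.parameters()) + list(classifier.parameters()), lr=0.01)
+
+ # Stage 3: fix STF and train MAR together with the classifier.
+ freeze(stf)
+ opt_stage3 = torch.optim.SGD(list(mar.parameters()) + list(classifier.parameters()), lr=0.01)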
722
+ 4. Experiment
723
+ In this section, we experimentally evaluate our proposed method on two
724
+ challenging datasets by following the previous works, namely, Cityscapes [2] and
725
+ CamVid [3]. Here some state-of-the-art methods are adopted for comparison
726
+ and the results reported in the literature are listed. We follow the standard protocols
727
+ of video semantic segmentation and report the mean Intersection over Union
728
+ (mIoU) as the performance metric.
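+ For reference, mIoU averages the per-class intersection-over-union between predicted and ground-truth label maps; a minimal NumPy version (the ignore-label handling is an assumption of ours) is:
+ import numpy as np
+
+ def mean_iou(pred, gt, num_classes, ignore_index=255):
+     # pred, gt: integer label maps of identical shape
+     valid = gt != ignore_index
+     ious = []
+     for c in range(num_classes):
+         p, g = (pred == c) & valid, (gt == c) & valid
+         union = np.logical_or(p, g).sum()
+         if union > 0:
+             ious.append(np.logical_and(p, g).sum() / union)
+     return float(np.mean(ious))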
729
+ 4.1. Dataset
730
+ Cityscapes [2] is a popular dataset in semantic segmentation and autonomous
731
+ driving domain. It focuses on semantic understanding of urban street scenes.
732
+ The training and validation subsets contain 2, 975 and 500 video clips, respec-
733
+ tively, and each video clip contains 30 frames. The 20th frame in each clip is
734
+ annotated by the pixel-level semantic labels with 19 categories.
735
+ CamVid [3] also focuses on the semantic understanding of urban street
736
+ scenes, but it contains less data than Cityscapes. It only has 701 color images
737
+ with annotations of 11 semantic classes. CamVid is divided into the trainval
738
+ set with 468 samples and test set with 233 samples. All samples are extracted
740
+
741
+ Method            | Backbone         | mIoU (%)
+ PSPNet [13]       | ResNet18         | 69.79
+ + Liu et al. [25] | ResNet18         | 73.06 (+3.27)
+ + Ours            | ResNet18         | 74.58 (+4.79)
+ PSPNet [13]       | ResNet50         | 76.24
+ + Accel [22]      | ResNet50         | 70.20 (-6.04)
+ + TDNet [24]      | ResNet50         | 76.40 (+0.27)
+ + EFC [40]        | ResNet50         | 78.44 (+2.31)
+ + Ours            | ResNet50         | 79.22 (+2.98)
+ PSPNet [13]       | ResNet101        | 79.70
+ + TDNet [24]      | ResNet101        | 79.90 (+0.20)
+ + NetWarp [4]     | ResNet101        | 80.60 (+0.90)
+ + GRFP [5]        | ResNet101        | 80.20 (+0.50)
+ + Ours            | ResNet101        | 80.96 (+1.26)
+ Swin-B [30]       | Swin Transformer | 81.34
+ + Ours            | Swin Transformer | 81.67 (+0.33)
+ Table 1: Performance comparison on Cityscapes val subset. PSPNet and Swin Transformer
+ are chosen as the image segmentation models.
791
+ from the driving videos captured at daytime and dusk, and have pixel-level
792
+ semantic annotations. Each CamVid video contains 3, 600 to 11, 000 frames at
793
+ a resolution of 720 × 960.
794
+ 4.2. Performance Comparison
795
+ Here we compare our proposed method with the state-of-the-art methods on
796
+ Cityscapes and CamVid. In particular, the image segmentation model is used
797
+ as the baseline. The PSPNet [13] with the backbone ResNet18/50/101 has been
798
+ widely used on Cityscapes, and Dilation8 [41] is mainly adopted on CamVid.
799
+ Table 1 and Table 2 show the results, and we have the following observations.
800
+ First, our proposed method achieves the state-of-the-art performance on both
801
+ datasets and various baseline models, which demonstrates the effectiveness and
802
+ generalization of our method. Second, our proposed method can get more gains
804
+
805
+ Method          | Backbone         | mIoU (%)
+ Dilation8 [41]  | VGG16            | 65.3
+ + STFCN [27]    | VGG16            | 65.9 (+0.4)
+ + GRFP [5]      | VGG16            | 66.1 (+0.8)
+ + FSO [42]      | VGG16            | 66.1 (+0.8)
+ + VPN [43]      | VGG16            | 66.7 (+1.4)
+ + NetWarp [4]   | VGG16            | 67.1 (+1.8)
+ + EFC [40]      | VGG16            | 67.4 (+2.1)
+ + Ours          | VGG16            | 67.9 (+2.6)
+ PSPNet [13]     | ResNet101        | 76.2
+ + Accel [22]    | ResNet101        | 71.5 (-4.7)
+ + TDNet [24]    | ResNet101        | 76.0 (-0.2)
+ + Ours          | ResNet101        | 76.6 (+0.4)
+ Swin-B [30]     | Swin Transformer | 77.6
+ + Ours          | Swin Transformer | 77.9 (+0.3)
+ Table 2: Performance comparison on CamVid test subset. Dilation8, PSPNet and Swin
+ Transformer are chosen as the image segmentation model.
854
+ on light-weight baseline models. This is reasonable since improving a more com-
855
+ plicated model is generally more difficult. Third, TDNet [24], Accel [22] and
856
+ Liu et al. [25] show nearly no improvement and even a degradation in accuracy
857
+ compared to the baseline, since they mainly focus on improving inference speed.
858
+ Fourth, even on the strong baseline, e.g., Swin Transformer [30], our method
859
+ can also bring improvement.
860
+ 4.3. Ablation Study
861
+ Effectiveness of our method. In this work, we propose two key modules, namely
862
+ STF and MAR. To investigate their effects, we also give the results of applying
863
+ one of them, as shown in Table 3.
864
+ We can have the following observations.
865
+ First, our proposed STF and MAR can bring significant performance improve-
866
+ ment separately compared with the baseline, and the version equipped with
867
+ both of them performs best. Second, STF brings more gains than MAR,
869
+
870
+ Method         | Dataset    | Backbone | mIoU (%)
+ PSPNet [13]    | Cityscapes | ResNet50 | 76.24
+ + STF          | Cityscapes | ResNet50 | 78.75 (+2.62)
+ + MAR          | Cityscapes | ResNet50 | 78.37 (+2.24)
+ + Both         | Cityscapes | ResNet50 | 79.22 (+2.98)
+ Dilation8 [41] | CamVid     | VGG16    | 65.3
+ + STF          | CamVid     | VGG16    | 67.5 (+2.2)
+ + MAR          | CamVid     | VGG16    | 67.3 (+2.0)
+ + Both         | CamVid     | VGG16    | 67.9 (+2.6)
+ Table 3: Ablation study on key modules of our proposed method. Performance comparison
+ on Cityscapes val subset and CamVid test subset. PSPNet and Dilation8 are chosen as the
+ image segmentation model, respectively.
909
+ Method         | mIoU (%)
+ DeepLabv3 [44] | 79.5
+ + DenseCRF [8] | 79.7 (+0.2)
+ + GUM [39]     | 79.8 (+0.3)
+ + SegFix [9]   | 80.5 (+1.0)
+ + Our MAR      | 81.0 (+1.5)
+ Table 4: Comparison of different feature refinement methods on Cityscapes val subset.
+ DeepLabv3 is chosen as the baseline model.
924
+ since STF integrates the multi-frame information while MAR only optimizes the
925
+ features within the current frame. Third, our proposed STF outperforms other
926
+ multi-frame fusion methods (e.g., TDNet [24] in Table 1 and GRFP [5] and
927
+ NetWarp [4] in Table 2), which indicates the effectiveness of modeling spatial-
928
+ temporal relationship.
929
+ Analysis of MAR. In this paper, we propose MAR to refine the video features.
930
+ Actually, this technique can also be used in other tasks. Here we particularly
931
+ investigate its effect on image segmentation, and show its superiority by com-
932
+ paring with other representative refinement methods, including DenseCRF [8],
934
+
935
+ [Figure 9 — columns: Current Frame, Baseline, w/ STF, Ours, Ground Truth]
+ Figure 9: Visualization of some sample segmentation results from Cityscapes. It
+ can be seen that STF can significantly improve the baseline results, and MAR can further
+ bring gains. Here the red rectangles highlight the important regions. Best viewed in color.
943
+ GUM [39], and SegFix [9]. DenseCRF [8] establishes pairwise potentials on all
944
+ pairs of pixels and poses segmentation refinement problem as maximum a pos-
945
+ teriori (MAP) inference, which is a classic post-processing method. GUM [39]
946
+ proposes to enrich bilinear upsampling operators by introducing a learnable
947
+ transformation for semantic maps, which can steer the sampling towards the
948
+ correct semantic class.
949
+ SegFix [9] proposes to replace the unreliable predic-
950
+ tions of boundary pixels with the predictions of interior pixels, which currently
951
+ achieves the state-of-the-art performance. Table 4 provides the comparison re-
952
+ sults, where the DeepLabv3 is adopted as the baseline by following previous
953
+ works. We can see that our MAR outperforms the previous methods, which
954
+ indicates the effectiveness of refining feature by memory.
955
+ To intuitively show the effect of MAR on feature refinement, we particularly
956
+ choose two easily ambiguous categories, i.e., wall and building, to visualize the
957
+ features before and after applying MAR. To be specific, we randomly sample
958
+ 100 hard features (with confidence scores lower than 0.8) per category, and then
959
+ visualize their distribution using t-SNE. The results are shown in Figure 10.
960
+ Before using MAR, two kinds of features are confused together. MAR can move
961
+ features closer to their corresponding class prototypes and make them easier to
962
+ be separated.
964
+
965
+ [Figure 10 — t-SNE of Wall and Building features, panels: w/o MAR and w/ MAR]
+ Figure 10: Visualization on the change of feature distribution. It can be seen that
+ features become more separable after using MAR module. Best viewed in color.
973
+ Method    | road  | side. | build. | wall  | fence | pole  | light | sign  | vege. | terr.
+ PSPNet-50 | 97.82 | 83.23 | 91.70  | 35.86 | 58.07 | 63.87 | 70.76 | 78.93 | 91.95 | 62.85
+ + MAR     | 98.13 | 85.02 | 92.39  | 51.34 | 60.81 | 63.46 | 71.62 | 80.49 | 92.55 | 65.47
+ Method    | sky   | pers. | rider  | car   | truck | bus   | train | motor | bike  | mean
+ PSPNet-50 | 94.16 | 81.86 | 60.96  | 94.82 | 76.34 | 85.83 | 77.67 | 64.43 | 77.61 | 76.24
+ + MAR     | 94.52 | 82.42 | 60.88  | 95.34 | 80.72 | 88.72 | 78.73 | 68.28 | 78.21 | 78.37
+ Table 5: Category-wise performance on Cityscapes val subset. PSPNet-50 is chosen as the
+ baseline model.
1041
+ Hyper-parameter KL and KH. In our MAR, KL and KH are used to control the
1042
+ number of boundary features for memory and good features for prototype per
1043
+ class. Here we explore their influence on the segmentation accuracy. Considering
1044
+ the memory size, we particularly evaluate the KL from {10, 50, 100, 300} and
1045
+ the KH from {1, 5, 10, 50} on Cityscapes val subset with PSPNet-ResNet50 as
1046
+ the base model. We found that there is almost no performance fluctuation for
1047
+ different settings. Finally, KL = 10 and KH = 10 are adopted throughout the
1048
+ experiments.
1049
+ Analysis of segmentation results. Our proposed MAR mainly handles the am-
1050
+ biguous cases, especially for the hard classes. Table 5 lists the segmentation
1051
+ accuracy of different semantic classes on Cityscapes, where PSPNet with the
1053
+
1054
+ Module                | MACs (G)
+ PSPNet-50 (3 Frames)  | 4285.9
+ PSPNet-101 (3 Frames) | 6152.9
+ STF                   | 563.5
+ MAR                   | 31.5
+ Classifier            | 0.5
+ Table 6: Computational cost of different modules (GMACs). The resolution of input image
+ is 1024 × 2048.
1070
+ ResNet50 backbone is particularly adopted as the baseline. We can see that
1071
+ our method can consistently boost the accuracy over all classes and the gain
1072
+ is especially significant for the hard classes, e.g., wall, terrain, and truck. In
1073
+ addition, to intuitively show the effectiveness of our proposed STF and MAR,
1074
+ we visualize three sample segmentation results from Cityscapes in Figure 9. It
1075
+ can be seen that the original segmentation results can be progressively improved
1076
+ by STF and MAR.
1077
+ Cost of STF and MAR. Here we analyze the computational cost of different
1078
+ components in our proposed method, and the statistics are provided in Table 6.
1079
+ It can be seen that our STF and MAR involve little computational cost com-
1080
+ pared with the base model. In particular, our proposed MAR is more efficient
1081
+ to achieve good segmentation performance than devising more complicated net-
1082
+ work structures.
1083
+ 5. Conclusion
1084
+ In this paper, we design a novel video semantic segmentation framework
1085
+ with inter-frame feature fusion and inner-frame feature refinement, and pro-
1086
+ pose two novel techniques to boost the accuracy. Specifically, we first propose
1087
+ a spatial-temporal fusion module based on the transformer, which can effec-
1088
+ tively aggregate multi-frame features at different spatial and temporal positions,
1089
+ and meanwhile avoid error-prone optical flow estimation. Then we propose a
1091
+
1092
+ memory-augmented refinement module that exploits the stored features from
1093
+ the training samples to augment the hard features during inference. Our ex-
1094
+ perimental results on Cityscapes and CamVid show that the proposed method
1095
+ outperforms the state-of-the-art methods.
1096
+ Acknowledgements
1097
+ This work is supported by the National Natural Science Foundation of China
1098
+ under Grant No.62176246 and No.61836008. We acknowledge the support of
1099
+ GPU cluster built by MCC Lab of Information Science and Technology Institu-
1100
+ tion, USTC.
1101
+ References
1102
+ [1] J. Long, E. Shelhamer, T. Darrell, Fully convolutional networks for seman-
1103
+ tic segmentation, in: CVPR, 2015 (2015).
1104
+ [2] M. Cordts, M. Omran, S. Ramos, T. Rehfeld, M. Enzweiler, R. Benenson,
1105
+ U. Franke, S. Roth, B. Schiele, The cityscapes dataset for semantic urban
1106
+ scene understanding, in: CVPR, 2016 (2016).
1107
+ [3] G. J. Brostow, J. Fauqueur, R. Cipolla, Semantic object classes in video: A
1108
+ high-definition ground truth database, Pattern Recognition Letters (2009).
1109
+ [4] R. Gadde, V. Jampani, P. V. Gehler, Semantic video cnns through repre-
1110
+ sentation warping, in: ICCV, 2017 (2017).
1111
+ [5] D. Nilsson, C. Sminchisescu, Semantic video segmentation by gated recur-
1112
+ rent flow propagation, in: CVPR, 2018 (2018).
1113
+ [6] P. Liu, M. Lyu, I. King, J. Xu, Selflow: Self-supervised learning of optical
1114
+ flow, in: CVPR, 2019 (2019).
1115
+ [7] J. Zhuang,
1116
+ Z. Wang,
1117
+ B. Wang,
1118
+ Video semantic segmentation with
1119
+ distortion-aware feature correction, TCSVT (2020).
1121
+
1122
+ [8] P. Kr¨ahenb¨uhl, V. Koltun, Efficient inference in fully connected crfs with
1123
+ gaussian edge potentials, NeurIPS (2011).
1124
+ [9] Y. Yuan, J. Xie, X. Chen, J. Wang, Segfix: Model-agnostic boundary re-
1125
+ finement for segmentation, in: ECCV, 2020 (2020).
1126
+ [10] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez,
1127
+ L. Kaiser, I. Polosukhin, Attention is all you need, in: NeurIPS, 2017
1128
+ (2017).
1129
+ [11] L. Huang, Y. Yuan, J. Guo, C. Zhang, X. Chen, J. Wang, Interlaced sparse
1130
+ self-attention for semantic segmentation, arXiv preprint arXiv:1907.12273
1131
+ (2019).
1132
+ [12] L.-C. Chen, G. Papandreou, I. Kokkinos, K. Murphy, A. L. Yuille, Deeplab:
1133
+ Semantic image segmentation with deep convolutional nets, atrous convo-
1134
+ lution, and fully connected crfs, TPAMI (2017).
1135
+ [13] H. Zhao, J. Shi, X. Qi, X. Wang, J. Jia, Pyramid scene parsing network,
1136
+ in: CVPR, 2017 (2017).
1137
+ [14] C. Peng, J. Ma, Semantic segmentation using stride spatial pyramid pooling
1138
+ and dual attention decoder, Pattern Recognition (2020).
1139
+ [15] X. Lian, Y. Pang, J. Han, J. Pan, Cascaded hierarchical atrous spatial
1140
+ pyramid pooling module for semantic segmentation, Pattern Recognition
1141
+ (2021).
1142
+ [16] Q. Zhou, X. Wu, S. Zhang, B. Kang, Z. Ge, L. Jan Latecki, Contextual
1143
+ ensemble network for semantic segmentation, Pattern Recognition (2022).
1144
+ [17] Y. Zhang, X. Sun, J. Dong, C. Chen, Q. Lv, Gpnet: Gated pyramid network
1145
+ for semantic segmentation, Pattern Recognition (2021).
1146
+ [18] M. Orˇsi´c, S. ˇSegvi´c, Efficient semantic segmentation with pyramidal fusion,
1147
+ Pattern Recognition (2021).
1149
+
1150
+ [19] Z. Wang, R. Song, P. Duan, X. Li, Efnet: Enhancement-fusion network for
1151
+ semantic segmentation, Pattern Recognition (2021).
1152
+ [20] X. Zhu, Y. Xiong, J. Dai, L. Yuan, Y. Wei, Deep feature flow for video
1153
+ recognition, in: CVPR, 2017 (2017).
1154
+ [21] E. Ilg, N. Mayer, T. Saikia, M. Keuper, A. Dosovitskiy, T. Brox, Flownet
1155
+ 2.0: Evolution of optical flow estimation with deep networks, in: CVPR,
1156
+ 2017 (2017).
1157
+ [22] S. Jain, X. Wang, J. E. Gonzalez, Accel: A corrective fusion network for
1158
+ efficient semantic segmentation on video, in: CVPR, 2019 (2019).
1159
+ [23] J. Wu, Z. Wen, S. Zhao, K. Huang, Video semantic segmentation via feature
1160
+ propagation with holistic attentio, Pattern Recognition (2020).
1161
+ [24] P. Hu, F. Caba, O. Wang, Z. Lin, S. Sclaroff, F. Perazzi, Temporally dis-
1162
+ tributed networks for fast video semantic segmentation, in: CVPR, 2020
1163
+ (2020).
1164
+ [25] Y. Liu, C. Shen, C. Yu, J. Wang, Efficient semantic video segmentation
1165
+ with per-frame inference, in: ECCV, 2020 (2020).
1166
+ [26] D. Tran, L. Bourdev, R. Fergus, L. Torresani, M. Paluri, Deep end2end
1167
+ voxel2voxel prediction, in: CVPRW, 2016 (2016).
1168
+ [27] M. Fayyaz, M. H. Saffar, M. Sabokrou, M. Fathy, R. Klette, F. Huang,
1169
+ Stfcn: spatio-temporal fcn for semantic video segmentation, arXiv preprint
1170
+ arXiv:1608.05971 (2016).
1171
+ [28] Y. Wang, J. Liu, Y. Li, J. Fu, M. Xu, H. Lu, Hierarchically supervised
1172
+ deconvolutional network for semantic video segmentation, Pattern Recog-
1173
+ nition (2017).
1174
+ [29] N. Carion, F. Massa, G. Synnaeve, N. Usunier, A. Kirillov, S. Zagoruyko,
1175
+ End-to-end object detection with transformers, in: ECCV, 2020 (2020).
1177
+
1178
+ [30] Z. Liu, Y. Lin, Y. Cao, H. Hu, Y. Wei, Z. Zhang, S. Lin, B. Guo, Swin trans-
1179
+ former: Hierarchical vision transformer using shifted windows, in: ICCV,
1180
+ 2021 (2021).
1181
+ [31] R. Girdhar, J. Carreira, C. Doersch, A. Zisserman, Video action trans-
1182
+ former network, in: CVPR, 2019 (2019).
1183
+ [32] K. Gavrilyuk, R. Sanford, M. Javan, C. G. Snoek, Actor-transformers for
1184
+ group activity recognition, in: CVPR, 2020 (2020).
1185
+ [33] C. de Masson d'Autume, S. Ruder, L. Kong, D. Yogatama, Episodic mem-
1186
+ ory in lifelong language learning, in: NeurIPS, 2020 (2020).
1187
+ [34] L. Zhang, X. Chang, J. Liu, M. Luo, M. Prakash, A. G. Hauptmann,
1188
+ Few-shot activity recognition with cross-modal memory network, Pattern
1189
+ Recognition (2020).
1190
+ [35] Q. Cai, Y. Pan, T. Yao, C. Yan, T. Mei, Memory matching networks for
1191
+ one-shot image recognition, in: CVPR, 2018 (2018).
1192
+ [36] H. Deng, Y. Hua, T. Song, Z. Zhang, Z. Xue, R. Ma, N. Robertson,
1193
+ H. Guan, Object guided external memory network for video object de-
1194
+ tection, in: ICCV, 2019 (2019).
1195
+ [37] C.-Y. Wu, C. Feichtenhofer, H. Fan, K. He, P. Krahenbuhl, R. Girshick,
1196
+ Long-term feature banks for detailed video understanding, in: CVPR, 2019
1197
+ (2019).
1198
+ [38] X. Wang, R. Girshick, A. Gupta, K. He, Non-local neural networks, in:
1199
+ CVPR, 2017 (2017).
1200
+ [39] D. Mazzini, Guided upsampling network for real-time semantic segmenta-
1201
+ tion, in: BMVC, 2018 (2018).
1202
+ [40] M. Ding, Z. Wang, B. Zhou, J. Shi, Z. Lu, P. Luo, Every frame counts: joint
1203
+ learning of video segmentation and optical flow, in: AAAI, 2020 (2020).
1205
+
1206
+ [41] F. Yu, V. Koltun, Multi-scale context aggregation by dilated convolutions,
1207
+ arXiv preprint arXiv:1511.07122 (2015).
1208
+ [42] A. Kundu, V. Vineet, V. Koltun, Feature space optimization for semantic
1209
+ video segmentation, in: CVPR, 2016 (2016).
1210
+ [43] V. Jampani, R. Gadde, P. V. Gehler, Video propagation networks, in:
1211
+ CVPR, 2017, pp. 451–461 (2017).
1212
+ [44] L.-C. Chen, G. Papandreou, F. Schroff, H. Adam, Rethinking atrous con-
1213
+ volution for semantic image segmentation, arXiv preprint arXiv:1706.05587
1214
+ (2017).
1216
+
RtE2T4oBgHgl3EQfWQe7/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
W9FQT4oBgHgl3EQfcjYS/content/2301.13327v1.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:54dcc44fd49da779d89253fa2f2b00d2bf60d2370c920750c4163e96938c8a05
3
+ size 916847
_dFAT4oBgHgl3EQfqx1t/content/tmp_files/2301.08649v1.pdf.txt ADDED
@@ -0,0 +1,1101 @@
1
+ Offline Policy Evaluation with Out-of-Sample Guarantees
2
+ Sofia Ek
3
+ sofia.ek@it.uu.se
4
+ Department of Information Technology
5
+ Uppsala University
6
+ Dave Zachariah
7
+ dave.zachariah@it.uu.se
8
+ Department of Information Technology
9
+ Uppsala University
10
+ Abstract
11
+ We consider the problem of evaluating the performance of a decision policy using past
12
+ observational data. The outcome of a policy is measured in terms of a loss or disutility (or
13
+ negative reward) and the problem is to draw valid inferences about the out-of-sample loss
14
+ of the specified policy when the past data is observed under a, possibly unknown, policy.
15
+ Using a sample-splitting method, we show that it is possible to draw such inferences with
16
+ finite-sample coverage guarantees that evaluate the entire loss distribution. Importantly, the
17
+ method takes into account model misspecifications of the past policy – including unmeasured
18
+ confounding. The evaluation method can be used to certify the performance of a policy using
19
+ observational data under an explicitly specified range of credible model assumptions.
20
+ 1
21
+ Introduction
22
+ In this work, we are interested in evaluating the performance of a decision policy, denoted π, which chooses
23
+ an action A from a discrete action set. Each action A is taken in a context with observable covariates X and
24
+ incurs a real-valued loss L (aka. disutility or negative reward). Such policies are considered in contextual
25
+ bandit problems and precision medicine (Langford & Zhang, 2007; Qian & Murphy, 2011; Lattimore &
26
+ Szepesvári, 2020; Tsiatis et al., 2019). For instance, A may be one of several treatment options for a patient
27
+ with observable characteristics X and L measures the severity of the outcome.
28
+ A target policy π can be evaluated using experimental data obtained from trials. Such experiments are,
29
+ however, often costly and may lead to rather restrictive sample sizes. Moreover, in safety-critical applications
30
+ it is often unethical to test new policies without severe restrictions. A more fundamental inferential problem,
31
+ however, is the lack of ‘external’ validity, i.e., the limited ability to extrapolate from the trial population
32
+ to the intended target population leads to invalid inferences (Westreich, 2019; Manski, 2019). The main
33
+ alternative is off-policy evaluation, i.e., using observational data from a past decision process to infer the
34
+ performance of the target policy. This requires that the past process is modelled without systematic errors
35
+ – by assuming well-specified models and no unmeasured confounding. The credibility of these assumptions
36
+ therefore determine the ‘internal’ validity of inferences about π from observational data (Manski, 2003).
37
+ Inferences that lack validity are particularly serious when evaluating π in decision processes that are costly
38
+ or safety-critical. In such cases even inferences that are asymptotically valid with increasing sample size
39
+ may not be adequate. Moreover, when the resulting distribution of losses is skewed or is widely dispersed,
40
+ its tails are important to evaluate. Then inferring the expected loss Eπ[L], as is commonly done, provides
41
+ a very limited evaluation of π. For instance, the average loss in a population maybe low but the tail losses
42
+ are unacceptable (Wang et al., 2018). In such applications, we are more concerned with providing valid
43
+ certifications of the overall performance (see Figure 1a), rather than precise but invalid inferences of a single
44
+ distributional parameter.
45
+ In this paper we propose a method for evaluating a specified target policy using observational data that
46
+ 1
47
+ arXiv:2301.08649v1 [stat.ML] 20 Jan 2023
48
+
49
+ • provides finite-sample coverage guarantees for the out-of-sample loss,
50
+ • evaluates the entire loss distribution instead of the expected value,
51
+ • and takes model misspecification, including unmeasured confounding, into account.
52
+ [Figure 1 — two panels: (a) limit curves ℓα versus 1 − α for π, π1, π0; (b) actual versus target coverage for π, π1, π0.]
+ Figure 1: Evaluating out-of-sample losses under target policy with binary decisions A ∈ {0, 1}. Policies π0
+ and π1 correspond to ‘treat none’ (A ≡ 0) and ‘treat all’ (A ≡ 1), respectively, while π denotes a policy that
+ adapts to context X, see Section 5.1 for more details. (a) Each curve certifies that a new loss L falls below
+ the limit ℓα with a probability of at least 1 − α. The certified performance of the adaptive policy π dominates
+ those of the alternative policies. (b) Evaluation of actual coverage, that is, the probability of L ≤ ℓα, across
+ target coverage 1 − α.
92
+ 2
93
+ Problem formulation
94
+ We consider a target policy π for deciding actions A in different contexts, which are described by observed
95
+ and unobserved covariates X and U, respectively. The policy can be either deterministic or random, and
96
+ corresponds to a distribution pπ(A|X), which can be conditional on observed covariates X. Our aim is to
97
+ evaluate the losses L that result from applying any given π. Each instance of contextual covariates, action
98
+ and loss, i.e., (X, U, A, L), is drawn independently from a target distribution pπ(X, U, A, L). At our disposal
99
+ is an observational data set
+ \mathcal{D} = \{ (X_i, A_i, L_i) \}_{i=1}^{n},            (1)
106
+ and our goal is to use it to characterize the out-of-sample loss Ln+1. Specifically, for any miscoverage level
107
+ α ∈ (0, 1), we seek an informative limit ℓα(D) on the loss such that
108
+
109
+
110
+ Ln+1 ≤ ℓα(D)
111
+
112
+ ≥ 1 − α,
113
+ (2)
114
+ In other words, ℓα(D) evaluated across α yields a finite-sample performance certification of π as is illustrated
115
+ in Figure 1a. Unlike a single point estimate, the limit curve characterizes the distribution of losses under π.
116
+ The causal structure of the decision process is illustrated in Figure 2a and the target distribution admits a
117
+ causal factorization
118
+ pπ(X, U, A, L) = p(X, U) pπ(A|X) p(L|A, X, U),
119
+ (3)
120
+ where p(X, U) and p(L|A, X, U) are unknown. The central challenge in off-policy evaluation of π is that (1)
121
+ is not sampled from (3) but from a shifted training distribution which admits a causal factorization
122
+ p(X, U, A, L) = p(X, U) p(A|X, U) p(L|A, X, U),
123
+ (4)
124
+ 2
125
+
126
+ where p(A|X, U) characterizes a different, past policy (aka.
127
+ behavioral policy).
128
+ The causal structure is
129
+ illustrated in Figure 2b. If the past policy were known, it is possible to adjust for the distribution shift from
130
+ training to target distribution using the ratio
+ \frac{p_\pi(X, U, A, L)}{p(X, U, A, L)} \equiv \frac{p_\pi(A|X)}{p(A|X, U)} \ge 0.            (5)
135
+ This is feasible in certain problems with fully automated decision-making, such as online recommendation
136
+ systems, where the past policy is designed using observable covariates only, i.e., p(A|X, U) ≡ p(A|X). In more
137
+ general problems, however, we have only a nominal model of the past policy �p(A|X) (aka. propensity model),
138
+ typically estimated from prior observable data. The nominal model may therefore diverge from p(A|X, U)
139
+ due to various modelling errors that persist even in the large-sample scenario: model misspecification and
140
+ unmeasured confounding via U (Peters et al., 2017; Westreich, 2019). Here we follow the marginal sensitivity
141
+ methodology of (Tan, 2006) and characterize the model divergence with respect to the odds of taking action
142
+ A. That is, the nominal odds diverge from the unknown odds by some bounded factor Γ ≥ 1 as follows:
+ \frac{1}{\Gamma} \;\le\; \underbrace{\frac{p(A|X,U)}{1 - p(A|X,U)}}_{\text{unknown odds}} \Big/ \underbrace{\frac{\hat{p}(A|X)}{1 - \hat{p}(A|X)}}_{\text{nominal odds}} \;\le\; \Gamma.            (6)
160
+ When the bound equals Γ = 1, the nominal model is perfectly specified and there is no unmeasured con-
161
+ founding. In general, we consider all cases where the nominal odds diverge by at most a factor Γ.
162
+ In summary, the problem we consider is to construct a limit ℓα(D) for target policy π using training data
163
+ D and a nominal model �p(A|X). The resulting limit should satisfy the finite-sample guarantee (2) across
164
+ all miscoverage levels α, and thereby certify the target policy performance for any specified bound Γ in
165
+ (6). This enables a robust off-policy evaluation of target policies using observational data, since it can be
166
+ performed across a range of credible odds bounds Γ as we will illustrate in the numerical experiments below.
167
+ By increasing the odds bound Γ, the credibility of our assumptions on �p(A|X) increases, but the strength of
168
+ our conclusions about Ln+1 decrease, cf. (Manski, 2003). The trade-off between credibility of assumptions
169
+ and informativeness of inferences will be quantified as well.
170
+ X
171
+ A
172
+ L
173
+ U
174
+ (a) Causal structure that yields target distribution (3).
175
+ X
176
+ A
177
+ L
178
+ U
179
+ (b) Causal structure that yields training distribution (4).
180
+ Figure 2: Directed acyclic diagrams (DAGs) representing the causal structure of decision process under (a)
181
+ target policy and (b) past policy. In (b), both contextual covariates (X, U) may affect actions A as well as
182
+ the outcome loss L and thus U gives rise to unmeasured confounding.
183
+ 3
184
+ Background
185
+ We situate the problem considered in this paper and our proposed method within the context of off-policy
186
+ evaluation.
187
+ Expected loss: In most off-policy evaluation literature, the target quantity is the unknown expected loss
188
+ Eπ[L] of policy π. A standard estimator of the mean, that dates back to Horvitz & Thompson (1952), is
189
+ based on inverse propensity weighting:
+ V_{\mathrm{IPW}}(\mathcal{D}) = \frac{1}{n} \sum_{i=1}^{n} \hat{w}(X_i, A_i)\, L_i,            (7)
197
+ 3
198
+
199
+ where �w(X, A) = pπ(A|X)
200
+ �p(A|X) is a model of (5), see for instance (Rosenbaum & Rubin, 1983; Beygelzimer et al.,
201
+ 2009; Qian & Murphy, 2011; Zhang et al., 2012; Zhao et al., 2012; Kallus, 2018). We note that the estimator
202
+ is unbiased when Γ = 1. An alternative standard estimator is based on regression modeling:
203
+ VRM(D) = 1
204
+ n
205
+ n
206
+
207
+ i=1
208
+
209
+ a∈A
210
+ pπ(a|Xi) �ℓ(a, Xi),
211
+ (8)
212
+ where �ℓ(A, X) is a model of E[L|A, X].
213
+ The approaches in (7) and (8) have complementary strengths and weaknesses based on the challenges of
214
+ modelling the past policy or the conditional mean of losses, respectively.
215
+ Even when models are well-
216
+ specified, the accuracy of the estimators depends highly on the overlap of covariates X across decisions A
217
+ in the training data Oberst et al. (2020); D’Amour et al. (2021). When the overlap is weak, the variance
218
+ of VIPW(D) can become excessively large, even when it is unbiased. This can be mitigated by clipping the
219
+ weights (Rubin, 2001; Kang & Schafer, 2007; Schafer & Kang, 2008; Strehl et al., 2010).
220
+ When the models �w(X, A) or �ℓ(A, X) are systematically in error, however, their corresponding estimators
221
+ are biased and may invalidate the evaluation of π. The ‘doubly robust’ estimator
222
+ VDR(D) = 1
223
+ n
224
+ n
225
+
226
+ i=1
227
+ �w(Xi, Ai)
228
+
229
+ Li − �ℓ(Ai, Xi)
230
+
231
+ +
232
+
233
+ a∈A
234
+ pπ(a|Xi) �ℓ(a, Xi),
235
+ is one way to protect against one of the models being misspecified and reduces the estimator variance provided
236
+ �ℓ(A, X) is sufficiently accurate (Bang & Robins, 2005; Dudík et al., 2011; Rotnitzky et al., 2012).
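+ As a concrete illustration, the two mean estimators and their doubly robust combination can be written in a few lines of NumPy (array names are ours; w_hat holds the modelled ratio used in (7) and ell_hat_pi the policy-averaged regression predictions of (8)):
+ import numpy as np
+
+ def v_ipw(L, w_hat):
+     return np.mean(w_hat * L)                              # Eq. (7)
+
+ def v_rm(ell_hat_pi):
+     return np.mean(ell_hat_pi)                             # Eq. (8)
+
+ def v_dr(L, w_hat, ell_hat_obs, ell_hat_pi):
+     # ell_hat_obs[i] = ell_hat(A_i, X_i); ell_hat_pi[i] = sum_a pi(a|X_i) ell_hat(a, X_i)
+     return np.mean(w_hat * (L - ell_hat_obs) + ell_hat_pi)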
237
+ Distribution of losses: When loss distribution is highly skewed and/or the tails are wide, the expected loss
238
+ can be inadequate to evaluate policies, especially in high-stakes problems. There are alternative parameters
239
+ of the loss distribution, described by the cumulative distribution function F(ℓ) = Pπ{Ln+1 ≤ ℓ} (cdf), that
240
+ one can consider in such problems, such as the Conditional Value at Risk or a quantile (Wang et al., 2018;
241
+ Chandak et al., 2021; Huang et al., 2021).
242
+ Off-policy evaluation of π with respect to some alternative parameter can be achieved using cdf-estimators
243
+ that are analogous to the mean estimators above, see (Huang et al., 2021). In analogy to (7), the inverse
244
+ propensity weighted cdf-estimator
+ \hat{F}_{\mathrm{IPW}}(\ell; \mathcal{D}) = \frac{1}{n} \sum_{i=1}^{n} \hat{w}(X_i, A_i)\, 1(L_i \le \ell),            (9)
252
+ is point-wise unbiased when Γ = 1. Similar to (8), the estimator
253
+ �FRM(ℓ; D) = 1
254
+ n
255
+ n
256
+
257
+ i=1
258
+
259
+ a∈A
260
+ pπ(a|Xi) �c(ℓ; a, Xi),
261
+ requires a model �c(ℓ; a, x) of the conditional distribution P{L ≤ ℓ|A, X}. To mitigate against model mis-
262
+ specification that threaten the validity of the evaluation of π, one can use the ‘doubly robust’ estimator
263
+ �FDR(ℓ; D) = 1
264
+ n
265
+ n
266
+
267
+ i=1
268
+ �w(Xi, Ai)
269
+
270
+ 1(Li ≤ ℓ) − �c(ℓ; Ai, Xi)
271
+
272
+ +
273
+
274
+ a∈A
275
+ pπ(a|Xi) �c(ℓ; a, Xi),
276
+ which protects against one of the models being misspecified. While this estimator is consistent, it is not
277
+ guaranteed to yield a valid cdf.
278
+ In this paper, we are interested in limiting the out-of-sample loss Ln+1 and the quantile is the smallest ℓα
279
+ such that Pπ{Ln+1 ≤ ℓα} ≥ 1 − α. It can be estimated as
280
+ ℓα(D) = inf
281
+
282
+ ℓ : �F(ℓ; D) ≥ 1 − α
283
+
284
+ ,
285
+ 4
286
+
287
+ using a cdf-estimator above.
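+ A short sketch of this quantile estimate based on the inverse propensity weighted cdf (9) is given below (a NumPy sketch with our own names; if the weighted mass never reaches 1 − α the largest observed loss is returned):
+ import numpy as np
+
+ def quantile_ipw(L, w_hat, alpha):
+     order = np.argsort(L)
+     L_sorted, w_sorted = L[order], w_hat[order]
+     F = np.cumsum(w_sorted) / len(L)            # F_IPW evaluated at the sorted losses
+     idx = np.searchsorted(F, 1.0 - alpha)       # smallest ell with F_IPW(ell) >= 1 - alpha
+     return L_sorted[min(idx, len(L) - 1)]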
288
+ Distribution-free inference: Derivations of finite-sample valid, nonparametric limits on random variables
289
+ date back to the works of Wilks (1941); Wald (1943); Scheffe & Tukey (1945). More recently, the related
290
+ methodology of conformal prediction has focused on developing covariate-specific prediction regions (Vovk
291
+ et al., 2005; Shafer & Vovk, 2008; Vovk, 2012). See Lei & Wasserman (2014); Lei et al. (2018); Romano
292
+ et al. (2019) for further developments. Tibshirani et al. (2019) adapt the methodology to be valid also under
293
+ known covariate shifts. This result was subsequently used to provide context-specific prediction intervals for
294
+ any given policy π that are statistically valid under the assumption that the past policy p(A|X, U) is known
295
+ Osama et al. (2020); Taufiq et al. (2022).
296
+ The marginal sensitivity methodology developed in Tan (2006) enables us to specify a more credible range
297
+ of assumptions using (6). This methodology was used for robust policy learning in Kallus & Zhou (2021)
298
+ and sensitivity analysis of treatment effects in Jin et al. (2021) under unobserved confounding. This paper
299
+ considers the overall performance of π, similar to Huang et al. (2021).
300
+ However, we focus on ensuring
301
+ inferences on the out-of-sample losses that are valid even with finite training data and under systematic
302
+ modelling errors – including unobserved confounding – using a sample-splitting technique that leverages
303
+ results derived in Jin et al. (2021).
304
+ 4
305
+ Method
306
+ We show that one can limit the out-of-sample losses under π using a sample-splitting technique and by
307
+ bounding the unknown ratio in (5).
308
+ For any specified odds bound Γ ≥ 1 in (6), we have that the ratio in (5) is bounded as:
+ \underline{W} \;\le\; \frac{p_\pi(X, U, A, L)}{p(X, U, A, L)} \;\le\; \overline{W},            (10)
+ where the bounds equal
+ \underline{W} = p_\pi(A|X) \cdot \big[ 1 + \Gamma^{-1} \big( \hat{p}(A|X)^{-1} - 1 \big) \big]
+ and
+ \overline{W} = p_\pi(A|X) \cdot \big[ 1 + \Gamma \big( \hat{p}(A|X)^{-1} - 1 \big) \big].            (11)
328
+ That is, the bounds are functions of X and A drawn from the training distribution (4).
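+ For a given action and context these bounds are straightforward to compute from the nominal propensity model (a small NumPy sketch with our own function name):
+ import numpy as np
+
+ def weight_bounds(pi_a_x, p_hat_a_x, gamma):
+     # Eq. (11): bounds on the unknown ratio (5) given p_hat(A|X) and an odds bound Gamma >= 1
+     inv = 1.0 / np.asarray(p_hat_a_x) - 1.0
+     w_lo = pi_a_x * (1.0 + inv / gamma)
+     w_hi = pi_a_x * (1.0 + gamma * inv)
+     return w_lo, w_hi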
329
+ We randomly split the training data (1) into two separate sets,
330
+ D = D0 ∪ D1,
331
+ with samples sizes n0 and n − n0, respectively. The first set D0 is used to construct a set of upper bounds
332
+
333
+ W i
334
+ �n0
335
+ i=1 via (11). The remaining set D1 is used to form the function
336
+ �F(ℓ; D1, w) =
337
+ �n
338
+ i=n0+1 W i1(Li ≤ ℓ)
339
+ �n
340
+ i=n0+1 W i1(Li ≤ ℓ) + �n
341
+ i=n0+1 W i1(Li > ℓ) + w,
342
+ (12)
343
+ as a proxy for the unknown cdf of the out-of-sample loss Ln+1. As the following result shows, (12) enables
344
+ the construction of a valid limit ℓα.
345
+ Theorem 4.1. Define the quantile function of (12) as
346
+ �F −1(·; D1, w) = inf
347
+
348
+ ℓ : �F(ℓ; D1, w) ≥ ·
349
+
350
+ .
351
+ For any miscoverage probability α ∈ (0, 1), construct the limit
+ \ell_\alpha(\mathcal{D}) = \min_{\beta : 0 < \beta < \alpha} \hat{F}^{-1}\Big( \frac{1-\alpha}{1-\beta} ;\, \mathcal{D}_1, w_\beta(\mathcal{D}_0) \Big),            (13)
+ where
+ w_\beta(\mathcal{D}_0) = \begin{cases} \overline{W}_{[\lceil (n_0+1)(1-\beta) \rceil]}, & \lceil (n_0+1)(1-\beta) \rceil \le n_0, \\ \infty, & \text{otherwise}, \end{cases}            (14)
371
+ and W [k] denotes the kth order statistic of (W i)n0
372
+ i=1.
373
+ Then ℓα(D) limits the out-of-sample loss Ln+1 with a probability of at least 1 − α as specified in (2).
374
+ 4.1
375
+ Implementation
376
+ We note that (12) is piecewise constant and can readily be represented using a vector with n − n0 elements.
377
+ The limit curve can be evaluated across a discrete grid of miscoverage levels α and the computation is
378
+ summarized in Algorithm 1. Also, note that wβ as a function of β changes in discrete steps in (14), therefore
379
+ the relevant values of β form a discrete set.
380
+ Algorithm 1 Limit curve of policy π
381
+ Input: Training data D, model �p(A|X), bound Γ ≥ 1 and sample split size n0.
382
+ 1: Randomly split D into D0 and D1.
383
+ 2: for α ∈ {0, . . . , 1} do
384
+ 3:
385
+ for β ∈ {0, . . . , α} do
386
+ 4:
387
+ Compute wβ using (14).
388
+ 5:
389
+ Compute ℓα,β = inf
390
+
391
+ ℓ : �F(ℓ; D1, wβ) ≥ 1−α
392
+ 1−β
393
+
394
+ using (12).
395
+ 6:
396
+ end for
397
+ 7:
398
+ Set ℓα to the smallest ℓα,β above.
399
+ 8:
400
+ Store (α, ℓα).
401
+ 9: end for
402
+ Output: {(α, ℓα)}
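+ One possible NumPy implementation of Algorithm 1 is sketched below. It is a sketch under our own naming; the bounds are assumed to be precomputed with (11) on each split, a uniform grid of β values is scanned, and we read the bounded weights in (12) as the lower bounds on the L_i ≤ ℓ terms and the upper bounds on the L_i > ℓ term (an assumption about the notation that yields a conservative proxy cdf). The returned value np.inf marks an uninformative limit.
+ import numpy as np
+
+ def f_proxy(ell, L1, w_lo1, w_hi1, w):
+     # proxy cdf in the spirit of Eq. (12)
+     below = w_lo1[L1 <= ell].sum()
+     above = w_hi1[L1 > ell].sum()
+     return below / (below + above + w)
+
+ def limit_curve(L1, w_lo1, w_hi1, w_hi0, alphas, n_beta=50):
+     # L1, w_lo1, w_hi1: losses on D1 and their bounds from Eq. (11) (NumPy arrays);
+     # w_hi0: upper bounds computed on D0, used for w_beta in Eq. (14).
+     n0 = len(w_hi0)
+     w_sorted = np.sort(w_hi0)
+     grid = np.sort(np.unique(L1))               # F is piecewise constant in ell
+     curve = []
+     for alpha in alphas:
+         best = np.inf
+         for beta in np.linspace(0.0, alpha, n_beta + 2)[1:-1]:
+             k = int(np.ceil((n0 + 1) * (1 - beta)))
+             if k > n0:
+                 continue                        # w_beta = infinity, Eq. (14)
+             w_beta = w_sorted[k - 1]
+             target = (1 - alpha) / (1 - beta)
+             for ell in grid:                    # smallest ell with F >= target, cf. Eq. (13)
+                 if f_proxy(ell, L1, w_lo1, w_hi1, w_beta) >= target:
+                     best = min(best, ell)
+                     break
+         curve.append((alpha, best))
+     return curve
+ The bound arrays w_lo1, w_hi1 and w_hi0 can be obtained by applying the weight_bounds sketch above to each split of the training data.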
403
+ 4.2
404
+ Derivation of result
405
+ Proof. The first part of the proof builds on techniques used to derive weighted conformal prediction intervals
406
+ in Tibshirani et al. (2019).
407
+ Let us consider a sequence of n − n0 samples drawn from (4) followed by a new sample drawn from (3), i.e.,
408
+ D+ =
409
+
410
+ (Xn0+1, Un0+1, An0+1, Ln0+1), . . . , (Xn, Un, An, Ln), (Xn+1, Un+1, An+1, Ln+1)
411
+
412
+ .
413
+ The joint distribution of this sequence can be expressed using:
414
+ n
415
+
416
+ i=n+
417
+ p(xi, ui, ai, ℓi) · p(xn+1, un+1, an+1, ℓn+1)wn+1 = p(D+)wn+1 = p(S+)wn+1,
418
+ where n+ = n0 + 1 for notational simplicity, S+ is an unordered set of elements from D+, and the weight
419
+ wi = pπ(xi, ui, ai, ℓi)
420
+ p(xi, ui, ai, ℓi) ,
421
+ is the (unobservable) ratio (5) that quantifies the distribution shift from training to target distribution. We
422
+ shall use the expression for the joint distribution to derive the distribution function for the new loss Ln+1.
423
+ Suppose we are given unordered set S+ alone, then the particular sequence D+ is unknown. Let Ei denote
424
+ the event that the sample (Xn+1, Un+1, An+1, Ln+1) equals the ith sample (xi, ui, ai, ℓi) in the unknown
425
+ sequence D+. We consider all possible sequences D+ obtained by permutations σ of elements in the set S+.
426
+ The joint probability the event Ei and S+ is then
427
+ P{Ei, S+} =
428
+
429
+ σ:σ(n+1)=n+i
430
+ p(S+)wi = p(S+)win!.
431
+ 6
432
+
433
+ The conditional probability of event Ei can now be expressed as
434
+ pi = P{Ei|S+} =
435
+ P{Ei, S+}
436
+ �n+1
437
+ j=n+ P{Ej, S+}
438
+ =
439
+ wi
440
+ �n+1
441
+ j=n+ wj
442
+ ,
443
+ where the first equality follows from the law of total probability. The probability that the loss Ln+1 of the
444
+ new sample equals ℓi, when conditioning on the unordered set S+, is equal to
445
+ P{Ln+1 = ℓi|S+} = P{Ei|S+} = pi.
446
+ Thus conditional on S+, the new loss Ln+1 follows the cdf:
447
+ P{Ln+1 ≤ ℓ|S+} =
448
+ n+1
449
+
450
+ i=n+
451
+ pi1(ℓi ≤ ℓ) =
452
+ �n+1
453
+ i=n+ wi1(Li ≤ ℓ)
454
+ �n+1
455
+ i=n+ wi
456
+ .
457
+ (15)
458
+ Before marginalizing out S+ from (15), we consider a limit ℓ that is a function of the observable elements in
459
+ S+. For this part, we will build on the proof technique in (Jin et al., 2021, thm. 2.2).
460
+ Specifically, using (12) we define the following limit:
461
+ ℓα(D1, W n+1) = inf
462
+
463
+ ℓ : �F(ℓ; D1, W n+1) ≥ 1 − α
464
+ 1 − β
465
+
466
+ ,
467
+ (16)
468
+ for any 0 < β < α, where W n+1 ≥ Wn+1 is given in (11). Now insert the limit ℓα(D1, W n+1) into (15) and
469
+ use the law of total expectation to marginalize out S+:
470
+ P{Ln+1 ≤ ℓα(D1, W n+1)} = E
471
+
472
+ Pπ{Ln+1 ≤ ℓα(D1, W n+1)|S+}
473
+
474
+ = E
475
+ ��n+1
476
+ i=n+ Wi1(Li ≤ ℓα(D1, W n+1))
477
+ �n+1
478
+ i=n+ Wi
479
+
480
+ .
481
+ We now proceed to lower bound this probability. Note that by construction:
482
+ E
483
+
484
+ �F(ℓα; D1, W n+1)
485
+
486
+ = E
487
+
488
+
489
+ i∈D1 W i1(Li ≤ ℓα)
490
+
491
+ i∈D1 W i1(Li ≤ ℓα) + �
492
+ i∈D1 W i1(Li > ℓα) + W n+1
493
+
494
+ ≥ (1 − α)
495
+ (1 − β).
496
+ Using this expression, we have that
497
+ P{Ln+1 ≤ ℓα(D1, W n+1)} − (1 − α)
498
+ (1 − β)
499
+ ≥ E
500
+ ��n+1
501
+ i=n+ Wi1(Li ≤ ℓα)
502
+ �n+1
503
+ i=n+ Wi
504
+
505
+ − E
506
+
507
+ �n
508
+ i=n+ W i1(Li ≤ ℓα)
509
+ �n
510
+ i=n+ W i1(Li ≤ ℓα) + �n
511
+ i=n+ W i1(Li > ℓα) + W n+1
512
+
513
+ = E
514
+
515
+
516
+ (∗)
517
+ ��n+1
518
+ i=n+ Wi
519
+ � ��n
520
+ i=n+ W i1(Li ≤ ℓα) + �n
521
+ i=n+ W i1(Li > ℓα) + W n+1
522
+
523
+
524
+ � ,
525
+ 7
526
+
527
+ where
528
+ (∗) =
529
+ � n+1
530
+
531
+ i=n+
532
+ Wi1(Li ≤ ℓα)
533
+ ��
534
+ n
535
+
536
+ i=n+
537
+ W i1(Li ≤ ℓα) +
538
+ n
539
+
540
+ i=n+
541
+ W i1(Li > ℓα) + W n+1
542
+
543
+
544
+
545
+ n
546
+
547
+ i=n+
548
+ W i1(Li ≤ ℓ��)
549
+ �� n+1
550
+
551
+ i=n+
552
+ Wi
553
+
554
+
555
+
556
+ n
557
+
558
+ i=n+
559
+ Wi1(Li ≤ ℓα)
560
+ ��
561
+ n
562
+
563
+ i=n+
564
+ W i1(Li ≤ ℓα) +
565
+ n
566
+
567
+ i=n+
568
+ W i1(Li > ℓα) + W n+1
569
+
570
+
571
+
572
+ n
573
+
574
+ i=n+
575
+ W i1(Li ≤ ℓα)
576
+ �� n+1
577
+
578
+ i=n+
579
+ Wi
580
+
581
+
582
+
583
+ n
584
+
585
+ i=n+
586
+ Wi1(Li ≤ ℓα)
587
+ ��
588
+ n
589
+
590
+ i=n+
591
+ W i1(Li > ℓα) + W n+1
592
+
593
+
594
+
595
+ n
596
+
597
+ i=n+
598
+ W i1(Li ≤ ℓα)
599
+ ��
600
+ n
601
+
602
+ i=n+
603
+ Wi1(Li > ℓα) + Wn+1
604
+
605
+
606
+
607
+ n
608
+
609
+ i=n+
610
+ Wi1(Li ≤ ℓα)
611
+ ��
612
+ n
613
+
614
+ i=n+
615
+ Wi1(Li > ℓα) + Wn+1
616
+
617
+
618
+
619
+ n
620
+
621
+ i=n+
622
+ Wi1(Li ≤ ℓα)
623
+ ��
624
+ n
625
+
626
+ i=n+
627
+ Wi1(Li > ℓα) + Wn+1
628
+
629
+ = 0,
630
+ using the bounds (10) on the fourth line. Therefore we obtain a valid limit:
631
+ P{Ln+1 ≤ ℓα(D1, W n+1)} ≥ (1 − α)
632
+ (1 − β).
633
+ (17)
634
+ However, W n+1 depends on a new sample drawn from the training distribution and thus the limit is unusable.
635
+ In lieu of W n+1, we use wβ(D0) in (14) to define the modified limit
636
+ ℓα(D) = inf
637
+
638
+ ℓ : �F(ℓ; D1, wβ(D0)) ≥ 1 − α
639
+ 1 − β
640
+
641
+ .
642
+ (18)
643
+ Comparing it with (16), we see that
644
+ ℓα(D) ≥ ℓα(D1, W n+1),
645
+ (19)
646
+ whenever W n+1 ≤ wβ(D0). By the construction in (14), the probability of this event is lower bounded by
647
+ P{W n+1 ≤ wβ(D0)} ≥ 1 − β,
648
+ (20)
649
+ see (Vovk et al., 2005; Lei et al., 2018).
650
+ We use this property to lower bound the probability of Ln+1 ≤ ℓα(D). First note that
651
+ P{Ln+1 ≤ ℓα(D)} = P{Ln+1 ≤ ℓα(D) | W n+1 ≤ wβ(D0)} P{W n+1 ≤ wβ(D0)}
652
+ + P{Ln+1 ≤ ℓα(D) | W n+1 > wβ(D0)} P{ W n+1 > wβ(D0)}
653
+ ≥ P{Ln+1 ≤ ℓα(D) | W n+1 ≤ wβ(D0)} P{W n+1 ≤ wβ(D0)} + 0.
654
+ The first factor can be lower bounded using (19), so that
655
+ P{Ln+1 ≤ ℓα(D)} ≥ P{Ln+1 ≤ ℓα(D1, W n+1) | W n+1 ≤ wβ(D0)} P{W n+1 ≤ wβ(D0)}
656
+ = P{Ln+1 ≤ ℓα(D1, W n+1)} P{W n+1 ≤ wβ(D0)}
657
+ ≥ (1 − α)
658
+ (1 − β) P{W n+1 ≤ wβ(D0)}
659
+ ≥ 1 − α.
660
+ (21)
661
+ The second line follows from using sample splitting, which ensures that Ln+1 ≤ ℓα(D1, W n+1) and W n+1 ≤
662
+ wβ(D0) are independent events. The third and fourth lines follow from (17) and (20), respectively. Since
663
+ (21) holds for any 0 < β < α, we choose β in (18) that yields the tightest limit, cf. (13).
664
+ 8
665
+
666
+ 5
667
+ Numerical experiments
668
+ In the experiments below, we evaluate policies using the limit curves (α, ℓα). Note that the extremum loss
669
+ ℓmax in a given problem provides a valid but uninformative limit across all α. We therefore quantify the
670
+ informativeness of a valid limit curve as follows:
671
+ Informativeness = 1 − α∗, where α∗ = sup{α : ℓα < ℓmax}.
672
+ That is, the lowest coverage probability at which we can informatively certify the performance of π. We can
673
+ therefore quantify increasing the credibility of our model assumption by Γ affects the informativeness of the
674
+ limit curve. We also consider the coverage probability of the curves:
675
+ Miscoverage gap = Pπ{Ln+1 > ℓα(D)} − α.
676
+ (22)
677
+ When this gap is positive, the limit is conservative and when the gap is negative the limit is invalid,
678
+ respectively, at level α.
679
+ A natural benchmark for the proposed limit (13) in this problem setting is the estimated quantile
680
+ ℓα(D) = inf
681
+
682
+ ℓ : �FIPW(ℓ; D) ≥ 1 − α
683
+
684
+ ,
685
+ (23)
686
+ using the inverse propensity weighted cdf-estimator (9).
687
+ In all examples below, the limit (13) is computed using sample splits of equal size, i.e., n0 = n/2.
688
+ 5.1
689
+ Synthetic data
690
+ In the first example, we consider synthetic data in order to evaluate the coverage of the derived limit curves.
691
+ We use a simulation setting similar to Jin et al. (2021). The miscoverage gap (22) is estimated by Monte
692
+ Carlo simulation using n′ = 1000 independent samples over N = 1000 independent runs, i.e., in total 106
693
+ samples.
694
+ We consider a population of individuals with two-dimensional covariates distributed uniformly as
695
+ X =
696
+ �X1
697
+ X2
698
+
699
+ ∼ U(0, 1)2.
700
+ The actions are binary A ∈ {0, 1} corresponding to ‘not treat’ and ‘treat’, respectively. We want to evaluate
701
+ a deterministic target policy, described by
702
+ pπ(A = 0|X) = 1(X1X2 ≥ τ),
703
+ (24)
704
+ for different τ ∈ [0, 1]. That is, all individuals whose covariate product X1X2 falls below τ are treated. Note
705
+ that τ = 0 corresponds a ‘treat none’ policy (A ≡ 0 for all X) and τ = 1 corresponds to a ‘treat all’ policy
706
+ (A ≡ 1 for all X). What can we say about the resulting losses under this policy using observational data
707
+ with sample sizes n ∈ {250, 500, 1000}.
708
+ Case: Known past policy (Γ = 1) In the first scenario, we assume that the past policy is known exactly
709
+ and there is therefore no unmeasured confounding.
710
+ For the training data, the past policy determined actions as a Bernoulli process, where
711
+ p(A = 0|X) ≡ �p(A = 0|X) = f
712
+
713
+ c(X1X2 + 1)
714
+
715
+ ,
716
+ c ∈
717
+ �1
718
+ 2, 2
719
+
720
+ ,
721
+ (25)
722
+ and f(·) is the sigmoid function. The conditional loss distribution is given by
723
+ L|A = 0, X ∼ N(1 − X1X2, 0.1)
724
+ and
725
+ L|A = 1, X ∼ N(X1X2, 0.1).
726
+ 9
727
+
728
+ We consider three configurations c of past policies (25), which yield inverse propensity weights in three
729
+ ranges:
730
+ 1
731
+ p1(A|X) < 3.72 (c = 1/2),
732
+ 1
733
+ p2(A|X) < 8.39 (c = 1), and
734
+ 1
735
+ p3(A|X) < 55.6 (c = 2). Thus we anticipate
736
+ p3(A|X) to be the most challenging case.
737
+ Here we evaluate three target policies τ = {0, 0.5, 1} in (24) and present their resulting limit curves using
738
+ data from different past policies (25) in Figure 3. The differing dashed lines shows the corresponding past
739
+ policy.
740
+ The limit curves for a given target policy are very similar across training distributions and are
741
+ informative above the 90% level. The main difference is in the inferred tail losses and is notable for when
742
+ τ = 1 under the more challenging past policy p3(A|X).
743
+ We now turn to evaluating miscoverage gap (22). Figure 4 presents gaps for target policy τ = 0.5 in (24).
744
+ The solid lines illustrate the proposed method and the dashed lines show the benchmark (23). We see that
745
+ the proposed method is slightly conservative, but remains valid for all α. By contrast, the benchmark is not
746
+ valid in the tail of the distribution, but is less conservative for higher α in this well-specified case.
747
+ −0.5
748
+ 0.0
749
+ 0.5
750
+ 1.0
751
+ 1.5
752
+ 0%
753
+ 20%
754
+ 40%
755
+ 60%
756
+ 80%
757
+ 100%
758
+ 1 − α
759
+ p1(A|X)
760
+ −0.5
761
+ 0.0
762
+ 0.5
763
+ 1.0
764
+ 1.5
765
+ ℓα
766
+ 0%
767
+ 20%
768
+ 40%
769
+ 60%
770
+ 80%
771
+ 100%
772
+ p2(A|X)
773
+ −0.5
774
+ 0.0
775
+ 0.5
776
+ 1.0
777
+ 1.5
778
+ 0%
779
+ 20%
780
+ 40%
781
+ 60%
782
+ 80%
783
+ 100%
784
+ p3(A|X)
785
+ pπ(A|X)
786
+ τ = 0.0
787
+ τ = 0.5
788
+ τ = 1.0
789
+ pπ = p
790
+ Figure 3: Limit curves (ℓα, 1 − α) when the past policy is known (Γ = 1) for three different potential target
791
+ policies (i.e. τ = {0.0, 0.5, 1.0} in (24)). Dashed curve denotes the past policy. n = 1000.
792
+ 0.0
793
+ 0.2
794
+ 0.4
795
+ 0.6
796
+ 0.8
797
+ 1.0
798
+ −0.025
799
+ 0.000
800
+ 0.025
801
+ 0.050
802
+ 0.075
803
+ 0.100
804
+ 0.125
805
+ 0.150
806
+ Miscoverage gap
807
+ n = 250
808
+ 0.0
809
+ 0.2
810
+ 0.4
811
+ 0.6
812
+ 0.8
813
+ 1.0
814
+ Target α
815
+ −0.025
816
+ 0.000
817
+ 0.025
818
+ 0.050
819
+ 0.075
820
+ 0.100
821
+ 0.125
822
+ 0.150
823
+ n = 500
824
+ 0.0
825
+ 0.2
826
+ 0.4
827
+ 0.6
828
+ 0.8
829
+ 1.0
830
+ −0.025
831
+ 0.000
832
+ 0.025
833
+ 0.050
834
+ 0.075
835
+ 0.100
836
+ 0.125
837
+ 0.150
838
+ n = 1000
839
+ Past policy
840
+ p1(A|X)
841
+ p2(A|X)
842
+ p3(A|X)
843
+ Type
844
+ Proposed
845
+ Benchmark
846
+ Figure 4: Miscoverage gaps (22) across α, when the past policy is known (Γ = 1). Dashed curve denotes the
847
+ benchmark (23).
848
+ Case: Estimated past policy (Γ > 1) In the second scenario, we assume that we only have access to an
849
+ estimate �p(A|X) and that there is unmeasured confounding in the training distribution. To render visually
850
+ distinct curves from the previous case, we consider here a rather extreme case of confounding following Jin
851
+ et al. (2021).
852
+ In this case we have an unobserved variable drawn as
853
+ U|X ∼ N(0, 0.1(X1 + X2)),
854
+ and the loss L|A, X, U is generated as
855
+ L =
856
+
857
+ 1 − X1X2 + U,
858
+ A = 0,
859
+ X1X2 + U,
860
+ A = 1.
861
+ 10
862
+
863
+ We define the past policy in a manner that enables us to control the divergence from the nominal model
864
+ �p(A|X) in (25):
865
+ p(A = 0|X, U) = 1(U ≤ t(X))
866
+
867
+ 1 + Γ−1
868
+ 0
869
+
870
+ �p(A = 0|X
871
+ �−1 − 1)
872
+
873
+ + 1(U > t(X))
874
+
875
+ 1 + Γ0
876
+
877
+ �p(A = 0|X)−1 − 1
878
+ ��
879
+ ,
880
+ (26)
881
+ where the threshold function t(X) is designed empirically to ensure that the resulting median loss of the
882
+ past policy for A = 1 is maximized. Our design of the past policy can be seen as a worst case among all
883
+ unknown past policies that diverge by a factor Γ0 in (6). We fix Γ0 = 2 here, but treat it as unknown.
884
+ For clarity, we consider a ‘treat all’ target policy (τ = 1). Its limit curves, under different assumed odds
885
+ bounds Γ = {1, 2, 3}, are presented in Figure 5. Note that under unmeasured confounding, the resulting
886
+ curves differ notably across the training distributions unlike in Figure 3. We see that under the first and
887
+ second distributions, the informativeness of all curves stays around the 90% level. However, in the most
888
+ extreme third case, the informativeness drops to barely above the 60% level when we increase the credibility
889
+ of our model assumption to an odds bound of Γ = 3. This example illustrates an inherent trade-off between
890
+ credibility and informativeness.
891
+ Figure 6 validates our guarantees using data drawn from p1(A|X). When Γ ≥ Γ0 = 2, the limit curves are
892
+ valid and as Γ increases to 3, the limits become quite conservative. Note that the conservativeness persists
893
+ even as the sample size n is increased fourfold. For Γ = 1, there is no guarantee of coverage and in this worst
894
+ case scenario the limit curve is invalid. The benchmark does not take unmeasured confounding into account
895
+ and is consequently invalid throughout.
896
+ −0.5
897
+ 0.0
898
+ 0.5
899
+ 1.0
900
+ 0%
901
+ 20%
902
+ 40%
903
+ 60%
904
+ 80%
905
+ 100%
906
+ 1 − α
907
+ p1(A|X)
908
+ −0.5
909
+ 0.0
910
+ 0.5
911
+ 1.0
912
+ ℓα
913
+ 0%
914
+ 20%
915
+ 40%
916
+ 60%
917
+ 80%
918
+ 100%
919
+ p2(A|X)
920
+ −0.5
921
+ 0.0
922
+ 0.5
923
+ 1.0
924
+ 0%
925
+ 20%
926
+ 40%
927
+ 60%
928
+ 80%
929
+ 100%
930
+ p3(A|X)
931
+ Gamma Γ
932
+ 1
933
+ 2
934
+ 3
935
+ Figure 5: Limit curves (ℓα, 1 − α) for ‘treat all’ target policy using odds bounds Γ = {1, 2, 3}, when the past
936
+ policy is unknown and subject to unmeasured confounding (Γ0 = 2 in (26)). n = 1000.
937
+ 0.0
938
+ 0.2
939
+ 0.4
940
+ 0.6
941
+ 0.8
942
+ 1.0
943
+ −0.2
944
+ −0.1
945
+ 0.0
946
+ 0.1
947
+ 0.2
948
+ Miscoverage gap
949
+ n = 250
950
+ 0.0
951
+ 0.2
952
+ 0.4
953
+ 0.6
954
+ 0.8
955
+ 1.0
956
+ Target α
957
+ −0.2
958
+ −0.1
959
+ 0.0
960
+ 0.1
961
+ 0.2
962
+ n = 500
963
+ 0.0
964
+ 0.2
965
+ 0.4
966
+ 0.6
967
+ 0.8
968
+ 1.0
969
+ −0.2
970
+ −0.1
971
+ 0.0
972
+ 0.1
973
+ 0.2
974
+ n = 1000
975
+ Gamma Γ
976
+ 1
977
+ 2
978
+ 3
979
+ Type
980
+ Proposed
981
+ Benchmark
982
+ Figure 6: Miscoverage gaps (22) across α, when the past policy is unknown and subject to unmeasured
983
+ confounding. Dashed curve denotes the benchmark (23) which does not take confounding into account.
984
+ 6 Conclusion
988
+ We have considered the problem of off-policy evaluation, i.e., drawing valid inferences of a target policy
989
+ using past observational data obtained under a different decision process with a, possibly unknown, policy.
990
+ Using the marginal sensitivity model, we derive a sample-splitting method that provides limit curves with
991
+ finite-sample coverage guarantees and, importantly, takes into account model misspecifications and unmea-
992
+ sured confounding. The validity, informativeness, and conservativeness of the resulting limit curves were
993
+ demonstrated in the numerical experiments.
994
+ Using this method in any specific problem, we can specify a range of credible model assumptions and assess the
995
+ corresponding degrees of informativeness of the limits, which are guaranteed to be valid up to any specified
996
+ odds ratio bound.
997
+ References
998
+ Heejung Bang and James M Robins. Doubly robust estimation in missing data and causal inference models.
999
+ Biometrics, 61(4):962–973, 2005.
1000
+ Alina Beygelzimer, Sanjoy Dasgupta, and John Langford. Importance weighted active learning. In Proceed-
1001
+ ings of the 26th annual international conference on machine learning, pp. 49–56, 2009.
1002
+ Yash Chandak, Scott Niekum, Bruno da Silva, Erik Learned-Miller, Emma Brunskill, and Philip S Thomas.
1003
+ Universal off-policy evaluation.
1004
+ Advances in Neural Information Processing Systems, 34:27475–27490,
1005
+ 2021.
1006
+ Miroslav Dudík, John Langford, and Lihong Li. Doubly robust policy evaluation and learning. In Proceedings
1007
+ of the 28th International Conference on International Conference on Machine Learning, pp. 1097–1104,
1008
+ 2011.
1009
+ Alexander D’Amour, Peng Ding, Avi Feller, Lihua Lei, and Jasjeet Sekhon. Overlap in observational studies
1010
+ with high-dimensional covariates. Journal of Econometrics, 221(2):644–654, 2021.
1011
+ Daniel G Horvitz and Donovan J Thompson. A generalization of sampling without replacement from a finite
1012
+ universe. Journal of the American statistical Association, 47(260):663–685, 1952.
1013
+ Audrey Huang, Liu Leqi, Zachary Lipton, and Kamyar Azizzadenesheli. Off-policy risk assessment in con-
1014
+ textual bandits. Advances in Neural Information Processing Systems, 34:23714–23726, 2021.
1015
+ Ying Jin, Zhimei Ren, and Emmanuel J Candès. Sensitivity analysis of individual treatment effects: A
1016
+ robust conformal inference approach. arXiv preprint arXiv:2111.12161, 2021.
1017
+ Nathan Kallus. Balanced policy evaluation and learning. Advances in neural information processing systems,
1018
+ 31, 2018.
1019
+ Nathan Kallus and Angela Zhou. Minimax-optimal policy learning under unobserved confounding. Manage-
1020
+ ment Science, 67(5):2870–2890, 2021.
1021
+ Joseph DY Kang and Joseph L Schafer.
1022
+ Demystifying double robustness: A comparison of alternative
1023
+ strategies for estimating a population mean from incomplete data.
1024
+ Statistical science, 22(4):523–539,
1025
+ 2007.
1026
+ John Langford and Tong Zhang. The epoch-greedy algorithm for contextual multi-armed bandits. Advances
1027
+ in neural information processing systems, 20(1):96–1, 2007.
1028
+ Tor Lattimore and Csaba Szepesvári. Bandit algorithms. Cambridge University Press, 2020.
1029
+ Jing Lei and Larry Wasserman. Distribution-free prediction bands for non-parametric regression. Journal
1030
+ of the Royal Statistical Society: Series B (Statistical Methodology), 76(1):71–96, 2014.
1031
1033
+ Jing Lei, Max G’Sell, Alessandro Rinaldo, Ryan J Tibshirani, and Larry Wasserman.
1034
+ Distribution-free
1035
+ predictive inference for regression. Journal of the American Statistical Association, 113(523):1094–1111,
1036
+ 2018.
1037
+ Charles F Manski. Identification problems in the social sciences and everyday life. Southern Economic
1038
+ Journal, 70(1):11–21, 2003.
1039
+ Charles F Manski. Patient Care Under Uncertainty. Princeton University Press, 2019.
1040
+ Michael Oberst, Fredrik Johansson, Dennis Wei, Tian Gao, Gabriel Brat, David Sontag, and Kush Varshney.
1041
+ Characterization of overlap in observational studies. In International Conference on Artificial Intelligence
1042
+ and Statistics, pp. 788–798. PMLR, 2020.
1043
+ Muhammad Osama, Dave Zachariah, and Peter Stoica. Learning robust decision policies from observational
1044
+ data. Advances in Neural Information Processing Systems, 33:18205–18214, 2020.
1045
+ Jonas Peters, Dominik Janzing, and Bernhard Schölkopf. Elements of causal inference: foundations and
1046
+ learning algorithms. The MIT Press, 2017.
1047
+ Min Qian and Susan A Murphy.
1048
+ Performance guarantees for individualized treatment rules. Annals of
1049
+ statistics, 39(2):1180, 2011.
1050
+ Yaniv Romano, Evan Patterson, and Emmanuel Candes. Conformalized quantile regression. In Advances in
1051
+ Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019.
1052
+ Paul R Rosenbaum and Donald B Rubin. The central role of the propensity score in observational studies
1053
+ for causal effects. Biometrika, 70(1):41–55, 1983.
1054
+ Andrea Rotnitzky, Quanhong Lei, Mariela Sued, and James M Robins. Improved double-robust estimation
1055
+ in missing data and causal inference models. Biometrika, 99(2):439–456, 2012.
1056
+ Donald B Rubin. Using propensity scores to help design observational studies: application to the tobacco
1057
+ litigation. Health Services and Outcomes Research Methodology, 2(3):169–188, 2001.
1058
+ Joseph L Schafer and Joseph Kang. Average causal effects from nonrandomized studies: a practical guide
1059
+ and simulated example. Psychological methods, 13(4):279, 2008.
1060
+ Henry Scheffe and John W Tukey. Non-parametric estimation. i. validation of order statistics. The Annals
1061
+ of Mathematical Statistics, 16(2):187–192, 1945.
1062
+ Glenn Shafer and Vladimir Vovk. A tutorial on conformal prediction. Journal of Machine Learning Research,
1063
+ 9(3), 2008.
1064
+ Alex Strehl, John Langford, Lihong Li, and Sham M Kakade. Learning from logged implicit exploration
1065
+ data. Advances in neural information processing systems, 23, 2010.
1066
+ Zhiqiang Tan. A distributional approach for causal inference using propensity scores. Journal of the American
1067
+ Statistical Association, 101(476):1619–1637, 2006.
1068
+ Muhammad Faaiz Taufiq, Jean-Francois Ton, Rob Cornish, Yee Whye Teh, and Arnaud Doucet.
1069
+ Con-
1070
+ formal off-policy prediction in contextual bandits.
1071
+ In Alice H. Oh, Alekh Agarwal, Danielle Bel-
1072
+ grave, and Kyunghyun Cho (eds.), Advances in Neural Information Processing Systems, 2022.
1073
+ URL
1074
+ https://openreview.net/forum?id=IfgOWI5v2f.
1075
+ Ryan J Tibshirani, Rina Foygel Barber, Emmanuel Candes, and Aaditya Ramdas. Conformal prediction un-
1076
+ der covariate shift. In Advances in Neural Information Processing Systems, volume 32. Curran Associates,
1077
+ Inc., 2019.
1078
+ Anastasios A Tsiatis, Marie Davidian, Shannon T Holloway, and Eric B Laber. Dynamic treatment regimes:
1079
+ Statistical methods for precision medicine. Chapman and Hall/CRC, 2019.
1080
1082
+ Vladimir Vovk. Conditional validity of inductive conformal predictors. In Asian conference on machine
1083
+ learning, pp. 475–490. PMLR, 2012.
1084
+ Vladimir Vovk, Alexander Gammerman, and Glenn Shafer. Algorithmic learning in a random world. Springer
1085
+ Science & Business Media, 2005.
1086
+ Abraham Wald. An extension of wilks’ method for setting tolerance limits. The Annals of Mathematical
1087
+ Statistics, 14(1):45–55, 1943.
1088
+ Lan Wang, Yu Zhou, Rui Song, and Ben Sherwood. Quantile-optimal treatment regimes. Journal of the
1089
+ American Statistical Association, 113(523):1243–1254, 2018.
1090
+ D. Westreich. Epidemiology by Design: A Causal Approach to the Health Sciences. Oxford University Press,
1091
+ Incorporated, 2019. ISBN 9780190665760. URL https://books.google.se/books?id=5R2yDwAAQBAJ.
1092
+ Samuel S Wilks. Determination of sample sizes for setting tolerance limits. The Annals of Mathematical
1093
+ Statistics, 12(1):91–96, 1941.
1094
+ Baqun Zhang, Anastasios A Tsiatis, Marie Davidian, Min Zhang, and Eric Laber.
1095
+ Estimating optimal
1096
+ treatment regimes from a classification perspective. Stat, 1(1):103–114, 2012.
1097
+ Yingqi Zhao, Donglin Zeng, A John Rush, and Michael R Kosorok. Estimating individualized treatment rules
1098
+ using outcome weighted learning. Journal of the American Statistical Association, 107(499):1106–1118,
1099
+ 2012.
1100
_dFAT4oBgHgl3EQfqx1t/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
bNAzT4oBgHgl3EQfLPu9/content/tmp_files/2301.01112v1.pdf.txt ADDED
@@ -0,0 +1,1206 @@
1
+ arXiv:2301.01112v1 [quant-ph] 3 Jan 2023
2
+ Time-Optimal Transport of a Harmonic Oscillator: Analytic Solution
3
+ Gerhard C. Hegerfeldt1
4
+ 1Institut f¨ur Theoretische Physik, Universit¨at G¨ottingen,
5
+ Friedrich-Hund-Platz 1, D-37077 G¨ottingen, Germany
6
+ Motivated by the experimental transport of a trap with a quantum mechanical system modeled
7
+ as a harmonic oscillator (h.o.) the corresponding classical problem is investigated. Protocols for
8
+ the fastest possible transport of a classical h.o. in a wagon over a distance d are derived where
9
+ both initially and finally the wagon is at rest and the h.o. is in its equilibrium position and also at
10
+ rest. The acceleration of the wagon is assumed to be bounded. For fixed oscillator frequency Ω it is
11
+ shown that there are in general three switches in the acceleration and for special values of Ω only
12
+ one switch. In the latter case the optimal transport time is Tabs, that of a wagon without oscillator.
13
+ The optimal transport time and the switch times are determined. It is shown that in some cases it
14
+ is advantageous to go backwards for a while. In addition a time-dependent Ω(t), bounded by Ω±,
15
+ is allowed. In this case the behavior depends sensitively on Ω± and is spelled out in detail. In
16
+ particular, depending on Ω±, Tabs may be obtained in continuously many ways.
17
+ PACS numbers:
18
+ I.
19
+ INTRODUCTION
20
+ Adiabatic processes may serve to transform an initial
21
+ state of a system to a prescribed final state. Such pro-
22
+ cesses, however, are very slow and, in principle, infinitely
23
+ slow. Protocols for speeding up the time development
24
+ have been introduced in the past, with numerous appli-
25
+ cations in quantum optics [1–22] and to classical systems,
26
+ e.g.
27
+ cranes [23].
28
+ These methods include ‘shortcuts to
29
+ adiabaticity’ (STA) [1–10], ‘counterdiabatic’ approaches
30
+ [11–13] and the ‘fast-forward’ approach [15–18]. In gen-
31
+ eral the above mentioned protocols yield a speed-up, but
32
+ not necessarily the fastest possible time development.
33
+ Other methods are combinations with control theory [24–
34
+ 26], cf. e.g. [19, 27, 28]. While a time development as
35
+ fast as possible is often desired, other considerations like
36
+ robustness and further conditions may prolong the re-
37
+ sulting time duration.
38
+ A particular example is the efficient transport of ul-
39
+ tra cold atoms and ions by moving the confining trap.
40
+ An atom or ion in a harmonic trap can be treated to
41
+ good approximation as a quantum harmonic oscillator.
42
+ For harmonic traps efficient protocols have been inves-
43
+ tigated with STA and the invariant-based inverse engi-
44
+ neering method to obtain transitionless evolutions under
45
+ imposed constraints, faster than by an adiabatic process
46
+ [6, 28]. It is therefore natural to ask how fast the trans-
47
+ port of a quantum harmonic oscillator can be made. This
48
+ depends of course on the particular question one is inter-
49
+ ested in, for example a time-optimal transport of a har-
50
+ monic oscillator under additional conditions.
51
+ Insight for the quantum case may be obtained by ask-
52
+ ing the same question for a classical harmonic oscillator.
53
+ Therefore in this paper the time-optimal transport of a
54
+ classical harmonic oscillator will be investigated.
55
+ Consider a classical one-dimensional harmonic oscilla-
56
+ tor (h.o.) without friction in the center of a long wagon,
57
+ such as depicted in Fig. 1 where a small mass m is at-
58
+ tached to a spring on the wagon. When the wagon is
59
+ accelerated the h.o. will start to perform oscillations. In
60
+ this case the frequency Ω of the h.o.
61
+ depends on the
62
+ spring constant and on m.
63
+ The problem to be investigated is the following:
64
+ (i) Initially the wagon is at rest and the h.o. is in its
65
+ equilibrium position, also at rest.
66
+ (ii) Then the wagon undergoes an acceleration a(t),
67
+ where a(t) can vary between ±amax, until it has traveled
68
+ a prescribed distance d.
69
+ (iii) Upon arrival at the end point the system should
70
+ again be in its initial state, i.e.
71
+ the wagon should be
72
+ at rest, and the h.o. should again be in its equilibrium
73
+ position and at rest.
74
+ The questions to be answered here are: Is this achiev-
75
+ able, and if so what is the shortest time possible? Can
76
+ this time be further lowered by allowing the h.o.
77
+ fre-
78
+ quency Ω to be time dependent, i.e. Ω(t)? Both ques-
79
+ tions will be answered in the affirmative.
80
86
+ FIG. 1: Oscillating mass m attached to a spring in an
87
+ accelerated wagon
88
+ The plan of the paper is as follows. First, in Section II,
89
+ a fixed oscillator frequency will be considered, examples
90
+ will be given and a complete solution of the problem and
91
+ an explicit protocol for fixed Ω will be formulated. In
92
+ Section III detailed proofs are provided. In Section IV the
93
+ case of a time-dependent oscillator frequency is treated
94
+ where Ω(t) satisfies Ω− ≤ Ω(t) ≤ Ω+, with arbitrary Ω±.
95
+ The results and protocols will be seen to depend critically
96
+ on the particular choice of Ω±. Finally, in Section V the
97
+ results are summarized and discussed.
98
+
99
+ 2
100
+ II.
101
+ OPTIMAL PROTOCOL FOR FIXED
102
+ OSCILLATOR FREQUENCY
103
+ We consider a classical one-dimensional harmonic os-
104
+ cillator on a long wagon. The position of the h.o. (i.e.
105
+ mass point) relative to the wagon center will be denoted
106
+ by xh and the position of the wagon center in the external
107
+ rest frame by xw. When the wagon is accelerated with
108
+ acceleration a(t), the mass point additionally experiences
109
+ the corresponding inertial force −ma in the rest frame of
110
+ the wagon so that one has
111
+ ¨xh = −Ω2xh − a
112
+ (1)
113
+ ¨xw = a .
114
+ It is assumed that a(t) can vary between ±amax.
115
+ Example 1. With no h.o. present, to move a wagon a
116
+ distance d in shortest time, with initial and final veloc-
117
+ ity equal to zero, it is optimal to accelerate with amax
118
+ for half the distance and then decelerate with −amax [25]
119
+ (cf. solid line in Fig. 2). The corresponding time Tabs(d), with Tabs² = 4 d/amax, can at most be achieved, but not un-
122
+ dercut, if a h.o. in the wagon is to be initially and finally
123
+ at rest in its equilibrium position.
124
+ Example 2. For special ’resonant values’ of Ω this time
125
+ can indeed be achieved, e.g. for
126
+ Ω = n Ωres(d),   n = 1, 2, · · · ,   Ωres(d) = √(4π²amax/d) = 4π/Tabs .   (2)
132
+ To see this consider n = 1. Initially, the wagon and h.o.
133
+ are at rest. Upon accelerating the wagon by amax the h.o.
134
+ experiences, in the wagon frame, the additional inertial
135
+ force −ma and starts to move to the left. During the
136
+ time Tabs/2 it has just performed a single oscillation, has
137
+ returned to its initial position in the wagon and is at rest
138
+ relative to the wagon. In this instant, the acceleration
139
+ of the wagon is reversed, the h.o. starts moving to the
140
+ right and at a further time duration of Tabs/2 is back
141
+ at rest at the initial position, with the wagon at rest
142
+ and having traveled the distance d. For n > 1 one has
143
+ correspondingly more oscillations.
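+ (As an illustration, not from the paper: the resonance
+ argument can be checked numerically. The Python sketch
+ below integrates Eq. (1) with a single switch at Tabs/2
+ for Ω = Ωres(d); the values of d, amax and the number of
+ steps are arbitrary.)
+ import numpy as np
+ amax, d = 1.0, 2.0                       # arbitrary illustrative values
+ Tabs = 2.0 * np.sqrt(d / amax)           # Tabs^2 = 4 d / amax
+ Omega = 4.0 * np.pi / Tabs               # resonant frequency, n = 1 in Eq. (2)
+ def simulate(T, n_steps=200000):
+     dt = T / n_steps
+     xh = vh = xw = vw = 0.0
+     for k in range(n_steps):
+         t = (k + 0.5) * dt
+         a = amax if t < T / 2 else -amax         # single switch at Tabs/2
+         vh += (-Omega**2 * xh - a) * dt          # symplectic Euler step for Eq. (1)
+         xh += vh * dt
+         vw += a * dt
+         xw += vw * dt
+     return xh, vh, xw, vw
+ xh, vh, xw, vw = simulate(Tabs)
+ print(xh, vh, xw - d, vw)                # all four are close to zero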
144
+ For fixed Ω a protocol to obtain the unique optimal
145
+ transport time is constructed as follows.
146
+ (i) For given d determine the unique optimal time tf by
147
+ the equation
148
+ d = (1/4) amax tf² [ 1 − (8/(Ω tf)²) ( arccos(cos²(Ω tf/4)) )² ] .   (3)
156
+ (ii) With wagon and oscillator at rest at t = 0, accelerate
157
+ with amax until time (1/2)tf − t1 where t1, 0 ≤ Ωt1 ≤ π/2, is given by
+ t1 = (1/Ω) arccos(cos²(Ω tf/4)) .   (4)
163
+ (iii) Decelerate with −amax until time (1/2)tf.
+ (iv) Accelerate with amax until time (1/2)tf + t1.
167
+ FIG. 2: Typical wagon velocities for the acceleration
168
+ alternating between ±1. Solid curve: No oscillator
169
+ present and Example 2 with resonant Ω. Dashed and
170
+ dotted curves: General Ω. For the dotted curve the
171
+ wagon velocity becomes partially negative, i.e. the
172
+ wagon moves backwards for some time.
173
+ (v) Finally decelerate with −amax until time tf.
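+ (As an illustration, a small numerical sketch of steps
+ (i) and (ii): it solves Eq. (3) for tf by bisection on an
+ assumed bracket [Tabs, 3 Tabs] and evaluates t1 from
+ Eq. (4); d, amax and Ω are arbitrary, and for small Ω tf
+ a more careful root search may be needed.)
+ import numpy as np
+ d, amax, Omega = 5.0, 1.0, 3.0
+ def distance(tf):
+     # right-hand side of Eq. (3)
+     theta = np.arccos(np.cos(Omega * tf / 4.0) ** 2)
+     return 0.25 * amax * tf**2 * (1.0 - 8.0 * theta**2 / (Omega * tf) ** 2)
+ lo = 2.0 * np.sqrt(d / amax)             # Tabs(d)
+ hi = 3.0 * lo                            # assumed upper end of the bracket
+ for _ in range(200):
+     mid = 0.5 * (lo + hi)
+     if distance(mid) < d:
+         lo = mid
+     else:
+         hi = mid
+ tf = 0.5 * (lo + hi)
+ t1 = np.arccos(np.cos(Omega * tf / 4.0) ** 2) / Omega        # Eq. (4)
+ print(tf, t1, [tf / 2 - t1, tf / 2, tf / 2 + t1])            # switch times of (ii)-(iv)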
174
+ Typical wagon velocities are depicted in Fig. 2. At the
175
+ end the wagon is obviously at rest. The oscillator may
176
+ perform several oscillations. That finally it is also again
177
+ at rest and in its equilibrium position will be shown at
178
+ the end of this section.
179
+ In the next section it will be
180
+ shown that tf is indeed the unique optimal time. The
181
+ above protocol has a certain symmetry; there may, or
182
+ may not, be other, nonsymmetric, protocols which lead
183
+ to the same unique optimal time.
184
+ Note that t1 = 0 if Ω tf = 4nπ, n = 1, 2, · · · , which
185
+ recovers Example 2 with tf = Tabs. If t1 > (1/4)tf the wagon
187
+ velocity temporarily becomes negative (dotted curve in
188
+ Fig. 2), i.e. then it is advantageous to go backwards for
189
+ a while. From Eqs. (3, 4) this is seen to happen if
190
+ Ω² < (1/4) Ωres(d)²,   (5)
193
+ i.e. for small oscillator frequency. However, it can easily
194
+ be shown that the backward motion will not go back as
195
+ far as the original starting position of the wagon.
196
+ If one plots d as a function of tf in Eq.(3) then tf
197
+ as a function of d is given by reflecting it at the diag-
198
+ onal. In dimensionless scaled variables, the solid curve
199
+ in Fig.
200
+ 3 displays Ω tf as a function of d/dΩ where
201
+ dΩ = 4π2amax Ω−2 is the distance for which Ω is reso-
202
+ nant, i.e. Ωres(dΩ) = Ω. The dashed curve is the corre-
203
+ sponding Tabs(d). Note that at d/dΩ = n2, n = 1, 2, · · ·
204
+ the two transport times coincide, which is again Example
205
+ 2.
206
+ For fixed d, one can also obtain tf as a function of Ω
207
+ from Eq. (3). In dimensionless scaled variables the result
208
+ is plotted in Fig. 4. It is seen that tf diverges for Ω → 0.
209
+ This can be made more explicit by expanding Eq. (3) in
210
+ terms of Ω tf. A short calculation gives, in dimensionless
211
222
+ FIG. 3: Solid curve: Optimal transport time tf as a
223
+ function of distance d in units of dΩ = 4π2amax Ω−2, for
224
+ fixed Ω. Dashed curve: Tabs(d) (without oscillator). For
225
+ d/dΩ = 1, 22, · · · the times coincide.
226
+ scaled variables,
227
+ tf/Tabs(d) ≈ {6/π2}1/4 (Ω/Ωabs(d))−1/2.
228
+ (6)
229
+ Replacing 6 by 5.3 in Eq.(6) one obtains an excel-
230
+ lent approximation for tf/Tabs(d) in the range 0.05 ≤
231
+ Ω/Ωabs(d) ≤ 0.7.
232
243
+ FIG. 4: Fixed d: Optimal transport time tf in units of
244
+ Tabs(d) as a function of Ω in units of Ωabs(d).
245
+ Protocol evaluation. For the oscillator time-development Eq. (1) has to be evaluated with a =
+ ±amax. This is conveniently done in the complex plane.
254
+ With
255
+ z = xh + i Ω−1 ˙xh ± amax/Ω2
256
+ (7)
257
+ one finds ˙z = −iΩ z and thus z(t) = exp[−iΩ(t − t0)] z(t0).
258
+ Hence
259
+ xh(t) + i Ω−1 ˙xh(t) = exp[−iΩ(t − t0)]
260
+ (8)
261
+ · (xh(t0) + i Ω−1 ˙xh(t0) ± amax/Ω2) ∓ amax/Ω2.
262
+ In the complex plane the right-hand side corresponds to
263
+ a clock-wise rotation of xh(t0) + i Ω−1 ˙xh(t0) by the an-
264
+ gle Ω(t − t0) around the point −amax/Ω2 and amax/Ω2,
265
+ respectively.
266
+ In the protocol one starts with xh(0) = 0 and ˙xh(0) = 0 and rotates clock-wise around −amax/Ω2, then
270
+ around amax/Ω2, then again around −amax/Ω2 and fi-
271
+ nally around amax/Ω2.
272
+ Analytically this gives for the
273
275
+ FIG. 5: Time-development of xh in complex
276
+ phase-space for Ω = 1, amax = 1, d = 2.82 π2,
277
+ tf = 3.41 π, and t1 = .205 π. Starting at the origin, i.e.
278
+ equilibrium position and at rest, there is first a rotation
279
+ around -1, then around 1, then around -1 and finally
280
+ again around 1, back to the origin.
281
+ first two rotations
282
+ ζ1 ≡ xh(tf/2 − t1) + i Ω−1 ˙xh(tf/2 − t1)
283
+ = exp[−iΩ(tf/2 − t1)] amax/Ω2 − amax/Ω2
284
+ ζ2 ≡ xh(tf/2) + i Ω−1 ˙xh(tf/2)
285
+ = exp[−iΩt1](ζ1 − amax/Ω2) + amax/Ω2.
286
+ (9)
287
+ xh(tf/2) is the real part of ζ2 and one finds
288
+ xh(tf/2) = 2 amax/Ω2 (cos²(Ω tf/4) − cos(Ω t1))
289
+ = 0
290
+ (10)
291
+ by Eq. (4), i.e. ζ2 lies on the imaginary axis. The cor-
292
+ responding trajectories in the complex plane correspond
293
+
294
295
+ to the two curves in the left half-plane in Fig. 5. By
296
+ the symmetry of the protocol the next two steps give the
297
+ two curves in the right half-plane where the last one ends
298
+ again at the origin. This follows of course also analyti-
299
+ cally. Hence after the final step the oscillator is again at
300
+ rest in its equilibrium position. Thus the protocol satis-
301
+ fies the initial and final conditions.
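+ (As an illustration, the rotation picture of Eqs. (8)-(10)
+ can be verified in a few lines. The sketch below composes
+ the four clockwise rotations about ∓amax/Ω² for an
+ arbitrary tf, with t1 taken from Eq. (4), and confirms
+ that Re ζ2 vanishes and that the final point is the origin.)
+ import numpy as np
+ Omega, amax, tf = 1.0, 1.0, 3.41 * np.pi                     # tf is arbitrary here
+ t1 = np.arccos(np.cos(Omega * tf / 4.0) ** 2) / Omega        # Eq. (4)
+ c = amax / Omega**2                                          # centres are at -c and +c
+ def rotate(z, centre, duration):
+     # Eq. (8): clockwise rotation of z about 'centre' by the angle Omega*duration
+     return np.exp(-1j * Omega * duration) * (z - centre) + centre
+ z = 0.0 + 0.0j                                               # at rest in equilibrium
+ z = rotate(z, -c, tf / 2 - t1)                               # accelerate with +amax
+ z = rotate(z, +c, t1)                                        # decelerate with -amax
+ print(z.real)                                                # = xh(tf/2) = 0, cf. Eq. (10)
+ z = rotate(z, -c, t1)                                        # accelerate again
+ z = rotate(z, +c, tf / 2 - t1)                               # final deceleration
+ print(abs(z))                                                # back at the origin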
302
+ III.
303
+ PROOF OF OPTIMALITY FOR FIXED Ω
304
+ First the equivalent converse problem will be consid-
305
+ ered: Finding the longest distance d for a given time
306
+ duration tf under the conditions (i) - (iii) in Section I
307
+ and a corresponding protocol.
308
+ Symmetry. Consider some given tf and d. In the fol-
309
+ lowing it is convenient to let time run from −(1/2)tf to (1/2)tf. Let xh and xw satisfy Eqs. (1) for some a(t)
+ and the boundary conditions at ±(1/2)tf. Then (1/2)(xh(t) − xh(−t)) and (1/2)(xw(t) − xw(−t) + d) satisfy
+ Eqs. (1) with a(t) replaced by (1/2)(a(t) − a(−t)) and the same boundary conditions. Hence without loss of
+ generality one can assume that xh and a are anti-symmetric while ˙xh and ˙xw are symmetric under time reversal.
325
+ Scaled variables. We go over to dimensionless scaled
326
+ variables. We choose some fixed length unit d0 and put
327
+ Ω0² = amax/d0,   ω = Ω/Ω0,   τ = Ω0 t,   u(τ) = a(t)/amax,
+ ξ1(τ) = xh(t)/d0,   ξ2(τ) = dξ1(τ)/dτ,   ξ3(τ) = xw(t)/d0,   ξ4(τ) = dξ3(τ)/dτ   (11)
345
+ so that u(τ) can vary between −1 and 1.
346
+ Then one
347
+ obtains
348
+ ¨ξ1 ≡ d²ξ1/dτ² = −ω²ξ1 − u(τ),   ¨ξ3 = u(τ) .   (12)
352
+ For fixed Ω and a suitable d0 one can assume Ω0 = Ω
353
+ and then ω = 1.
354
+ Pontryagin Maximum (or Minimum) Principle (PMP)
355
+ [24–26]. This is a far-reaching generalization of the cal-
356
+ culus of variations and regarded as a milestone in control
357
+ theory. A simple example is a car moving in shortest time
358
+ from standstill at A to standstill at B, under the only
359
+ condition that the time-dependent acceleration resp. de-
360
+ celeration (the ’control’) is bounded, but not necessarily
361
+ continuous.
362
+ The PMP serves to determine necessary conditions for
363
+ an optimal control function u∗(t) (or possibly several con-
364
+ trol functions) which minimizes a given cost function J
365
+ of the form J = ∫_0^T L(u(τ), ...) dτ, where L is a function of the control u(τ) and some state functions ξi
+ and their derivatives. For the present distance-optimal control problem, one can take L = ξ4 since
+ J = ∫_0^T ˙ξ3 dτ is the (scaled) distance. To minimize it, the PMP considers
375
+ a control Hamiltonian Hc,
376
+ Hc = −L+p1 ˙ξ1 + p2 ˙ξ2 + p3 ˙ξ3 + p4 ˙ξ4,
377
+ (13)
378
+ where one inserts ˙ξi from Eqs. (11-12) and where the
379
+ adjoint states pi are Lagrange multipliers which can not
380
+ all be identically zero.
381
+ Then, for an extremal control
382
+ u(τ) = u ∗ (t), Hamilton’s equations
383
+ ˙pi = −∂Hc/∂ξi,
384
+ ˙ξi = ∂Hc/∂pi
385
+ (14)
386
+ hold.
387
+ For almost all −τf/2 ≤ τ ≤ τf/2, the function
388
+ Hc(pi(t), ξi(t), u(t)) attains its maximum at u(t) = u∗(t),
389
+ and Hc = const. For simplicity we omit the asterisk on
390
+ u∗. Inserting for ˙ξi, Hc becomes
391
+ Hc = −ξ4 + p1ξ2 + p2(−ω2ξ1 − u) + p3ξ4 + p4u .
392
+ (15)
393
+ From the term (p4−p2) u it follows that for a maximum
394
+ one has to choose u(τ) = 1 if p4 − p2 > 0 and -1 if
395
+ p4 − p2 < 0. When p4 − p2 = 0, or more precisely, when
396
+ p4 − p2 changes sign, there is a switch from ±1 to ∓1 in
397
+ u. Hamilton’s equations become
398
+ ˙p1 = ω2p2,
399
+ ˙p2 = −p1
400
+ ˙p3 = 0,
401
+ ˙p4 = −p3 + 1
402
+ (16)
403
+ The solutions are
404
+ p2(τ) = A cos ωτ + B sin ωτ,
405
+ p1 = − ˙p2
406
+ p3 = c3,
407
+ p4 = (−c3 + 1) τ + c4
408
+ (17)
409
+ where A, B, c3, and c4 are constants. If p4 − p2 ≡ 0
410
+ in some extended interval, then p4 = p2 ≡ 0, by linear
411
+ independence. Therefore it is not possible to have u ≡ 0
412
+ and ξ4 ≡ const in some extended interval so that there
413
+ are only isolated switches. Hence, by anti-symmetry of
414
+ u, there is a switch at τ = 0, i.e. −p2(0) + p4(0) = 0,
415
+ and thus A = c4. By the boundary conditions on ξi at
416
+ ±τf/2 only the terms containing u remain in Hc which
417
+ by antisymmetry of u lead to two equations and to
418
+ A (cos(ωτf/2) − 1) = 0 .
419
+ (18)
420
+ Thus either A = 0 or ωτf = 4πn. In the latter case the
421
+ situation is analogous to Example 2, i.e. the h.o. can per-
422
+ form 2n complete oscillations and the optimal distance
423
+ is the same as without oscillator. We can therefore as-
424
+ sume A = c4 = 0.
425
+ For ωτf ̸= 4πn there are at least
426
+ two switches of u and therefore B ̸= 0 since otherwise
427
+ −c3 + 1 = 0, c3 = 1, and ξ4 ≡ const. The explicit values
428
+ of B and c3 are not needed, they can in principle be cal-
429
+ culated at the end; it suffices to discuss the cases B < 0
430
+ and B > 0.
431
+ Note: From the remark after Eq. (15) it follows that
432
+ u(τ) = 1 when the line p4(τ) lies above the sine curve
433
+ p2(τ) and u(τ) = −1 when it lies below.
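+ (As an illustration, the switching rule u(τ) = sign(p4 − p2)
+ is easy to examine numerically; the constants B and c3
+ below are arbitrary placeholders, chosen only so that the
+ line and the sine curve intersect for τ < 0.)
+ import numpy as np
+ omega = 1.0
+ B, c3, c4 = -0.9, 0.9, 0.0                      # illustrative constants, A = c4 = 0
+ tau = np.linspace(-2.0 * np.pi, 0.0, 4001)
+ p2 = B * np.sin(omega * tau)                    # Eq. (17) with A = 0
+ p4 = (1.0 - c3) * tau + c4
+ u = np.sign(p4 - p2)                            # +1 above the sine curve, -1 below
+ switches = tau[1:][np.diff(np.sign(p4 - p2)) != 0]
+ print(switches)                                 # isolated switching times for tau <= 0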
434
+
435
+ 5
436
+ Case B < 0. (i) Single switch for τ < 0, at −τ1, say.
437
+ Then the line p4(τ), denoted by L1 in Fig. 6, intersects
438
+ with the -sine curve p2(τ) once.
439
+ The analog of Eqs.
440
+ (9) for ξ + iω−1 ˙ξ in the scaled
441
+ variables, now with initial time -τf/2 and final time 0
442
+ yields
443
+ ξ1(0) = cos(ωτf/2) − 2 cos(ωτ1) + 1.
444
+ (19)
445
+ From the anti-symmetry of ξ1 one has ξ1(0) = 0, and
446
+ from this one obtains
447
+ cos ωτ1 = cos2(ωτf/4)
448
+ (20)
449
+ with −π/2ω < −τ1 < 0. Thus line L1 in Fig. 6 is typical
450
+ in this case, while line L2 is not possible.
451
454
+ FIG. 6: Case B < 0. With ω = 1. L1 and L2 denote
455
+ possible lines for p4(τ). Their intersections with p2(τ)
456
+ (-sine curve) are possible switching points. In regions
457
+ where p4(τ) is above p2(τ) one has acceleration,
458
+ otherwise deceleration. Only L1 with a single switch is
459
+ optimal.
460
+ (ii) If there are two or more switches for τ < 0, e.g. if
461
+ p4(τ) is given by line L2 in Fig. 6, then the last decelera-
462
+ tion period before τ = 0 is longer than π/2ω. Hence the
463
+ total acceleration time is less than in (i) and the distance
464
+ traveled by the wagon during τf is less than that in (i).
465
+ Hence for B < 0 there is only a single switch for τ < 0.
466
+ Case B > 0. From Fig. 7 this is case B < 0 reflected at
467
+ the τ axis, with u = ±1 interchanged and thus positive
468
+ wagon distances for B < 0 now become negative. But
469
+ there might also be negative distances for B < 0, corre-
470
+ sponding to positive distances for B > 0, and therefore a
471
+ more detailed discussion is required. Here we use ω = 1.
472
+ (i) Single switch for τ < 0: As for B < 0 there is only a
473
+ single solution for fixed τf, and this is the corresponding
474
+ optimal backward motion, with p4(τ) typically given by
475
+ L3 in Fig. 7.
476
+ (ii) Exactly two switches for τ < 0. Typical for this
477
+ would be lines L4 and L5 in Fig.
478
+ 7, with switches at
479
+ −τ2 < −τ1 < 0, say.
480
+ a) Case τ2 − τ1 > π/2.
481
+ From Fig. 7 one easily finds ˙ξ3(0) = τf/2 − 2(τ2 − τ1) <
482
+ τf/2 − π while, from case B < 0, ˙ξ3opt ≥ τf/2 − π since
483
+ here the switching point lies to the right of −π/2. Hence
484
+ in case B < 0 the distance is larger.
485
+ b) Case τ2 − τ1 < π/2.
486
+ This will be shown to be incompatible with the bound-
487
+ ary conditions on the h.o.. One has ξ1(0) = 0, by anti-
488
+ symmetry, while ˙ξ1(0) ≡ λ is unknown. Reversing the
489
+ time development from τ = 0 to τ = −τ2 one obtains
490
+ ξ1(−τ1) + i ˙ξ1(−τ1) = exp[−iτ1]{iλ + 1} − 1
491
+ ξ1(−τ2) + i−1 ˙ξ1(−τ2) =
492
+ exp[i(−τ1 + τ2)]{ξ1(−τ1) + i ˙ξ1(−τ1) − 1} + 1
493
+ = exp[i(−τ1 + τ2)]{exp[iτ1](iλ + 1) − 2} + 1
494
+ (21)
495
+ Since this must lie on the circle around −1 passing
496
+ through 0, upon adding 1 the rhs becomes a number of
497
+ modulus 1:
498
+ 1 = | exp[i(−τ1 + τ2)]{exp[iτ1](iλ + 1) − 2} + 2|
499
+ = |iλ + 1 − 2 exp[−iτ1] + 2 exp[−iτ2]|
500
+ (22)
501
+ Hence the modulus of the real part,
502
+ |1 − 2 cos τ1 + 2 cosτ2|,
503
+ (23)
504
+ must be less than, or equal to, 1. However, from Fig.
505
+ 7, one has −3π/2 < −τ1 < −π and so cos τ1 < 0.
506
+ For −2π < −τ2 < −3π/2 one has cos τ2 > 0 while for
507
+ −3π/2 < −τ2 < −π one has −2 cosτ1 + 2 cosτ2 > 0.
508
+ Hence the bracket in Eq.
509
+ (23) is larger than 1, a
510
+ contradiction. Thus this case can not occur.
511
+ (iii) Three or more switches for τ < 0: A typical line is
512
+ L5 in Fig. 7. From Fig.7 it is evident that the area under
513
+ the curve (i.e. distance) decreases.
514
+ FIG. 7: Case B > 0. With ω = 1. L3, L4 and L5 denote
515
+ possible lines for p4(τ). Their intersections with p2(τ)
516
+ (sine curve) are possible switching points. Dashed: ˙ξ3
517
+ with 2 intersection points −τ1 and −τ2. Dotdashed:
518
+ ˙ξ3opt from case B < 0. For τ2 − τ1 > π/2 one has
519
+ ˙ξopt > ˙ξ. L3 is typical for the optimal backwards
520
+ motion.
521
523
+ As a consequence, case B > 0 is not possible and case
524
+ B < 0 (i) gives the unique optimal distance for given τf
525
+ and fixed ω in scaled variables. This distance is easily
526
+ calculated to be τf²/4 − 2τ1², with τ1, 0 ≤ τ1 ≤ π/2, given
529
+ by Eq. (20). In the original variables one has
530
+ d = (1/4) amax tf² − 2 amax t1² .   (24)
535
+ Going back to the original problem one obtains the
536
+ protocol of Section II.
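+ (As an illustration, Eq. (24) follows from the wagon
+ velocity profile alone. The check below integrates the
+ piecewise constant acceleration with switches at tf/2 − t1,
+ tf/2 and tf/2 + t1 and compares with amax tf²/4 − 2 amax t1²;
+ the numerical values are arbitrary.)
+ import numpy as np
+ amax, tf, t1 = 1.0, 6.0, 0.7                    # arbitrary values with t1 < tf/2
+ edges = [0.0, tf / 2 - t1, tf / 2, tf / 2 + t1, tf]
+ signs = [+1.0, -1.0, +1.0, -1.0]                # acceleration pattern of the protocol
+ t = np.linspace(0.0, tf, 600001)
+ a = np.zeros_like(t)
+ for (s0, s1), sgn in zip(zip(edges[:-1], edges[1:]), signs):
+     a[(t >= s0) & (t < s1)] = sgn * amax
+ dt = t[1] - t[0]
+ v = np.cumsum(a) * dt                           # wagon velocity
+ dist = np.sum(v) * dt                           # wagon distance (Riemann sum)
+ print(dist, amax * tf**2 / 4 - 2 * amax * t1**2)  # agree up to discretisation error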
537
+ IV.
538
+ PROTOCOLS FOR TIME-DEPENDENT
539
+ OSCILLATOR FREQUENCY
540
+ In this case one allows in addition to a(t) also Ω(t) to
541
+ be time-dependent and seeks a minimal transport time
542
+ tf for a distance d under the condition that the wagon
543
+ is initially and finally at rest and the oscillator is at rest
544
+ in its equilibrium position. This situation is more com-
545
+ plicated. If there are no bounds on Ω then for Ω → ∞
546
+ one obtains the absolute minimal time as without oscilla-
547
+ tor. Therefore, in addition to |a(t)| ≤ amax one imposes
548
+ bounds
549
+ 0 ≤ Ω− ≤ Ω(t) ≤ Ω+ < ∞.
550
+ (25)
551
+ If a ’resonant value’ from Eq.
552
+ (2) lies in this interval
553
+ then, from Example 2, one chooses this value for Ω and
554
+ then obtains the absolute minimal time.
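+ (As an illustration, checking whether a resonant value
+ n Ωres(d) falls inside [Ω−, Ω+] is a one-liner; the helper
+ below, with Ωres from Eq. (2), returns the smallest such
+ n if one exists.)
+ import math
+ def resonant_n(d, amax, w_lo, w_hi):
+     # smallest n with n * Omega_res(d) inside [w_lo, w_hi], else None
+     w_res = 2.0 * math.pi * math.sqrt(amax / d)        # Eq. (2)
+     n = max(1, math.ceil(w_lo / w_res))
+     return n if n * w_res <= w_hi else None
+ print(resonant_n(d=2.0, amax=1.0, w_lo=3.0, w_hi=6.0)) # 1, since Omega_res ~ 4.44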
555
+ Distance optimization.
556
+ Again we first consider the
557
+ equivalent problem of finding a protocol that maximizes
558
+ the distance d for given time tf and let time run from
559
+ −(1/2)tf to (1/2)tf. We will seek solutions that satisfy the same
562
+ symmetry properties as in Section III, i.e.
563
+ we assume
564
+ that Ω(t) is symmetric.
565
+ The same scaled variables as in Eq.
566
+ (11) are used.
567
+ Introducing
568
+ u1(τ) ≡ ω2(τ)
569
+ (26)
570
+ as a second control variable, Eq. (12) reads
571
+ ¨ξ1 ≡ d²ξ1/dτ² = −u1(τ) ξ1 − u(τ),   ¨ξ3 = u(τ) .   (27)
575
+ The condition on Ω(t) becomes ω−² ≤ u1(τ) ≤ ω+². The
+ control Hamiltonian for the PMP now reads
579
+ Hc = −ξ4 + p1ξ2 + p2(−u1ξ1 − u) + p3ξ4 + p4u .
580
+ (28)
581
+ As before it follows that for a maximum one has to choose
582
+ u(τ) = 1 if p4 > p2 and -1 if p4 < p2. When p4 − p2 = 0,
583
+ or more precisely, when p4 − p2 changes sign, there is
584
+ a switch from ±1 to ∓1 in u.
585
+ Similarly, u1 = ω+² if p2ξ1 < 0, and u1 = ω−² if p2ξ1 > 0. A switch occurs when p2ξ1 changes sign.
590
+ Depending on whether u1 = ω+² or u1 = ω−², Hamilton’s equations in the respective τ intervals become
+ ˙p1 = ω±² p2,   ˙p2 = −p1,   ˙p3 = 0,   ˙p4 = −p3 + 1 .   (29)
600
+ Between switches of u1 the solutions are of the form
601
+ p2(τ) = A± cos ω±τ + B± sin ω±τ = C± sin(ω±τ − ϕ±)
602
+ (30)
603
+ p1 = − ˙p2,
604
+ p3 = c3,
605
+ p4 = (−c3 + 1) τ + c4
606
+ where c3, c4, C± are constants, and A±, B±, ϕ± are con-
607
+ stants which may depend on the respective interval.
608
+ If p2(τ) ≡ 0 in some interval then it is zero everywhere
609
+ because it cannot be joined continuously to a nonzero
610
+ p2 from Eq. (30).
611
+ Since ω(τ) is symmetric there must be intervals of
612
+ equal length with ω(τ) = ω+ directly to the left and right
613
+ of τ = 0 (or ω− intervals, but this will not be optimal as
614
+ shown later). Hence one must have ϕ+ = 0 in this inter-
615
+ val since then there are switches in ω(τ) at τ = ±π/ω+
616
+ because p2ξ1 vanishes there. It also vanishes at τ = 0
617
+ but does not change sign because of anti-symmetry of ξ1
618
+ and p2 so that ω has no switch at τ = 0 although u does.
619
+ Thus p2 is of the form
620
+ p2(τ) = B+ sin(ω+τ)
621
+ (31)
622
+ in the interval −π/ω+ ≤ τ ≤ π/ω+.
623
+ To the left of τ = −π/ω+ there is an interval with ω−,
624
+ then again an ω+ interval and so on, and similarly to the
625
+ right of τ = π/ω+. Since p2(τ) is differentiable different
626
+ parts of p2 have to be joined accordingly. This yields an
627
+ anti-symmetric p2 as typically displayed in Fig. 8.
628
+ FIG. 8: Solid: p2(τ) with symmetric ω± sequence.
629
+ Dashed: p4(τ).
630
+ The procedure for the determination of τ1 uses the
631
+ time-development of ξ1 and depends on the interval in
632
+ which (1/2)τf lies. This will be exemplified for (1/2)τf ≤ π/ω+ + π/ω−. When (1/2)τf ≤ π/ω+ the situation
+ is the same as in Section III and τ1 is given by Eq. (20), with ω replaced by ω+.
639
+
640
+ 7
641
+ When π/ω+ < (1/2)τf ≤ π/ω+ + π/ω− we calculate ξ1(τf/2) and ˙ξ1(τf/2) from ξ1(0) and ˙ξ1(0).
645
+ By anti-
646
+ symmetry one has ξ1(0) = 0 and we put ˙ξ1(0) = λ, the
647
+ exact value of which will not be needed. Using Eq. (8)
648
+ one obtains
649
+ η1 ≡ ξ1(τ1) + (i/ω+) ˙ξ1(τ1) = exp[−iω+(τ1 − 0)] ( (i/ω+) λ + 1/ω+² ) − 1/ω+²
+ η2 ≡ ξ1(π/ω+) + (i/ω+) ˙ξ1(π/ω+) = exp[−iω+(π/ω+ − τ1)] { ℜη1 + i (ω+/ω+) ℑη1 − 1/ω+² } + 1/ω+²
+ ˜η3 ≡ ξ1(τf/2) + (i/ω−) ˙ξ1(τf/2) = exp[−iω−(τf/2 − π/ω+)] { ℜη2 + i (ω+/ω−) ℑη2 − 1/ω−² } + 1/ω−²   (32)
691
+ By the boundary conditions at (1/2)τf one has ˜η3 = 0, and thus
694
+ 0 = ℜη2 + i (ω+/ω−) ℑη2 − 1/ω−² + exp[iω−(τf/2 − π/ω+)] (1/ω−²) .   (33)
707
+ Taking the real part of this one obtains after a short
708
+ calculation
709
+ cos[ω+τ1] = (ω+²/(2ω−²)) {1 + cos(ω−τf/2 + ((ω+ − ω−)/ω+) π)} .   (34)
716
+ The l.h.s. cannot exceed 1, while the r.h.s. becomes 1
717
+ for τf = τopt where
718
+ τopt/2 = π/ω+ + π/ω− − (2/ω−) arccos[ω−/ω+] ,   (35)
728
+ which lies between π/ω+ and π/ω++π/ω−. Then τ1 = 0
729
+ and the distance becomes the absolute optimum for this
730
+ particular τf = τopt.
731
+ Example 3. Let ω− = ω+/2. Then Eq. (35) yields
732
+ τopt/2 = (5/3) π/ω+ and the distance d/d0 becomes (1/4) τopt².
736
+ If one considered only ω+ and the corresponding τopt,
737
+ one would have ω+τ1 = arccos[3/4] ̸= 0 and the distance
738
+ would be less.
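+ (As an illustration, the numbers of Example 3 are easily
+ reproduced: the sketch evaluates Eq. (35) for ω− = ω+/2
+ and, for comparison, Eq. (20) at the same τf with ω+
+ alone; the value of ω+ is arbitrary since only the ratio
+ matters.)
+ import numpy as np
+ w_plus = 2.0
+ w_minus = w_plus / 2.0
+ tau_opt = 2.0 * (np.pi / w_plus + np.pi / w_minus
+                  - (2.0 / w_minus) * np.arccos(w_minus / w_plus))   # Eq. (35)
+ print(tau_opt * w_plus / np.pi)                  # 10/3, i.e. tau_opt/2 = (5/3) pi / w_plus
+ cos_wt1 = np.cos(w_plus * tau_opt / 4.0) ** 2    # Eq. (20) with w_plus alone
+ print(cos_wt1, np.arccos(cos_wt1))               # 0.75 and arccos(3/4), as in the text
+ tau1 = np.arccos(cos_wt1) / w_plus
+ print(0.25 * tau_opt**2, 0.25 * tau_opt**2 - 2.0 * tau1**2)  # the second distance is smaller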
739
+ How to proceed when the r.h.s. of Eq. (34) is larger
740
+ than 1? To answer this question we recall that p2 has
741
+ also the trivial solution p2(τ) ≡ 0. Then there are no
742
+ restrictions on the choice of ω(τ). If one decreases ω+
743
+ on the r.h.s of Eq. (34) to ω− the r.h.s. becomes less
744
+ or equal to 1. Hence there must be an intermediate ω,
745
+ denoted by ˜ω+, such that the r.h.s becomes 1. Hence if
746
+ one uses [ω−, ˜ω+] instead of [ω−, ω+] one gets a solution
747
+ for τ1, namely τ1 = 0, so that the sequence ω− and ˜ω+
748
+ gives the largest distance for the given τf. This means
749
+ going over to a sub-interval [ω−, ˜ω+] of [ω−, ω+] optimizes
750
+ the distance in this case. There are many sub-intervals
751
+ with the same property, as seen further below.
752
+ In the case π/ω+ + π/ω− < τf/2 ≤ π/ω+ + π/ω− +
753
+ π/ω+, i.e. if one starts with ω+, switches to ω−, and to
754
+ ω+ before τ = 0, i.e. a sequence +−+|+−+ in Fig. 8, then
755
+ η1 and η2 in Eq. (32) remain unchanged while in η3 one
756
+ replaces τf/2 by π/ω+ + π/ω− and there is an additional
757
+ η4,
758
+ η3 = −ℜη2 + 2/ω−² − (i/ω−) ω− ℑη2
+ η4 ≡ ξ1(τf/2) + (i/ω+) ˙ξ1(τf/2) = exp[−iω+(τf/2 − π/ω+ − π/ω−)] { ℜη3 + i (ω−/ω+) ℑη3 + 1/ω+² } − 1/ω+² .   (36)
779
+ The condition η4 = 0 now gives
780
+ cos ω+τ1 = ω+²/ω−² − 1 + (1/2) {1 + cos(ω+τf/2 − ((ω+ − ω−)/ω−) π)} .   (37)
789
+ For complete ω± intervals the exponentials in Eqs. (32)
790
+ and (36) equal -1 and using this the results are easily
791
+ generalized. In particular, for the ω± sequence − + − + | +
792
+ − + − one obtains
793
+ cos(ω+τ1) = ω+²/ω−² − 1 + (ω+²/(2ω−²)) {1 + cos(ω−τf/2 − 2π ω−/ω+)} .   (38)
805
+ Time optimization. These results will now be applied
806
+ to the original problem in which a distance, now denoted
807
+ by d0, is fixed and the shortest transport time for given
808
+ Ω± is sought. If this d0 is taken for the definition of the
809
+ scaled variables, d0 becomes ξ3(τf/2) = 1. The absolutely
810
+ shortest possible time, τabs, and corresponding ωres is
811
+ then, by Example 2, given by
812
+ τabs = 2
813
+ ωres = 2π.
814
+ (39)
815
+ From Fig. 2 the distance traveled in time τf is (1/4)τf² − 2τ1² and if τf is to be optimal it must satisfy
+ 1 = (1/4)τf² − 2τ1²   (40)
825
+ where τf = τf(ω−, ω+). For given ω± one obtains τ1 from
826
+ Eqs. (20, 34, 37) and generalizations thereof, depending
827
+ on in which interval the as yet unknown τf/2 lies.
828
+ If
829
+ ωres or an integer multiple n thereof lies in [ω−, ω+] one
830
+ chooses ω(τ) ≡ nωres and obtains the absolute optimal
831
+ τabs. Different cases of increasing complexity will now be
832
+ discussed.
833
+ Case: ω− = 0, 0 < ω+ < 2π and the distance 1. If
834
+ the spring constant is 0 then in the lab frame the mass
835
+ point m travels free of force and in the wagon frame
836
+ under the inertial force. It can happen that it is optimal
837
+
838
+ 8
839
+ to start with ω−.
840
+ Then m initially remains at rest in
841
+ the lab frame until a switch to ω+ occurs. If the time
842
+ development starts with ω+ there can be no switch to
843
+ ω− because the associated time interval π/ω− is infinite.
844
+ Hence in this case the results of Section II and III apply.
845
+ From Fig. 4 it is seen that τf decreases with increasing
846
+ ω+ < 2π. Since τf/2 ≤ π/ω+ one has, for optimality,
847
+ τf = 2π/ω+ and τ1 = 0, by Eqs. (3,4). From Eq. (40)
848
+ one then obtains τf² = 4 so that in this case one must
+ have ω+ = π/√2 ≡ ˜ω+. Thus if ω+ > ˜ω+ one starts
853
+ with ω− = 0 and then there is a switch to ω+ at some
854
+ later time.
855
+ In this case Eq.
856
+ (34) holds for τ1 and it
857
+ becomes 0 for τf = τopt given by Eq. (35). Taking the
858
+ limit ω− → 0 one finds τopt = (2π + 4)/ω+. This must
859
+ equal τabs = 2 which gives ω+ = π + 2 ≡ ωabs. From this
860
+ value of ω+ on one obtains the absolute time minimum.
861
+ The optimal time as a function of ω+ is displayed in Fig.
862
+ 9.
863
+ Protocol. This depends on ω+ and is as in Section II
864
+ when ω+ ≤ ˜ω+. When ˜ω+ < ω+ ≤ ωabs one determines
865
+ τf and τ1 from Eqs. (37) and (40), starts with ω− = 0 for
866
+ the time duration −π/˜ω+ + τf/2 and with u = 1, then
867
+ switches to ω+ and continues for the time −τ1 + π/˜ω+,
868
+ then switches to u = −1 for the time τ1 and continues by
869
+ symmetry, resp. anti-symmetry. When ωabs = 2 + π <
870
+ ω+ ≤ ωres one chooses the protocol for ω+ = ωabs.
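+ (As an illustration, in scaled units with d0 = 1, τabs = 2
+ and ωres = 2π, the two thresholds organising Fig. 9 are
+ simple closed forms; the sketch just evaluates them.)
+ import math
+ w_res = 2.0 * math.pi
+ w_tilde = math.pi / math.sqrt(2.0)     # below this only w_plus is used
+ w_abs = math.pi + 2.0                  # above this T_abs is reached
+ print(w_tilde / w_res)                 # 0.3536 = sqrt(2)/4, cf. Fig. 9
+ print(w_abs / w_res)                   # 0.8183 = 1/2 + 1/pi, cf. Fig. 9
+ print((2.0 * math.pi + 4.0) / w_abs)   # 2 = tau_abs, consistent with the text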
871
882
+ FIG. 9: Shortest transport time tf for fixed distance d0,
883
+ Ω− = 0 and 0 ≤ Ω+/Ωres(d0) ≤ 1. Dotted: tf for fixed
884
+ Ω+ without switch in Ω. Solid: Ω+/Ωres(d0) > √2/4;
887
+ initially Ω(t) ≡ 0 and then a switch to Ω+. For
888
+ 1/2 + 1/π ≤ Ω+/Ωres(d0) ≤ 1 one has Tabs(d). The
889
+ switch in Ω can thus lead to a shorter transport time
890
+ than for Ω+ alone.
891
+ Case: 0 < ω− < ω+ < ωres = 2π.
892
+ As in the pre-
893
+ ceding case, only ω+ is relevant if ω+ ≤ ˜ω+ = π/√2.
896
+ Then τf(ω−, ω+)/2 ≤ π/ω+ and is independent of ω−.
897
+ This is the upper close meshed region in Fig. 10. For
898
+ ω+ > ˜ω+ there are on the l.h.s. of Fig. 8 two or more
899
+ alternating ω±’s for the time development. If there are
900
+ two, one starts with ω−, and the initial time −τf/2 sat-
901
+ isfies π/ω+ ≤ τf/2 ≤ π/ω+ + π/ω−. In this case Eqs.
902
+ (40) and (34) apply. If the r.h.s. of Eq. (34) is less than or
903
+ equal to 1 then one can determine τ1 and τf(ω−, ω+), dis-
904
+ played by the coarse meshed region in Fig. 10. Putting
905
+ cos[ω+τ1] = 1 one obtains with τf = τabs = 2π from
906
+ Eq. (34) the boundary curve at the bottom of the coarse
907
+ meshed surface which borders the region denoted by Tabs.
908
+ In this region there is no solution for τ1. As before, here
909
+ the solution p2(τ) ≡ 0 can be used and then there are
910
+ no restrictions on ω(τ).
911
+ If one starts from the point
912
+ {ω−, ω+} and first decreases ω+ until one hits the bound-
913
+ ary curve and then similarly increases ω− one obtains the
914
+ end points of an arc on the boundary curve. Every point
915
+ {ˆω−, ˆω+} on this arc satisfies {ω− ≤ ˆω− ≤ ˆω+ ≤ ω+} and
916
+ yields τabs. Thus there is again an improvement over the
917
+ single ω+ case.
918
+ If there were a third, preceding, interval, i.e.
919
+ with
920
+ ω+, then τf(ω−, ω+)/2 > π/ω+ + π/ω− and τf would
921
+ thus be larger than that with only two periods. Hence
922
+ a third period does not occur. By a similar calculation,
923
+ interchanging ω+ and ω− leads to a larger transport time.
924
+ Protocol: When ω+ ≤ ˜ω+ = π/√2 one proceeds with
927
+ ω+ as in Section II. When ω+ > ˜ω+ one determines
928
+ τf(ω−, ω+) and τ1 from Eqs.
929
+ (34) and (40), provided
930
+ a solution for τ1 exists. Then one has an ω± sequence
931
+ of the form − + | + − and thus one starts with u = 1 and
932
+ ω− from time −τf/2 to time −π/ω+ where one switches
933
+ to ω+. Then one continues until time −τ1, where one
934
+ switches to u = −1 and continues to τ = 0 where there
935
+ is a switch back to u = 1. For τ > 0 one continues by
936
+ symmetry, resp. anti-symmetry. When there is no solu-
937
+ tion for τ1, i.e when the point {ω−, ω+} lies in the region
938
+ denoted by Tabs in Fig. 10, then one can choose a proto-
939
+ col for any point on the above arc. This will yield τabs
940
+ and in this case the protocol is not unique.
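+ (As an illustration, a naive numerical version of the step
+ ‘determine τf(ω−, ω+) and τ1 from Eqs. (34) and (40)’ is
+ sketched below. It scans τf over 2π/ω+ ≤ τf ≤ 2π/ω+ + 2π/ω−,
+ evaluates τ1 from Eq. (34) wherever the right-hand side
+ does not exceed 1, and looks for a sign change of
+ τf²/4 − 2τ1² − 1; the grid size and example frequencies
+ are arbitrary, and None is returned in the Tabs region.)
+ import numpy as np
+ def solve_tau(w_minus, w_plus, n_grid=200001):
+     tau = np.linspace(2.0 * np.pi / w_plus,
+                       2.0 * np.pi / w_plus + 2.0 * np.pi / w_minus, n_grid)
+     rhs = (w_plus**2 / (2.0 * w_minus**2)) * (
+         1.0 + np.cos(w_minus * tau / 2.0 + (w_plus - w_minus) / w_plus * np.pi))
+     ok = rhs <= 1.0                                   # Eq. (34) solvable for tau_1
+     tau1 = np.arccos(np.clip(rhs, -1.0, 1.0)) / w_plus
+     f = 0.25 * tau**2 - 2.0 * tau1**2 - 1.0           # Eq. (40), scaled distance minus 1
+     jumps = np.where(np.diff(np.sign(f[ok])) != 0)[0]
+     if jumps.size == 0:
+         return None                                   # pair lies in the T_abs region
+     i = np.flatnonzero(ok)[jumps[0]]
+     return tau[i], tau1[i]
+ print(solve_tau(w_minus=3.0, w_plus=5.0))             # both below omega_res = 2 pi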
941
+ Case: ωres = 2π ≤ ω− < ω+ < 2 ωres. Arguing as
942
+ before, one has + − +| + −+ and − + − + | + − + − as possible
943
+ ω± sequences.
944
+ To the first sequence Eq.
945
+ (37) applies
946
+ and to the second Eq. (38). One now solves Eq. (40)
947
+ together with Eq. (37) for τf under the condition that τf/2
948
+ lies in the last ω+ interval. In Fig. 11 this gives the left
949
+ surface outside of which there is no solution for τ1. In a
950
+ similar way one obtains the right surface for the second
951
+ sequence.
952
+ On the boundary curve at the bottom one
953
+ has τabs and the curve is obtained from cos(ω+τ1) = 1.
954
+ The two ω± sequences are separated by the dashed curve
955
+ under the surface.
956
+ This curve is obtained by putting
957
+ τf/2 = 2π/ω+ + π/ω− in Eqs. (37, 40). Its end point on
958
+ the boundary curve is given by {1/2 + (1/2)√2, 1 + (1/2)√2} ωres and on the diagonal by (1/4)√34 ωres.
970
+ In the region denoted by Tabs there is no solution for
971
+ τ1.
972
+ Again one can choose any point {ˆω−, ˆω+} on the
973
+ arc constructed as before to obtain τabs. Reversing the
974
+ sequence to − + −| − +− leads to larger transport times.
975
+ Protocol: If for a given {ω−, ω+} one has ω− ≤ (1/2 +
979
+ FIG. 10: Shortest transport time tf for fixed distance d0
980
+ and 0 ≤ Ω−/Ωres(d0) ≤ Ω+/Ωres(d0) ≤ 1. For
981
+ Ω+/Ωres(d0) ≤ π/√2 there is only Ω+ and no switch
984
+ (close meshed region). For {Ω−, Ω+} in the region
985
+ denoted by Tabs at the r.h.s. one has the shortest time
986
+ Tabs. The intersection of the surface with the front
987
+ plane is the curve of Fig. 9 and that with the diagonal
988
+ plane is the left part of the curve of Fig. 4 until 1.
989
+ (1/2)√2) ωres or if a solution for τ1 in Eq. (37) exists, one has
993
+ a sequence +−+|+−+, from Fig. 11. If a solution exists the
994
+ protocol is analogous to the previous case above. If not,
995
+ one picks a point {ˆω−, ˆω+} on the arc on the boundary
996
+ curve, as before, and uses the protocol for this point with
997
+ τf = τabs. Otherwise, one has a sequence
998
+ − + − + | + − + −
999
+ and the procedure is analogous.
1000
+ V.
1001
+ SUMMARY AND DISCUSSION
1002
+ Protocols for the fastest possible transport of a classi-
1003
+ cal harmonic oscillator (h.o.) over a distance d have been
1004
+ derived where both initially and finally everything is at
1005
+ rest, i.e. the wagon is at rest and the h.o.
1006
+ is in its equilibrium position and also at rest. The accel-
1007
+ eration a(t) is assumed to satisfy −amax ≤ a(t) ≤ amax.
1008
+ First, with fixed h.o.
1009
+ frequency Ω, for the shortest
1010
+ transport time the optimal acceleration alternates be-
1011
+ tween ±amax. It was shown that one starts with amax
1012
+ and that there are three switches or, for special values
1013
+ Ω = nΩres(d) = 2πn √(amax/d), n = 1, 2, · · · , only one
1016
+ switch. The switch times were determined.
1017
+ The dependence of the shortest transport time, de-
1018
+ noted by tf, on d, Ω and amax was found, cf. Figs. 3 and 4.
1019
+ The optimal time tf is proportional to 1/√amax, diverges
1020
+ FIG. 11: Shortest transport time tf for fixed distance d0
1021
+ and 1 ≤ Ω−/Ωres(d0) ≤ Ω+/Ωres(d0) ≤ 2. The left side
1022
+ of the surface belongs to an Ω± sequence + − +| + −+,
1023
+ the right side to − + − + | + − + −, separated by the
1024
+ dashed line in the bottom plane. For {Ω−, Ω+} in the
1025
+ region denoted by Tabs one obtains the shortest time
1026
+ Tabs by going over to a point on the boundary
1027
+ corresponding to a sub-interval of [Ω−, Ω+].
1028
+ for Ω → 0 and, not surprisingly, for Ω → ∞ converges
1029
+ to Tabs(d) = 2 √(d/amax), the optimal time for a wagon
1032
+ without h.o.. The function tf(d) approaches Tabs(d) for
1033
+ large d. Surprisingly, sometimes it is advantageous to go
1034
+ backwards for a while, but not as far back as the initial
1035
+ position.
1036
+ Second, in addition to a(t) a time-dependent Ω(t) sat-
1037
+ isfying Ω− ≤ Ω(t) ≤ Ω+ was considered. In this case
1038
+ the behavior of tf depends sensitively on Ω±. If n Ωres(d)
1039
+ lies in the interval [Ω−, Ω+] for some n then choosing
1040
+ n Ωres(d) will give the minimal time Tabs(d).
1041
+ If Ω+ ≤ Ωres/(2√2) then Ω(t) ≡ Ω+, there is no switch
1046
+ in Ω, and Ω− does not enter. Otherwise there are two
1047
+ alternatives if Ω+ < Ωres:
1048
+ (i) One starts with Ω−, switches to Ω+ and then back to
1049
+ Ω−.
1050
+ (ii) Or there are ˜Ω±, depending on Ω±, with Ω− ≤ ˜Ω− ≤
1051
+ ˜Ω+ ≤ Ω+ and one starts with ˜Ω−, switches to ˜Ω+ and
1052
+ then back to ˜Ω−. In this case one obtains the minimal
1053
+ time Tabs(d).
1054
+ In the Ω− − Ω+ plane this happens for
1055
+ {Ω−, Ω+} in a region, cf. Fig. 10.
1056
+ If n Ωres < Ω− ≤ Ω+ < (n + 1)Ωres the situation is
1057
+ similarly involved and depicted for n = 1 in Fig. 11 .
1058
+ The Pontryagin Maximum Principle was employed,
1059
+ first for constant Ω with a(t) as a control variable, and
1060
+ then with a(t) and Ω(t) as control variables. Symmetry
1061
+
1062
1087
+ properties played an important role which were proved
1088
+ for constant Ω and assumed in an analogous form for
1089
+ time-dependent Ω.
1090
+ One may also want to impose restrictions on the veloc-
1091
+ ities ˙xw and ˙xh or on the relative displacement xh of the
1092
+ h.o.. Within the PMP this may be formulated by means
1093
+ of Lagrangian multipliers. In [28] the relative displace-
1094
+ ment was assumed to be bounded and taken as the only
1095
+ control. However, in this case there are δ(t)-like forces at
1096
+ the time of a switch acting on the h.o., and no oscillations
1097
+ occur.
1098
+ The above results for constant Ω have immediate appli-
1099
+ cations to cranes for small-angle oscillations of the pay-
1100
+ load where the rope length l is constant. For time
1101
+ dependent l(t) modifications are needed since l(t) is not
1102
+ related to the frequency Ω(t) in the same way as the
1103
+ spring constant.
1104
+ The harmonic oscillator considered here is an idealized
1105
+ system. However, it may serve as a benchmark for more
1106
+ realistic models, e.g. if the switches are short but smooth
1107
+ rather than instantaneous.
1108
+ [1] D. Guéry-Odelin, A. Ruschhaupt, A. Kiely, E. Torrontegui, S. Martínez-Garaot, and J. G. Muga, Shortcuts to adiabaticity: Concepts, methods, and applications, Rev. Mod. Phys. 91, 045001 (2019).
+ [2] E. Torrontegui, S. Ibañez, S. Martínez-Garaot, M. Modugno, A. del Campo, D. Guéry-Odelin, A. Ruschhaupt, X. Chen, and J. G. Muga, Shortcuts to adiabaticity, Adv. At. Mol. Opt. Phys. 62, 117 (2013).
+ [3] Yue Ban, Xi Chen, E. Torrontegui, E. Solano, and J. Casanova, Speeding up quantum perceptron via shortcuts to adiabaticity, Sci. Rep. 11, 5783 (2021).
+ [4] N. N. Hegade, K. Paul, Yongcheng Ding, M. Sanz, F. Albarrán-Arriagada, E. Solano, and Xi Chen, Shortcuts to Adiabaticity in Digitized Adiabatic Quantum Computing, Phys. Rev. Applied 15, 024038 (2021).
+ [5] J. G. Muga, X. Chen, A. Ruschhaupt, and D. Guéry-Odelin, Frictionless dynamics of Bose-Einstein condensates under fast trap variations, J. Phys. B 42, 241001 (2009).
+ [6] X. Chen, A. Ruschhaupt, S. Schmidt, A. del Campo, D. Guéry-Odelin, and J. G. Muga, Fast Optimal Frictionless Atom Cooling in Harmonic Traps, Phys. Rev. Lett. 104, 063002 (2010).
+ [7] D. Guéry-Odelin, J. G. Muga, M. J. Ruiz-Montero, and E. Trizac, Exact Nonequilibrium Solutions of the Boltzmann Equation under a Time-Dependent External Force, Phys. Rev. Lett. 112, 180602 (2014).
+ [8] D. Guéry-Odelin and J. G. Muga, Transport in a harmonic trap: Shortcuts to adiabaticity and robust protocols, Phys. Rev. A 90, 063425 (2014).
+ [9] A. Ruschhaupt, X. Chen, D. Alonso, and J. G. Muga, Optimally robust shortcuts to population inversion in two-level quantum systems, New J. Phys. 14, 093040 (2012).
+ [10] S. Martínez-Garaot, E. Torrontegui, X. Chen, M. Modugno, D. Guéry-Odelin, Shuo-Yen Tseng, and J. G. Muga, Vibrational Mode Multiplexing of Ultracold Atoms, Phys. Rev. Lett. 111, 213001 (2013).
+ [11] M. Demirplak and S. A. Rice, On the consistency, extremal, and global properties of counterdiabatic fields, J. Chem. Phys. 129, 154111 (2008).
+ [12] M. V. Berry, Transitionless quantum driving, J. Phys. A 42, 365303 (2009).
+ [13] X. Chen, I. Lizuain, A. Ruschhaupt, D. Guéry-Odelin, and J. G. Muga, Shortcut to Adiabatic Passage in Two- and Three-Level Atoms, Phys. Rev. Lett. 105, 123003 (2010).
+ [14] E. Carolan, A. Kiely, and S. Campbell, Counterdiabatic control in the impulse regime, Phys. Rev. A 105, 012605 (2022).
+ [15] S. Masuda and K. Nakamura, Fast-forward of adiabatic dynamics in quantum mechanics, Proc. R. Soc. A 466, 1135 (2010).
+ [16] S. Masuda and K. Nakamura, Acceleration of adiabatic quantum dynamics in electromagnetic fields, Phys. Rev. A 84, 043434 (2011).
+ [17] Katsuhiro Nakamura, Jasur Matrasulov, and Yuki Izumida, Fast-forward approach to stochastic heat engine, Phys. Rev. E 102, 012129 (2020).
+ [18] E. Torrontegui, S. Martínez-Garaot, A. Ruschhaupt, and J. G. Muga, Shortcuts to adiabaticity: Fast-forward approach, Phys. Rev. A 86, 013601 (2012).
+ [19] G. C. Hegerfeldt, Driving at the Quantum Speed Limit: Optimal Control of a Two-Level System, Phys. Rev. Lett. 111, 260501 (2013).
+ [20] E. Dionis and D. Sugny, Time-optimal control of two-level quantum systems by piecewise constant pulses, arXiv:2211.09167.
+ [21] G. C. Hegerfeldt, High-speed driving of a two-level system, Phys. Rev. A 90, 032110 (2014).
+ [22] Xi Chen, Yue Ban, and G. C. Hegerfeldt, Time-optimal quantum control of nonlinear two-level systems, Phys. Rev. A 94, 023624 (2016).
+ [23] S. González-Resines, D. Guéry-Odelin, A. Tobalina, I. Lizuain, E. Torrontegui, and J. G. Muga, Invariant-Based Inverse Engineering of Crane Control Parameters, Phys. Rev. Applied 8, 054008 (2017).
+ [24] L. S. Pontryagin, V. G. Boltyanskii, R. V. Gamkrelidze, and E. F. Mishchenko, The Mathematical Theory of Optimal Processes, Interscience (1962).
+ [25] L. M. Hocking, Optimal Control: An Introduction to the Theory with Applications, Clarendon Press (Oxford, 1991).
+ [26] U. Boscain, M. Sigalotti, and D. Sugny, Introduction to the Pontryagin Maximum Principle for Quantum Optimal Control, PRX Quantum 2, 030203 (2021).
+ [27] E. Torrontegui, I. Lizuain, S. González-Resines, A. Tobalina, A. Ruschhaupt, R. Kosloff, and J. G. Muga, Energy consumption for shortcuts to adiabaticity, Phys. Rev. A 96, 022133 (2017).
+ [28] Xi Chen, E. Torrontegui, D. Stefanatos, Jr-Shin Li, and J. G. Muga, Optimal trajectories for efficient atomic transport without final excitation, Phys. Rev. A 84, 043415 (2011).
+
bNAzT4oBgHgl3EQfLPu9/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
bdAyT4oBgHgl3EQf-PoQ/content/tmp_files/2301.00887v1.pdf.txt ADDED
@@ -0,0 +1,303 @@
+ Towards Computer-Vision Based Vineyard Navigation for Quadruped Robots
+ Lee Milburn, Dynamic Legged Systems Lab, Istituto Italiano di Tecnologia, Genova, Italy, lee.milburn@iit.it
+ Juan Gamba, Dynamic Legged Systems Lab, Istituto Italiano di Tecnologia, Genova, Italy, juan.gamba@iit.it
+ Claudio Semini, Dynamic Legged Systems Lab, Istituto Italiano di Tecnologia, Genova, Italy, claudio.semini@iit.it
+ Abstract—There is a dramatic shortage of skilled labor for modern vineyards. The Vinum project is developing a mobile robotic solution to autonomously navigate through vineyards for winter grapevine pruning, which requires an autonomous navigation stack for the pruning robot. The Vinum project uses the quadruped robot HyQReal. This paper introduces an architecture for a quadruped robot to autonomously move through a vineyard by identifying and approaching grapevines for pruning. The higher-level control is a state machine that switches between searching for destination positions, autonomously navigating towards those locations, and stopping for the robot to complete a task. The destination points are determined by identifying grapevine trunks using instance segmentation from a Mask Region-Based Convolutional Neural Network (Mask-RCNN). These detections are sent through a filter to avoid redundancy and remove noisy detections. The combination of these features forms the basis of the proposed architecture.
+ Index Terms—Agricultural Robotics, Computer-Vision, Vineyard Navigation, Quadruped Control
+ I. INTRODUCTION
+ Fig. 1: HyQReal in Vineyard.
+ There is a major shortage of labor in vineyards across the world. Vineyards rely on seasonal labor, which in many cases includes international workforces. Seasonal labor shortages began with the COVID-19 pandemic and have continued since1. Vineyards have therefore looked towards robotic automation of seasonal work to account for the labor shortage.
+ The Vinum project is built on the HyQReal quadruped robot, which is being developed to autonomously perform the winter pruning of grapevines, see Fig. 1 [8]. To accomplish this, the Vinum robot has to autonomously navigate vineyards, arriving at each grapevine that needs winter pruning. This extended abstract introduces a navigation architecture based on computer vision for quadruped robots. Previous vineyard navigation work has described moving down each row, using a laser sensor, until there are no more grapevines in the row [7]. Other navigation stacks have been developed that also move down rows but use laser scanners for perception [2]. Our proposed navigation stack initializes itself with a search of a vineyard row and chooses whether to start from the right or the left. It uses computer vision to detect the grapevines and a filter to average the detections and eliminate noise. In other work, grapevine trunks were identified using instance segmentation [5]. We implemented a similar sensor-based navigation control using an RGB-D camera for grapevine trunk image segmentation; detections of the grapevine trunks are made using a Mask-RCNN trained on a dataset of 100 images created for this purpose. The combination of the higher-level control with the grapevine detections forms the basis of the Vinum navigation stack.
+ The contribution of this extended abstract is a navigation architecture for precise placement of quadruped robots moving through vineyard rows. It allows for precise robot placement within the vineyard that is ideal for a robotic workspace, which in turn allows the robot to perform selective, plant-by-plant task automation within the vineyard. A series of experiments was performed with the Aliengo robot, and our approach achieved a mean of 3.36 cm and a standard deviation of 2.19 cm of distance from the desired position, which is sufficient for an automated task.
+ 1 https://www.winemag.com/2021/12/07/wine-industry-labor-supply/
+ II. STATE OF THE ART
+ As of today, different robots and vehicles have been developed that can move autonomously throughout vineyards. These robots either move continuously along the row and/or are not quadrupeds. The EU project BACCHUS robot is a wheeled vehicle under development to harvest grapes and take care of vineyards. The BACCHUS robot uses semantic segmentation of vineyard trunks for its localization [5]. Our proposed navigation architecture takes the same segmentation approach, but uses it to identify positions for the robot to walk to instead. The EU project CANOPIES aims at developing a human-robot collaborative paradigm for harvesting and pruning in vineyards1. It is a wheeled robot that works over the vineyard row. A similar autonomous over-the-row robot is the ViTiBOT Bakus, which is used to improve vineyard health by removing herbicides and using precision spraying. This solution does not include stopping at each grapevine. YANMAR's autonomous over-the-row robot, YV01, performs a similar task, autonomously spraying vineyard rows without stopping at a specific grapevine2. A proposed wheeled robot for precision agriculture is the Agri.q02, which is meant to work in unstructured environments in collaboration with a UAV [6]. A navigation stack was created for wheeled Ackermann vehicles in precision farming, with path planning from pose to pose [3]. Autonomous navigation was also outlined in the Echord++ GRAPE experiment, which maps a vineyard with a wheeled robot and moves to locations on the map to perform tasks [1]. These autonomous robots are all wheeled, and most do not have to stop at precise locations in the vineyard. The navigation architecture proposed in this paper is quadruped navigation that builds on previously used localization techniques to find precise positions for automated tasks such as winter pruning and harvesting grapes.
+ 1 www.canopies-project.eu
+ 2 https://www.yanmar.com/eu/campaign/2021/10/vineyard/
+ III. NAVIGATION ARCHITECTURE
+ The navigation architecture is a combination of higher-level control and object detection. The higher-level control makes decisions on the robot's movement path through a vineyard row based on the grapevine trunks detected. The object detection was done by training a Mask-RCNN from Detectron2 [9].
+ A. Higher Level Control
+ Fig. 2: Navigation Flow.
+ The higher-level control is a state machine for the robot to move throughout a vineyard row, as illustrated in Fig. 2. It begins with an initial search to find the starting lines for both sides of the row. The user can initially set whether the robot should move to the left or right side of the row. The initial detections are sent through a filter which computes the rolling average of each detection, as sketched below. From the filtered detection points, the control finds the lines on which the vineyard rows begin.
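As an illustration of this filtering step, the sketch below keeps a running average per trunk track and discards rarely-seen tracks as noise. The class name, merge radius, and hit threshold are our own illustrative assumptions, not the project's actual code.

import numpy as np

class DetectionFilter:
    """Rolling-average filter over trunk detections (illustrative sketch)."""

    def __init__(self, merge_radius=0.30, min_hits=3):
        self.merge_radius = merge_radius  # [m] detections closer than this belong to the same trunk
        self.min_hits = min_hits          # tracks seen fewer times than this are treated as noise
        self.means = []                   # running average position of each track
        self.hits = []                    # number of detections merged into each track

    def update(self, detections):
        """detections: iterable of (x, y) trunk positions in a fixed frame."""
        for p in map(np.asarray, detections):
            if self.means:
                dists = [np.linalg.norm(p - m) for m in self.means]
                i = int(np.argmin(dists))
                if dists[i] < self.merge_radius:
                    # Same trunk seen again: fold it into the rolling average.
                    self.hits[i] += 1
                    self.means[i] += (p - self.means[i]) / self.hits[i]
                    continue
            # Far from every existing track: open a new track.
            self.means.append(p.astype(float))
            self.hits.append(1)

    def confirmed(self):
        """Tracks supported by enough detections to be trusted."""
        return [m for m, h in zip(self.means, self.hits) if h >= self.min_hits]

The confirmed, averaged points are then what the control fits the row lines to.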
+ The robot has to approach parallel to the grapevines for it to be able to prune properly. To find the correct destination point, the robot first determines the orientation of the approach by calculating the direction vector of the grapevines in a row, derived from the list of points found in the initial search (see the sketch below). It updates this vector as it moves along the row to account for possible deviations of the grapevines. The robot then approaches the grapevines in parallel at a desired distance that depends on the robot size and the workspace of the arm.
+ After the robot has reached the determined location in the vineyard, it removes that grapevine from the list of vines to approach. Next, the control chooses the closest grapevine to the robot as its next target. It continues in this way until there are no more grapevines to identify in the row.
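A minimal version of that geometry could look as follows. The PCA-style line fit, the 0.8 m lateral offset, the heading convention, and the function names are illustrative assumptions, not the paper's implementation.

import numpy as np

def row_direction(trunks):
    """Unit vector along the row, fitted to a list of (x, y) trunk positions."""
    pts = np.asarray(trunks, dtype=float)
    centered = pts - pts.mean(axis=0)
    # The dominant right-singular vector of the centered points is the row direction.
    _, _, vt = np.linalg.svd(centered, full_matrices=False)
    return vt[0] / np.linalg.norm(vt[0])

def approach_pose(trunk, direction, offset=0.8):
    """(x, y, yaw) standing `offset` metres beside `trunk`, body parallel to the row."""
    normal = np.array([-direction[1], direction[0]])  # unit vector perpendicular to the row
    position = np.asarray(trunk, dtype=float) + offset * normal
    yaw = np.arctan2(direction[1], direction[0])      # heading aligned with the row (one possible convention)
    return position[0], position[1], yaw

Re-fitting row_direction after every filter update is one simple way to realize the "update the vector as the robot moves along the row" step described above.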
+ B. Grapevine Identification
+ Instance segmentation using a Mask-RCNN is used to detect the grapevine trunks in a vineyard. The training of the neural network was done in Detectron2 using 100 hand-annotated images of potted grapevines. The corresponding depth of each detection is found using the aligned depth image, and from there the grapevine locations are computed relative to the quadruped.
+ Fig. 3: Result of the image segmentation to detect grapevine trunks (4 examples).
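The snippet below sketches how such a detector could be run and how its masks could be combined with an aligned depth image to obtain trunk positions in the camera frame. The weights file name, score threshold, single-class assumption, and camera intrinsics (fx, fy, cx, cy) are our own assumptions, not details taken from the paper.

import numpy as np
from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1           # single "trunk" class (assumption)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7   # illustrative confidence threshold
cfg.MODEL.WEIGHTS = "trunk_model_final.pth"   # hypothetical fine-tuned weights
predictor = DefaultPredictor(cfg)

def trunk_positions(bgr_image, depth_m, fx, fy, cx, cy):
    """Return (x, y, z) of each detected trunk in the camera frame, in metres."""
    instances = predictor(bgr_image)["instances"].to("cpu")
    points = []
    for mask in instances.pred_masks.numpy():
        ys, xs = np.nonzero(mask)
        z = float(np.median(depth_m[ys, xs]))  # robust depth over the mask pixels
        if z <= 0.0 or not np.isfinite(z):
            continue                           # skip masks with no valid depth
        u, v = xs.mean(), ys.mean()            # mask centroid in pixel coordinates
        # Pinhole back-projection of the centroid at the mask's median depth.
        points.append(((u - cx) * z / fx, (v - cy) * z / fy, z))
    return points

In practice these camera-frame points would still be transformed into the robot's body or odometry frame before being passed to the detection filter above.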
+ IV. EXPERIMENTS
+ A. Higher Level Control
+ 1) Goals: The goal of these experiments is to determine the precision of moving the robot's center of mass to desired positions. The experiments aim to align the geometric center of the robot with the grapevine trunk; this way an arm mounted on the front of the robot is centered on the grapevine's main cordon, which optimizes the workspace of the arm for single-plant operations such as pruning.
+ Fig. 4: Experiment setup.
+ 2) Setup: The higher-level control was tested in a lab using Unitree's Aliengo robot. Aliengo was used to simplify the experiments since it is 21 kg and 61 cm in length. Aliengo is equipped with Intel's Realsense D435 RGB-D camera. Red balls were used as segmentation targets in the lab instead of grapevine trunks. The red balls are spaced about 80 cm apart, the approximate distance between grapevines. The setup of the experiment can be seen in Fig. 4. How the precision of the robot approaching a position was measured is shown in Fig. 5.
+ Fig. 5: Measurement of Aliengo's arrival at a position.
+ 3) Tests: The robot does an initial search of the area using its RGB-D camera to segment the red balls. After it finds the row of red balls, it approaches the first position in the row. After arriving at the initial position, it pauses for an automated task and updates its detections. It repeats this process until the row is finished and then stops.
+ Ten trials were conducted with five balls. To measure the error between the destination point and the red ball, a laser pointer was used to show the point that Aliengo's center of mass reached.
+ 4) Results: The error of reaching the destination point has a mean of 3.36 cm and a standard deviation of 2.19 cm. The accompanying video shows complete trials.
+ B. Grapevine Identification
+ 1) Goals: The goal is to test how well the Mask-RCNN was trained for working in vineyards.
+ 2) Setup: The training of the neural network was done in Detectron2 using the framework set up in the paper [4]; a minimal configuration sketch is given after this subsection.
+ 3) Tests: The results were tested on a previously recorded video of a potted vineyard at University Cattolica of Piacenza during winter.
+ 4) Results: Outputs from the model are shown in Fig. 3. Currently the model needs to be trained on more data for robustness and for functionality in other vineyards as well.
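For completeness, a fine-tuning setup of this kind could be configured roughly as below, in the spirit of the framework cited as [4]. The dataset name, annotation paths, and training schedule are illustrative assumptions rather than the project's actual settings.

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data.datasets import register_coco_instances
from detectron2.engine import DefaultTrainer

# Register the hand-annotated trunk images (hypothetical COCO-format paths).
register_coco_instances("trunks_train", {}, "annotations/train.json", "images/train")

cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.DATASETS.TRAIN = ("trunks_train",)
cfg.DATASETS.TEST = ()
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")  # start from COCO weights
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1   # single "trunk" class (assumption)
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.MAX_ITER = 1500            # short schedule, appropriate for roughly 100 images

trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()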
+ V. CONCLUSION
+ This paper presented a method of computer-vision based navigation in vineyards for quadruped robots. This method allows for precise placement of the robot to perform selective task automation.
+ The control architecture works accurately in the lab experiments, and the trunk detections from the image segmentation can accurately identify grapevine trunks. The quadruped can reach a desired destination position with a mean error of 3.36 cm.
+ The next steps for this architecture are combining the grapevine trunk segmentation with the higher-level control to test in the field. The dataset created for this project also has to be expanded to train a more robust Mask-RCNN.
+ VI. ACKNOWLEDGMENTS
+ Thanks to Miguel Fernandes for helping train the dataset and Lorenzo Amatucci for the configuration of the robot's controllers for the experiments.
+ REFERENCES
+ [1] P. Astolfi, A. Gabrielli, L. Bascetta, and M. Matteucci. "Vineyard Autonomous Navigation in the Echord++ GRAPE Experiment (FP7-601116). http://echord.eu/grape/". In: IFAC-PapersOnLine 51.11 (2018), 16th IFAC Symposium on Information Control Problems in Manufacturing INCOM 2018, pp. 704–709. ISSN: 2405-8963. DOI: 10.1016/j.ifacol.2018.08.401. URL: https://www.sciencedirect.com/science/article/pii/S2405896318315271.
+ [2] M. Bergerman, S. M. Maeta, J. Zhang, G. M. Freitas, B. Hamner, S. Singh, and G. Kantor. "Robot Farmers: Autonomous Orchard Vehicles Help Tree Fruit Production". In: IEEE Robotics & Automation Magazine 22.1 (2015), pp. 54–63. DOI: 10.1109/MRA.2014.2369292.
+ [3] R. F. Carpio, C. Potena, J. Maiolini, G. Ulivi, N. B. Rosselló, E. Garone, and A. Gasparri. "A Navigation Architecture for Ackermann Vehicles in Precision Farming". In: IEEE Robotics and Automation Letters 5.2 (2020), pp. 1103–1110. DOI: 10.1109/LRA.2020.2967306.
+ [4] M. Fernandes, A. Scaldaferri, P. Guadagna, G. Fiameni, T. Teng, M. Gatti, S. Poni, C. Semini, D. G. Caldwell, and F. Chen. "Towards Precise Pruning Points Detection using Semantic-Instance-Aware Plant Models for Grapevine Winter Pruning Automation". In: CoRR abs/2109.07247 (2021). arXiv: 2109.07247. URL: https://arxiv.org/abs/2109.07247.
+ [5] A. Papadimitriou, I. Kleitsiotis, I. Kostavelis, I. Mariolis, D. Giakoumis, S. Likothanassis, and D. Tzovaras. "Loop Closure Detection and SLAM in Vineyards with Deep Semantic Cues". In: 2022 International Conference on Robotics and Automation (ICRA). 2022, pp. 2251–2258. DOI: 10.1109/ICRA46639.2022.9812419.
+ [6] G. Quaglia, C. Visconte, L. S. Scimmi, M. Melchiorre, P. Cavallone, and S. Pastorelli. "Design of a UGV Powered by Solar Energy for Precision Agriculture". In: Robotics 9.1 (2020). ISSN: 2218-6581. DOI: 10.3390/robotics9010013. URL: https://www.mdpi.com/2218-6581/9/1/13.
+ [7] G. Riggio, C. Fantuzzi, and C. Secchi. "A Low-Cost Navigation Strategy for Yield Estimation in Vineyards". In: May 2018, pp. 2200–2205. DOI: 10.1109/ICRA.2018.8462839.
+ [8] C. Semini, V. Barasuol, M. Focchi, C. Boelens, M. Emara, S. Casella, O. Villarreal, R. Orsolino, G. Fink, S. Fahmi, et al. "Brief introduction to the quadruped robot HyQReal". In: Istituto di Robotica e Macchine Intelligenti (I-RIM) (2019).
+ [9] Y. Wu, A. Kirillov, F. Massa, W.-Y. Lo, and R. Girshick. "Detectron2". 2019.
+
bdAyT4oBgHgl3EQf-PoQ/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,286 @@
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf,len=285
204
+ page_content=' Gatti, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
205
+ page_content=' Poni, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
206
+ page_content=' Semini, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
207
+ page_content=' G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
208
+ page_content=' Caldwell, and F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
209
+ page_content=' Chen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
210
+ page_content=' “Towards Precise Pruning Points Detection using Semantic- Instance-Aware Plant Models for Grapevine Winter Pruning Automation”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
211
+ page_content=' In: CoRR abs/2109.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
212
+ page_content='07247 (2021).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
213
+ page_content=' arXiv: 2109.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
214
+ page_content=' 07247.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
215
+ page_content=' URL: https://arxiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
216
+ page_content='org/abs/2109.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
217
+ page_content='07247.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
218
+ page_content=' [5] A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
219
+ page_content=' Papadimitriou, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
220
+ page_content=' Kleitsiotis, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
221
+ page_content=' Kostavelis, I.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
222
+ page_content=' Mariolis, D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
223
+ page_content=' Giakoumis, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
224
+ page_content=' Likothanassis, and D.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
225
+ page_content=' Tzovaras.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
226
+ page_content=' “Loop Closure Detection and SLAM in Vineyards with Deep Semantic Cues”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
227
+ page_content=' In: 2022 International Conference on Robotics and Automation (ICRA).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
228
+ page_content=' 2022, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
229
+ page_content=' 2251–2258.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
230
+ page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
231
+ page_content='1109/ICRA46639.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
232
+ page_content='2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
233
+ page_content=' 9812419.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
234
+ page_content=' [6] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
235
+ page_content=' Quaglia, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
236
+ page_content=' Visconte, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
237
+ page_content=' S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
238
+ page_content=' Scimmi, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
239
+ page_content=' Melchiorre, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
240
+ page_content=' Cavallone, and S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
241
+ page_content=' Pastorelli.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
242
+ page_content=' “Design of a UGV Powered by Solar Energy for Precision Agriculture”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
243
+ page_content=' In: Robotics 9.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
244
+ page_content='1 (2020).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
245
+ page_content=' ISSN: 2218-6581.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
246
+ page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
247
+ page_content='3390/robotics9010013.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
248
+ page_content=' URL: https://www.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
249
+ page_content='mdpi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
250
+ page_content='com/2218-6581/9/1/13.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
251
+ page_content=' [7] G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
252
+ page_content=' Riggio, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
253
+ page_content=' Fantuzzi, and C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
254
+ page_content=' Secchi.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
255
+ page_content=' “A Low-Cost Navigation Strategy for Yield Estimation in Vineyards”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
256
+ page_content=' In: May 2018, pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
257
+ page_content=' 2200–2205.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
258
+ page_content=' DOI: 10.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
259
+ page_content='1109/ICRA.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
260
+ page_content='2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
261
+ page_content='8462839.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
262
+ page_content=' [8] C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
263
+ page_content=' Semini, V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
264
+ page_content=' Barasuol, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
265
+ page_content=' Focchi, C.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
266
+ page_content=' Boelens, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
267
+ page_content=' Emara, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
268
+ page_content=' Casella, O.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
269
+ page_content=' Villarreal, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
270
+ page_content=' Orsolino, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
271
+ page_content=' Fink, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
272
+ page_content=' Fahmi, et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
273
+ page_content=' “Brief introduction to the quadruped robot HyQReal”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
274
+ page_content=' In: Istituto di Robotica e Macchine Intelligenti (I-RIM) (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
275
+ page_content=' [9] Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
276
+ page_content=' Wu, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
277
+ page_content=' Kirillov, F.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
278
+ page_content=' Massa, W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
279
+ page_content='-Y.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
280
+ page_content=' Lo, and R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
281
+ page_content=' Girshick.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
282
+ page_content=' “Detectron2”.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
283
+ page_content=' In: (2019).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
284
+ page_content=' w.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
285
+ page_content='burster.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
286
+ page_content='it 7 61 81 14' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/bdAyT4oBgHgl3EQf-PoQ/content/2301.00887v1.pdf'}
bdFAT4oBgHgl3EQfXh2G/content/tmp_files/2301.08534v1.pdf.txt ADDED
@@ -0,0 +1,910 @@
1
+ Galaz, Z. et al. (2022). Prodromal Diagnosis of Lewy Body Diseases Based on the Assessment of Graphomotor
2
+ and Handwriting Difficulties. In: Carmona-Duarte, C., Diaz, M., Ferrer, M.A., Morales, A. (eds) Intertwining
3
+ Graphonomics with Human Movements. IGS 2022. Lecture Notes in Computer Science, vol 13424. Springer,
4
+ Cham. https://doi.org/10.1007/978-3-031-19745-1_19
5
+
6
+
7
+ Prodromal Diagnosis of Lewy Body
8
+ Diseases Based on the Assessment
9
+ of Graphomotor and Handwriting
10
+ Difficulties
11
+
12
+ Zoltan Galaz1, Jiri Mekyska1, Jan Mucha1, Vojtech Zvoncak1, Zdenek Smekal1,
13
+ Marcos Faundez-Zanuy2, Lubos Brabenec3, Ivona Moravkova3,4,5, and Irena
14
+ Rektorova3,4
15
+ 1 Department of Telecommunications, Faculty of Electrical Engineering and
16
+ Communication, Brno University of Technology, Brno, Czech Republic
17
+ xgalaz00@gmail.com
18
+ 2 Escola Superior Politecnica, Tecnocampus, Mataro, Barcelona, Spain
19
+ 3 Applied Neuroscience Research Group, Central European Institute
20
+ of Technology – CEITEC, Masaryk University, Brno, Czech Republic
21
+ 4 First Department of Neurology, Faculty of Medicine and St. Anne’s University
22
+ Hospital, Masaryk University, Brno, Czech Republic
23
+ 5 Faculty of Medicine, Masaryk University, Brno, Czech Republic
24
+
25
+ Abstract. To this date, studies focusing on the prodromal diagnosis of
26
+ Lewy body diseases (LBDs) based on quantitative analysis of graphomo-
27
+ tor and handwriting difficulties are missing. In this work, we enrolled 18
28
+ subjects diagnosed with possible or probable mild cognitive impairment
29
+ with Lewy bodies (MCI-LB), 7 subjects having more than 50% prob-
30
+ ability of developing Parkinson’s disease (PD), 21 subjects with both
31
+ possible/probable MCI-LB and probability of PD > 50%, and 37 age-
32
+ and gender-matched healthy controls (HC). Each participant performed
33
+ three tasks: Archimedean spiral drawing (to quantify graphomotor diffi-
34
+ culties), sentence writing task (to quantify handwriting difficulties), and
35
+ pentagon copying test (to quantify cognitive decline). Next, we parame-
36
+ terized the acquired data by various temporal, kinematic, dynamic, spa-
37
+ tial, and task-specific features. And finally, we trained classification mod-
38
+ els for each task separately as well as a model for their combination to
39
+ estimate the predictive power of the features for the identification of
40
+ LBDs. Using this approach we were able to identify prodromal LBDs
41
+ with 74% accuracy and showed the promising potential of computerized
42
+ objective and non-invasive diagnosis of LBDs based on the assessment
43
+ of graphomotor and handwriting difficulties.
44
+
45
+
46
+ This work was supported by grant no. NU20-04-00294 (Diagnostics of Lewy body
47
+ diseases in prodromal stage based on multimodal data analysis) of the Czech Ministry
48
+ of Health and by Spanish grant of the Ministerio de Ciencia e Innovación no. PID2020-
49
+ 113242RB-I00.
50
+
51
+
52
+
53
+ Keywords: Lewy body diseases · Online handwriting · Graphomotor
+ difficulties · Handwriting difficulties · Machine learning · Prodromal
+ diagnosis
62
+
63
+ 1 Introduction
64
+
65
+ Lewy body diseases (LBDs) is a term describing a group of neurodegenerative
66
+ disorders characterized by a pathophysiological process of α-synuclein accumu-
67
+ lation in specific brain regions leading to the formation of Lewy bodies and
68
+ Lewy neurites resulting in cell death. LBDs consists of two major clinical enti-
69
+ ties: Parkinson’s disease (PD) and dementia with Lewy bodies (DLB) [29,38].
70
+ Although the phenotypes and temporal evolution of motor and cognitive symp-
71
+ toms of these two diseases vary, they share many clinical and pathophysiolog-
72
+ ical features and are therefore referred to as LBDs spectrum. Together with
73
+ Alzheimer’s disease (AD), LBDs comprise the major part of all cases of neu-
74
+ rodegenerative disorders.
75
+ It is known that LBDs do not start suddenly. At the time the clinical symp-
76
+ toms occur, the neurodegenerative process has reached a severe degree in which
77
+ most of the targeted neurons have already been damaged. Before the clinical
78
+ diagnosis based on the presence of typical clinical symptoms becomes possible,
79
+ there is a long period of the underlying neurodegenerative process with subtle or
80
+ nonspecific symptoms [18,29] such as sleep disturbances, mood changes, smell
81
+ loss, constipation, etc. This period of LBDs is called the prodromal stage.
82
+ One of the early markers of PD is PD dysgraphia (micrographia and other
83
+ alterations in handwriting, e.g. kinematic and dynamic) [21,32,33]. Similarly,
84
+ some manifestations of dysgraphia have been observed in the prodromal DLB
85
+ as well [23]. Although modern approaches to the analysis of graphomotor and
86
+ handwriting difficulties (utilising digitising tablets) were proved to work well
87
+ during e.g. diagnosis of the clinical stage of PD [9,11,35], assessment of cogni-
88
+ tion in PD patients [4], or discrimination of AD and mild cognitive impairment
89
+ (MCI) [15], to the best of our knowledge, no studies employed this technology
90
+ (with high potential) in the prodromal diagnosis of LBDs in a larger scale.
91
+ Identification of the early stages of LBDs is crucial for the development
92
+ of disease-modifying treatment since the neurodegeneration may be possibly
93
+ stopped or treated before the pathological cascades start. Therefore, the goal
94
+ of this study is to explore whether the computerised assessment of graphomo-
95
+ tor and handwriting difficulties could support the prodromal diagnosis of LBDs,
96
+ more specifically, we aim to:
97
+ 1. identify which task significantly discriminates LBD patients and age- and
98
+ gender-matched healthy controls (HC),
99
+ 2. identify what conventional online handwriting features have good discrimina-
100
+ tion power.
101
+
102
+
103
+
104
107
+
108
+ 2 Materials and Methods
109
+
110
+ 2.1
111
+ Dataset
112
+ We enrolled 39 subjects (19 females, 20 males, age = 69.53 ± 6.61) diagnosed
114
+ with possible or probable MCI (based on the scores of the MoCA – Montreal
115
+ Cognitive Assessment [25] and based on the CCB – Complex Cognitive Battery,
116
+ see the explanation below) who were simultaneously diagnosed with possible or
117
+ probable MCI-LB (i.e. mild cognitive impairment with Lewy bodies) based on
118
+ the criteria published by McKeith et al. [22]. In this group, 21 subjects also
119
+ had more than 50% probability of developing PD (calculated following the MDS
120
+ criteria published in [18]). In addition, we enrolled 7 subjects (2 females, 5 males,
121
+ age = 66.41 ± 4.32) without possible/probable MCI-LB, but still with more than
122
+ 50% probability of developing PD. Finally, we enrolled 37 HC (26 females, 11
123
+ males, age = 67.60 ± 5.61). In the experiments, we stratified the subjects into
125
+ two groups, HC vs. LBD (i.e. people with a high risk of developing PD or DLB).
126
+ CCB was used to evaluate four cognitive domains: 1) memory (The Brief
127
+ Visuospatial memory test–revised [2], Philadelphia Verbal Learning Test [3]);
128
+ 2) attention (Wechsler Adult Intelligence Scale-III: Letter-Number Sequencing,
129
+ Digit Symbol Substitution [37]); 3) executive functions (Semantic and phonemic
130
+ verbal fluency [30], Picture arrangement test [37]); and 4) visuospatial functions
131
+ (Judgment of Line Orientation [36]). The cognitive domain z-scores were com-
132
+ puted as the average z-scores of the tests included in the particular domain.
133
+ The participants were asked to perform a set of three tasks:
134
+ 1. Archimedean spiral (spiral) – we consider this task as a graphomotor one, i.e.
135
+ it is a building block of some letter shapes; in addition, it is a golden standard
136
+ in PD dysgraphia diagnosis [35]
137
+ 2. sentence “Tramvaj dnes už nepojede” (translation: “A tram will not go
138
+ today.”) writing (sentence) – this handwriting task was used e.g. in the
139
+ PaHaW database [11]
140
+ 3. pentagon copying test (pentagons) – it is a task frequently used for quantifi-
141
+ cation of cognitive decline [4]
142
+ All participants were right-handed and had Czech as their native language.
143
+ They all signed an informed consent form that was approved by the local ethics
144
+ committee.
145
+
146
+ 2.2
147
+ Feature Extraction
148
+ The participants were asked to perform the tasks (using the Wacom Ink pen)
149
+ on an A4 paper that was laid down and fixed to a digitizing tablet Wacom
150
+ Intuos 4 M (sampling frequency fs = 130 Hz). Before the acquisition, they had
151
+ some time to get familiar with the hardware. The recorded time series (x and
152
+ y position; timestamp; a binary variable, being 0 for in-air movement and 1 for
153
+ on-surface movement, respectively; pressure exerted on the tablet’s surface during
154
+
155
+
156
+
157
+
158
+ writing; pen tilt; azimuth) were consequently parameterised utilising the follow-
159
+ ing set of features (we selected the set based on available reviews and based on
160
+ our experience [9,11,35]):
161
+ 1. temporal – duration of writing, ratio of the on-surface/in-air duration, dura-
162
+ tion of strokes, and ratio of the on-surface/in-air stroke duration
163
+ 2. kinematic – velocity, and acceleration
164
+ 3. dynamic – pressure, tilt, and azimuth
165
+ 4. spatial – width, height, and length of the whole product, as well as its partic-
166
+ ular strokes, i.e. stroke width, height, and length
167
+ 5. spiral-specific – degree of spiral drawing severity [31], mean drawing speed of
168
+ spiral [31], second-order smoothness of spiral [31], spiral precision index [5],
169
+ spiral tightness [31], variability of spiral width [31], and first-order zero-
170
+ crossing rate of spiral [31]
171
+ 6. other – number of interruptions (pen elevations), number of pen stops [27],
172
+ tempo (number of strokes normalised by duration), number of on-surface
173
+ intra-stroke intersections, relative number of on-surface intra-stroke intersec-
174
+ tions, number of on-surface inter-stroke intersections, and relative number of
175
+ on-surface inter-stroke intersections, Shannon entropy [4], number of changes
176
+ in the velocity profile, relative number of changes in the velocity profile
177
+ Most of the features were extracted using the recently released Python library
178
+ handwriting-features (v 1.0.1) [14], the rest of them were coded in Matlab. Some
179
+ features (mainly spatial, temporal and kinematic) were extracted from both on-
180
+ surface and in-air movements. In addition, kinematic features were also analysed
181
+ in horizontal and vertical projection. Features represented by vectors were con-
182
+ sequently transformed to a scalar value using median, non-parametric coefficient
183
+ of variation (nCV; interquartile range of feature divided by its median), slope
184
+ and 95th percentile (95p).
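As an illustration of this parameterisation step, the sketch below derives a resultant velocity profile from x/y/timestamp samples and reduces it to the four scalar descriptors mentioned above (median, nCV, slope, 95th percentile). It is a minimal stand-in rather than the authors' handwriting-features library; the 130 Hz sampling rate comes from the acquisition setup, while the trajectory itself is a random placeholder.

import numpy as np

# Minimal sketch of the kinematic parameterisation described above; not the authors' code.
def velocity(x, y, t):
    """Resultant pen velocity between consecutive samples."""
    return np.hypot(np.diff(x), np.diff(y)) / np.diff(t)

def ncv(values):
    """Non-parametric coefficient of variation: interquartile range divided by the median."""
    q1, q3 = np.percentile(values, [25, 75])
    return (q3 - q1) / np.median(values)

fs = 130.0                                   # tablet sampling frequency from Sect. 2.2
t = np.arange(500) / fs                      # 500 samples, roughly 3.8 s of writing
x = np.cumsum(np.random.randn(500)) * 0.1    # placeholder pen trajectory
y = np.cumsum(np.random.randn(500)) * 0.1

v = velocity(x, y, t)
scalars = {
    "median": np.median(v),
    "nCV": ncv(v),
    "slope": np.polyfit(t[1:], v, 1)[0],
    "95p": np.percentile(v, 95),
}
print(scalars)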
185
+
186
+ 2.3
187
+ Statistical Analysis and Machine Learning
188
+ To compare the distribution of features between the HC and LBD subjects, we
189
+ conducted Mann-Whitney U-test with the significance level of 0.05. Moreover,
190
+ to assess the strength of a relationship between the features and the subject’s
191
+ clinical status (HC/LBD), we computed Spearman’s correlation coefficient (ρ)
192
+ with the significance level of 0.05. Finally, during this exploratory step, we calcu-
193
+ lated Spearman’s correlation with the domains of CCB and the overall score of
194
+ MDS–Unified Parkinson’s Disease Rating Scale (MDS–UPDRS), part III (motor
195
+ part) [16].
196
+ To identify the presence of graphomotor or handwriting difficulties, we built
197
+ binary classification models using an ensemble extreme gradient boosting algo-
198
+ rithm known as XGBoost [6] (with 100 estimators). This algorithm was chosen
199
+ due to its robustness to outliers, ability to find complex interactions among fea-
200
+ tures as well as the possibility of ranking their importance. To build models with
201
+ an optimal set of hyperparameters, we conducted 1000 iteration of randomized
202
+
203
+
204
+
205
211
+ search strategy via stratified 5-fold cross-validation with 10 repetitions aiming
212
+ to optimize balanced accuracy score (BACC; described in more detail along with
213
+ other evaluation scores below). The following set of hyperparameters were opti-
214
+ mized: the learning rate [0.001, 0.01, 0.1, 0.2, 0.3], γ [0, 0.05, 0.10, 0.15, 0.20,
215
+ 0.25, 0.5], the maximum tree depth [6, 8, 10, 12, 15], the fraction of observations
216
+ to be randomly sampled for each tree (subsample ratio) [0.5, 0.6, 0.7, 0.8, 0.9,
217
+ 1.0], the subsample ratio for the columns at each level [0.4, 0.5, 0.6, 0.7, 0.8,
218
+ 0.9, 1.0], the subsample ratio for the columns when constructing each tree [0.4,
219
+ 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], the minimum sum of the weights of all observations
220
+ required in a child node [0.5, 1.0, 3.0, 5.0, 7.0, 10.0], and the balance between
221
+ positive and negative weights [1, 2, 3, 4].
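A sketch of how such a search can be wired together with xgboost and scikit-learn follows. It mirrors the search space listed above (using xgboost's parameter names gamma, max_depth, subsample, colsample_bylevel, colsample_bytree, min_child_weight and scale_pos_weight for the quantities described in the text), but the data arrays are placeholders and the authors' exact pipeline is not known; the 1000-iteration budget can be reduced for a quick run.

import numpy as np
from xgboost import XGBClassifier
from sklearn.model_selection import RandomizedSearchCV, RepeatedStratifiedKFold

# Placeholder data: 83 subjects (46 LBD, 37 HC) with an arbitrary feature matrix.
rng = np.random.default_rng(0)
X = rng.normal(size=(83, 60))
y = np.concatenate([np.ones(46, dtype=int), np.zeros(37, dtype=int)])

param_distributions = {
    "learning_rate": [0.001, 0.01, 0.1, 0.2, 0.3],
    "gamma": [0, 0.05, 0.10, 0.15, 0.20, 0.25, 0.5],
    "max_depth": [6, 8, 10, 12, 15],
    "subsample": [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    "colsample_bylevel": [0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    "colsample_bytree": [0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
    "min_child_weight": [0.5, 1.0, 3.0, 5.0, 7.0, 10.0],
    "scale_pos_weight": [1, 2, 3, 4],
}

search = RandomizedSearchCV(
    estimator=XGBClassifier(n_estimators=100),
    param_distributions=param_distributions,
    n_iter=1000,                                      # the paper's search budget
    scoring="balanced_accuracy",
    cv=RepeatedStratifiedKFold(n_splits=5, n_repeats=10, random_state=0),
    random_state=0,
    n_jobs=-1,
)
search.fit(X, y)
print(search.best_params_, search.best_score_)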
222
+ The classification test performance was determined using the following clas-
223
+ sification metrics: Matthew’s correlation coefficient (MCC), balanced accuracy
224
+ (BACC), sensitivity (SEN) also known as recall (REC), specificity (SPE), pre-
225
+ cision (PRE) and F1 score (F1). These metrics are defined as follows:
226
+ MCC = (TP × TN − FP × FN) / √N,  (1)
+ BACC = (1/2) × (TP / (TP + FN) + TN / (TN + FP)),  (2)
+ SPE = TN / (TN + FP),  (3)
+ PRE = TP / (TP + FP),  (4)
+ SEN = REC = TP / (TP + FN),  (5)
+ F1 = 2 × PRE × REC / (PRE + REC),  (6)
259
+ where N = (TP + FP) × (TP + FN) × (TN + FP) × (TN + FN), TP (true
260
+ positive) and FP (false positive) represent the number of correctly identified
261
+ LBD subjects and the number of subjects incorrectly identified as having LBDs,
262
+ respectively. Similarly, TN (true negative) and FN (false negative) represent
263
+ the number of correctly identified HC and the number of subjects with LBDs
264
+ incorrectly identified as being healthy.
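The same scores can be computed directly from the four confusion-matrix counts. The helper below follows Eqs. (1)-(6); the counts passed at the end are purely illustrative.

import numpy as np

def classification_metrics(tp, tn, fp, fn):
    """Evaluation scores of Eqs. (1)-(6) from confusion-matrix counts."""
    n = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
    mcc = (tp * tn - fp * fn) / np.sqrt(n) if n > 0 else 0.0
    sen = tp / (tp + fn)            # sensitivity / recall
    spe = tn / (tn + fp)            # specificity
    pre = tp / (tp + fp)            # precision
    bacc = (sen + spe) / 2
    f1 = 2 * pre * sen / (pre + sen)
    return {"MCC": mcc, "BACC": bacc, "SEN": sen, "SPE": spe, "PRE": pre, "F1": f1}

# Illustrative counts only (46 LBD and 37 HC subjects in total).
print(classification_metrics(tp=37, fn=9, tn=25, fp=12))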
265
+ To further optimize the trained classification models, we fine-tuned the mod-
266
+ els’ decision thresholds via the receiver operating characteristics (ROC) curve.
267
+ Using the fine-tuned decision thresholds, we evaluated the classification perfor-
268
+ mance of the models using the leave-one-out cross-validation. The ROC curves
269
+ were plotted using the probabilities of the predicted labels obtained via the
270
+ cross-validation procedure that was employed during the final evaluation of the
271
+ fine-tuned models.
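One possible realisation of this step with scikit-learn is sketched below: leave-one-out class probabilities are collected with cross_val_predict, the ROC curve is computed from them, and a decision threshold is selected. The paper does not state its threshold-selection rule, so Youden's J statistic is used here only as an example; X and y are placeholders and the classifier would normally carry the tuned hyperparameters.

import numpy as np
from xgboost import XGBClassifier
from sklearn.model_selection import LeaveOneOut, cross_val_predict
from sklearn.metrics import roc_curve, balanced_accuracy_score

rng = np.random.default_rng(0)
X = rng.normal(size=(83, 60))                                   # placeholder features
y = np.concatenate([np.ones(46, dtype=int), np.zeros(37, dtype=int)])

# Out-of-sample probabilities via leave-one-out cross-validation.
proba = cross_val_predict(XGBClassifier(n_estimators=100), X, y,
                          cv=LeaveOneOut(), method="predict_proba")[:, 1]

fpr, tpr, thresholds = roc_curve(y, proba)
best = thresholds[np.argmax(tpr - fpr)]          # Youden's J as an example criterion
y_pred = (proba >= best).astype(int)
print(best, balanced_accuracy_score(y, y_pred))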
272
+ And finally, to evaluate the statistical significance of the prediction perfor-
273
+ mance obtained by the built classification models, a non-parametric statisti-
274
+ cal method named permutation test was employed [7,28]. For this purpose, we
275
+ applied 1 000 permutations with the significance level of 0.05. To estimate the
276
+
277
+
278
+
279
+
280
+
281
+ performance of the models on the permuted data, we used the same classification
282
+ setup as employed during the training phase [26].
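One way to run such a test is scikit-learn's permutation_test_score, sketched below with 1000 label permutations and balanced accuracy as the score. This approximates the described protocol rather than reproducing the authors' exact setup; X and y are placeholders.

import numpy as np
from xgboost import XGBClassifier
from sklearn.model_selection import StratifiedKFold, permutation_test_score

rng = np.random.default_rng(0)
X = rng.normal(size=(83, 60))                                   # placeholder features
y = np.concatenate([np.ones(46, dtype=int), np.zeros(37, dtype=int)])

score, perm_scores, p_value = permutation_test_score(
    XGBClassifier(n_estimators=100), X, y,
    scoring="balanced_accuracy",
    cv=StratifiedKFold(n_splits=5, shuffle=True, random_state=0),
    n_permutations=1000,                                        # as in the paper
    random_state=0,
)
print(score, p_value)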
283
+
284
+ 3 Results
285
+
286
+ The results of the exploratory data analysis are summarized in Table 1 (sorted
287
+ based on the p-value for the Mann-Whitney U-test). The following features were
288
+ found as the most distinguishing ones in terms of the differentiation between HC
289
+ and subjects with LBD (the top 4 features are listed; *, **, and *** denote the p-
290
+ values for both the Mann-Whitney U-test and Spearman’s correlation coefficient
291
+ being below the significance level of 0.05, 0.01, and 0.001, respectively; if both p-
+ values are below a different significance level, the weaker statistical significance
+ is selected): a) spiral – nCV of acceleration (on-surface) ρ = −0.2438∗, variability
294
+ of spiral width ρ = 0.2439∗, median of azimuth ρ = 0.2378∗, and spiral precision
295
+ index ρ = 0.2367∗; b) sentence – number of pen stops ρ = 0.3460∗∗, slope of
296
+ duration of stroke (in-air) ρ = 0.2823∗∗, median of vertical velocity (on-surface)
297
+ ρ = −0.2438∗, and median of vertical acceleration (on-surface) ρ = 0.2317∗; and
298
+ c) pentagons – width of writing (on-surface) ρ = −0.3045∗∗, median of length
299
+ of stroke (on-surface) ρ = −0.2894∗∗, nCV of length of stroke (on-surface) ρ =
300
+ 0.2489∗, and median of duration of stroke (on-surface) ρ = −0.2327∗.
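For reference, the two statistics reported in Table 1 below (Mann-Whitney U-test p-values and Spearman's ρ against the clinical status) can be obtained with standard SciPy routines; the sketch uses placeholder feature values, not the study data.

import numpy as np
from scipy.stats import mannwhitneyu, spearmanr

rng = np.random.default_rng(0)
feature_hc = rng.normal(0.0, 1.0, 37)     # placeholder feature values for the 37 HC subjects
feature_lbd = rng.normal(0.5, 1.0, 46)    # placeholder feature values for the 46 LBD subjects

# Mann-Whitney U-test between the two groups (two-sided).
u_stat, p_u = mannwhitneyu(feature_hc, feature_lbd, alternative="two-sided")

# Spearman correlation between the feature and the binary clinical status (0 = HC, 1 = LBD).
values = np.concatenate([feature_hc, feature_lbd])
status = np.concatenate([np.zeros(37), np.ones(46)])
rho, p_rho = spearmanr(values, status)
print(p_u, rho, p_rho)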
301
+
302
+ Table 1. Results of the exploratory analysis.
303
+
304
+ Feature                                           p(U)     ρ        p(ρ)
+ Spiral
+ nCV of acceleration (s)                           0.0138   −0.2438  0.0263
+ Variability of spiral width                       0.0138   0.2439   0.0263
+ Median of azimuth                                 0.0158   0.2378   0.0304
+ Spiral precision index                            0.0162   0.2367   0.0312
+ nCV of duration of stroke (s)                     0.0438   −0.1892  0.0867
+ Sentence
+ Number of pen stops                               0.0009   0.3460   0.0014
+ Slope of duration of stroke (a)                   0.0054   0.2823   0.0097
+ Median of vertical velocity (s)                   0.0138   −0.2438  0.0263
+ Median of vertical acceleration (s)               0.0182   0.2317   0.0351
+ Rel. total number of intra-stroke intersections   0.0232   −0.2206  0.0451
+ Pentagons
+ Width of writing (s)                              0.0030   −0.3045  0.0051
+ Median of length of stroke (s)                    0.0045   −0.2894  0.0080
+ nCV of length of stroke (s)                       0.0123   0.2489   0.0233
+ Median of duration of stroke (s)                  0.0178   −0.2327  0.0343
+ Median of horizontal acceleration (s)             0.0182   0.2317   0.0351
+ p(U) – p-value of Mann-Whitney U-test; ρ – Spearman’s correlation coefficient;
+ p(ρ) – p-value of ρ; (s) – on-surface movement; (a) – in-air movement.
365
+
366
+
367
+
368
369
+
370
+ Next, Table 2 presents the results of the correlation analysis (*, and ** denote
371
+ the p-values for Spearman’s correlation coefficient being below the significance
372
+ level of 0.05 and 0.01, respectively) between the features summarized in Table 1
373
+ and the following clinical information: a) MDS–UPDRS, and b) CCB domains.
374
+
375
+ Table 2. Results of the correlation analysis.
376
+
377
+ Feature                                           ρ (UPDRS)  ρ (V)      ρ (A)      ρ (E)
+ Spiral
+ nCV of acceleration (s)                           −0.3411∗   −0.0013    0.1130     0.1899
+ Variability of spiral width                       0.1653     −0.3973∗∗  −0.2981∗   −0.1666
+ Median of azimuth                                 0.0442     −0.3656∗   −0.1029    −0.0490
+ Spiral precision index                            0.0606     −0.0942    −0.3987∗∗  −0.2126
+ nCV of duration of stroke (s)                     −0.1089    −0.1344    −0.1618    −0.0469
+ Sentence
+ Num. of pen stops                                 −0.1018    −0.1181    0.1012     −0.1956
+ Slope of duration of stroke (a)                   0.2620     −0.1928    −0.0513    −0.1025
+ Median of vertical velocity (s)                   0.0314     0.1106     0.0025     0.1794
+ Median of vertical acceleration (s)               −0.2641    −0.0301    0.3246∗    0.0193
+ Rel. total num. of intra-stroke intersections     0.0477     0.1647     0.1143     0.0962
+ Pentagons
+ Width of writing (s)                              −0.3448∗   0.2947∗    0.1351     0.1362
+ Median of length of stroke (s)                    −0.1545    0.1607     0.0501     0.1511
+ nCV of length of stroke (s)                       0.3065∗    −0.2435    −0.1126    −0.1155
+ Median of duration of stroke (s)                  −0.0348    0.0080     −0.0085    −0.0269
+ Median of horizontal acceleration (s)             0.3215∗    −0.0226    −0.1632    −0.2060
+ ρ – Spearman’s correlation coefficient (∗ denotes p-value < 0.05 and ∗∗ denotes p-value
+ < 0.01); UPDRS – MDS–Unified Parkinson’s Disease Rating Scale, part III (motor
+ part) [16]; V – visuospatial domain of CCB; A – attention domain of CCB; E – executive
+ functions domain of CCB; (s) – on-surface movement; (a) – in-air movement.
463
+
464
+ To visualize the difference in the distribution of the top 4 features summarized
465
+ above for HC and subjects with LBD, the box-violin plots are presented in
466
+ Figs. 1, 2 and 3. The Fig. 1 shows the distribution of the features for the spiral
467
+ drawing, the Fig. 2 shows the distribution of the features for the sentence writing,
468
+ and the Fig. 3 is dedicated to the distribution of the features for the pentagon
469
+ copying test.
470
+ The results of the classification analysis are summarized in Table 3. We
471
+ trained 4 models in total: 3 models dedicated to each task separately and
472
+ a model combining all of the tasks. The following results were achieved (where ∗
+ and ∗∗ denote p-values of the permutation test below 0.05 and 0.01,
+ respectively): a) spiral – BACC = 0.6848∗∗, SEN = 0.8696, SPE = 0.5000; b)
+ sentence – BACC = 0.7283∗∗, SEN = 0.9783, SPE = 0.4783; c) pentagons –
477
+ BACC = 0.6848∗∗, SEN = 0.9348, SPE = 0.4348; and d) all tasks combined –
478
+
479
+
480
+
481
+
482
+
483
+
484
+
485
+ Fig. 1. Distribution of the top 4 most discriminating features (spiral drawing).
486
+
487
+
488
+ Fig. 2. Distribution of the top 4 most discriminating features (sentence writing).
489
+
490
+
491
+ BACC = 0.7391∗∗, SEN = 0.8043, SPE = 0.6739. The ROC curves of the trained
492
+ models are shown in Fig. 4.
493
+
494
+ 4 Discussion
495
+ As mentioned in the methodology, the Archimedean spiral is considered as a
496
+ gold standard, especially in the assessment of graphomotor difficulties in PD
497
+ patients [5,8,31], nevertheless, it has been utilised during the quantitative anal-
498
+ ysis of Huntington’s disease, essential tremor, or brachial dystonia as well [13].
499
+ Concerning the spiral features with the highest discrimination power (as identi-
500
+ fied by the Mann-Whitney U-test), we observed that the LBD group was asso-
501
+ ciated with a lower range in on-surface acceleration, which we suppose is caused
502
+
503
567
+
568
+
569
+
570
+
571
+
572
+
573
+
574
+
575
+
576
+
577
+ Fig. 3. Distribution of the top 4 most discriminating features (pentagons copying test).
578
+
579
+ Table 3. Results of the classification analysis.
580
+
581
+ Task                MCC     BACC    SEN     SPE     PRE     F1      threshold  p
+ Spiral              0.3977  0.6848  0.8696  0.5000  0.6349  0.7339  0.26       ∗∗
+ Sentence            0.5271  0.7283  0.9783  0.4783  0.6522  0.7826  0.36       ∗∗
+ Pentagons           0.4267  0.6848  0.9348  0.4348  0.6232  0.7478  0.13       ∗∗
+ All tasks combined  0.4824  0.7391  0.8043  0.6739  0.7115  0.7551  0.48       ∗∗
+ MCC – Matthew’s correlation coefficient; BACC – balanced accuracy; SEN – sensitivity;
+ SPE – specificity; PRE – precision; F1 – F1 score; p – p-values computed by the
+ permutation test (1 000 permutations, ∗ denotes p-value < 0.05 and ∗∗ denotes
+ p-value < 0.01); threshold – fine-tuned decision threshold.
602
+
603
+
604
+ by rigidity. This assumption is supported by the fact that the measure signifi-
605
+ cantly correlates (ρ = 0.3, p < 0.05) with the overall score of MDS–UPDRS III.
606
+ Next, the LBD group was not able to keep small variability of loop-to-loop spi-
607
+ ral width index, which is in line with findings reported in [31]. We also observed
608
+ a significant correlation between this feature and the visuospatial (ρ = 0.4,
609
+ p < 0.01) and the attention (ρ = 0.3, p < 0.05) domain of CCB. On the other
610
+ hand, the LBD group had generally higher values of the spiral precision index
611
+ than the HC one, which is against our initial assumptions (also the correlation
612
+ with the attention domain of CCB is surprisingly negative; ρ = −0.4, p < 0.01).
613
+ Finally, the last significant correlation with the clinical status was identified in
614
+ the median of azimuth, which was higher in the LBD group (in addition we
615
+ observed a negative correlation with the visuospatial domain of CCB; ρ = −0.4,
616
+ p < 0.05).
617
+ Regarding the classification analysis, based on the spiral features, we were
618
+ able to discriminate the LBD and HC groups with 68% balanced accuracy (area
619
+ under the curve (AUC) = 71%), which is the worst result when compared to other
620
+
621
653
+
654
+
655
+
656
+
657
+
658
+
659
+ Fig. 4. Receiver operating characteristic curves for the trained models.
660
+
661
+
662
+ tasks and which supports our previous findings that even though the spiral is
663
+ considered as a gold standard the sentence copy task accents the manifestations
664
+ of dysgraphia much better [11].
665
+ Regarding the sentence, the most discriminative feature extracted from this
666
+ task is the number of pen stops (i.e. a pen is in contact with the paper and
667
+ does not vary its position for at least 30 ms [8]), which was higher in the LBD
668
+ group. This parameter has been mainly employed in the diagnosis of develop-
669
+ mental dysgraphia in children population [27], however, in one study, Danna et
670
+ al. observed that this measure (but extracted from the spiral) was significantly
671
+ different between PD patients in the OFF state and HC [8]. Initially, we assumed
672
+ that the feature could be theoretically linked with cognitive deficits, but we did
673
+ not observe any significant correlation with the visuospatial, attention, or execu-
674
+ tive functions domain of CCB. The second most significant feature was the slope
675
+
676
722
+
723
+
724
+
725
+
726
+ of the duration of in-air strokes. The positive correlation coefficient suggests that
727
+ the LBD subjects were associated with progressing fatigue [1,12,17]. Next, in
728
+ the LBD group, we observed lower on-surface vertical velocity (this is in line
729
+ with e.g. [21,35]), but increased on-surface vertical acceleration. This could be
730
+ probably explained by the slow and less smooth handwriting. In terms of pro-
731
+ jection, the reason why these deficits dominate in the vertical movement could
732
+ be explained by the fact that the finger system (which is mainly involved in the
733
+ vertical movement) is more affected by muscular fatigue than the wrist system
734
+ (which controls horizontal movement) [20]. The vertical movement requires coor-
735
+ dinated movement and finer flexions/extensions of more joints (interphalangeal
736
+ and metacarpophalangeal), thus it is more complex than ulnar abductions of the
737
+ wrist [10,34] and could more accent the rigidity and bradykinesia. In addition,
738
+ this manifestation could be associated with the progressive/consistent vertical
739
+ micrographia, i.e., progressive/consistent reduction in letter amplitude [33].
740
+ In terms of classification, by modelling features extracted from the sentence,
741
+ we were able to differentiate both groups with 73% balanced accuracy (AUC
742
+ = 80%). In comparison with the state of the art in supportive LBD or PD
743
+ diagnosis [9,19,35], it is not a competitive result, but on the other hand, we
744
+ would like to highlight that we deal with results evaluating diagnosis of LBDs
745
+ in the prodromal state that has not been targeted by other research teams yet.
746
+ Concerning the last (cognitive) task, all the top 5 discriminative features were
747
+ extracted from the on-surface movement. In our recent article [4] we proved that
748
+ in-air entropy-based parameters could be used to identify early cognitive deficits
749
+ in PD without major cognitive impairment and that they correlate with the
750
+ level of attention. In the current study, these in-air measures were not signifi-
751
+ cant, but on the other hand, their on-surface variants (i.e. median of Shannon
752
+ entropy calculated from the global/vertical movement) had the p-values of the
753
+ Mann-Whitney U-test < 0.05, moreover, they significantly correlated with the
754
+ visuospatial domain of CCB (e.g. ρ = −0.3, p < 0.05). The top 5 parameters
756
+ consist of the width of the product, which was smaller in the LBD group. It
757
+ slightly correlates with the lower median of the length of strokes (ρ = 0.3) and
758
+ lower median of the duration of strokes (ρ = 0.2) and probably means that the
759
+ subjects in the LBD group made the overlapped pentagons smaller. In addition,
760
+ since the non-parametric coefficient of variation of the length of strokes was
761
+ higher, we assume that the LBD subjects were not able to keep a stable length
762
+ of strokes (nevertheless, based on the scoring published in [24], this is assumed
763
+ as a very small deviation). Regarding the width, we also observed a negative
764
+ correlation (ρ = −0.3, p < 0.05) with the overall score of MDS–UPDRS III.
765
+ The classification based on the pentagon copying test provided 68% balanced
766
+ accuracy (AUC = 0.73%), which is slightly better than in the case of the spiral,
767
+ but not as high as in the case of the sentence.
768
+ And finally, a machine learning model based on the whole set of features
769
+ (tasks) enabled us to improve the accuracy to 74% (AUC = 76%). This shows
770
+ that the combination of the graphomotor, handwriting and cognitive deficits can
771
+ be used to achieve reasonable performance in the prodromal diagnosis of LBDs.
772
+
773
+
774
+
775
+
776
+ 5 Conclusion
777
+
778
+ This study has several limitations. Our dataset has a small sample size and the
779
+ HC and LBD groups are imbalanced, therefore to get better results in terms
780
+ of their generalisation, a bigger database must be analysed. Next, due to the
781
+ small sample size, we fused subjects with a high risk of developing PD or MCI-
782
+ LB into one LBD group. Nevertheless, subjects with MCI-LB in its prodromal
783
+ stage are associated mainly with cognitive (executive or visuospatial) decline,
784
+ while subjects with prodromal PD experience mainly motor deficits. In other
785
+ words, we suppose that further stratification of these participants into two groups
786
+ could increase the classification accuracy (we hypothesise that MCI-LB would
787
+ be more pronounced in the pentagon copying task and PD in the handwriting
788
+ one). Finally, although we tried a correction of multiple comparisons during the
789
+ statistical analysis, almost no significant features appeared after this adjustment.
790
+ To sum up, concerning the limitations mentioned above, the study should be
791
+ considered as a pilot one.
792
+ In conclusion, despite the limitations, to the best of our knowledge, it is
793
+ the first work exploring the impact of computerised analysis of a graphomotor,
794
+ cognitive, and handwriting task on the prodromal diagnosis of these neurodegen-
795
+ erative disorders. It bridges the knowledge gap in the field of LBDs, and provides
796
+ baseline results for future studies focusing on the prodromal diagnosis of LBDs
797
+ via a computerized and objective analysis of graphomotor and handwriting dif-
798
+ ficulties.
799
+
800
+ References
801
+
802
+ 1. Aouraghe, I., Alae, A., Ghizlane, K., Mrabti, M., Aboulem, G., Faouzi, B.: A novel
803
+ approach combining temporal and spectral features of Arabic online handwriting
804
+ for Parkinson’s disease prediction. J. Neurosci. Methods 339, 108727 (2020)
805
+ 2. Benedict, H.: Brief Visual Memory Test-Revised: Professional Manual. Psycholog-
806
+ ical Assessment Resources, Odessa (1997)
807
+ 3. Bezdicek, O., et al.: Development, validity, and normative data study for the 12-
808
+ word Philadelphia Verbal Learning Test [czP (r) VLT-12] among older and very
809
+ old Czech adults. Clin. Neuropsychol. 28(7), 1162–1181 (2014)
810
+ 4. Brabenec, L., Klobusiakova, P., Mekyska, J., Rektorova, I.: Shannon entropy: a
811
+ novel parameter for quantifying pentagon copying performance in non-demented
812
+ Parkinson’s disease patients. Parkinsonism Relat. Disord. 94, 45–48 (2022)
813
+ 5. Cascarano, G.D., et al.: Biometric handwriting analysis to support Parkinson’s
814
+ disease assessment and grading. BMC Med. Inform. Decis. Mak. 19(9), 1–11 (2019).
815
+ https://doi.org/10.1186/s12911-019-0989-3
816
+ 6. Chen, T., Guestrin, C.: XGBoost. In: Proceedings of the 22nd ACM SIGKDD
817
+ International Conference on Knowledge Discovery and Data Mining - KDD 2016.
818
+ ACM Press (2016). https://doi.org/10.1145/2939672.2939785
819
+ 7. Combrisson, E., Jerbi, K.: Exceeding chance level by chance: the caveat of the-
820
+ oretical chance levels in brain signal classification and statistical assessment of
821
+ decoding accuracy. J. Neurosci. Methods 250, 126–136 (2015). https://doi.org/10.
822
+ 1016/j.jneumeth.2015.01.010
823
+
824
+
825
+
826
+
827
+ 8. Danna, J., et al.: Digitalized spiral drawing in Parkinson’s disease: a tool for eval-
828
+ uating beyond the written trace. Hum. Mov. Sci. 65, 80–88 (2019)
829
+ 9. De Stefano, C., Fontanella, F., Impedovo, D., Pirlo, G., di Freca, A.S.: Handwriting
830
+ analysis to support neurodegenerative diseases diagnosis: a review. Pattern Recogn.
831
+ Lett. 121, 37–45 (2019)
832
+ 10. Dounskaia, N., Van Gemmert, A., Stelmach, G.: Interjoint coordination during
833
+ handwriting-like movements. Exp. Brain Res. 135(1), 127–140 (2000). https://
834
+ doi.org/10.1007/s002210000495
835
+ 11. Drotár, P., Mekyska, J., Rektorová, I., Masarová, L., Smékal, Z., Faundez-Zanuy,
836
+ M.: Evaluation of handwriting kinematics and pressure for differential diagnosis of
837
+ Parkinson’s disease. Artif. Intell. Med. 67, 39–46 (2016)
838
+ 12. Drotár, P., Mekyska, J., Smékal, Z., Rektorová, I., Masarová, L., Faundez-Zanuy,
839
+ M.: Prediction potential of different handwriting tasks for diagnosis of Parkinson’s.
840
+ In: 2013 E-Health and Bioengineering Conference (EHB), pp. 1–4. IEEE (2013)
841
+ 13. Faundez-Zanuy, M., Mekyska, J., Impedovo, D.: Online handwriting, signature
842
+ and touch dynamics: tasks and potential applications in the field of security and
843
+ health. Cogn. Comput. 13(5), 1406–1421 (2021). https://doi.org/10.1007/s12559-
844
+ 021-09938-2
845
+ 14. Galaz, Z., Mucha, J., Zvoncak, V., Mekyska, J.: Handwriting features (2022). www.
846
+ github.com/BDALab/handwriting-features
847
+ 15. Garre-Olmo, J., Faundez-Zanuy, M., López-de Ipiña, K., Calvó-Perxas, L., Turró-
848
+ Garriga, O.: Kinematic and pressure features of handwriting and drawing: prelim-
849
+ inary results between patients with mild cognitive impairment, Alzheimer disease
850
+ and healthy controls. Curr. Alzheimer Res. 14(9), 960–968 (2017)
851
+ 16. Goetz, C.G., et al.: Movement disorder society-sponsored revision of the unified
852
+ Parkinson’s disease rating scale (MDS-UPDRS): scale presentation and clinimetric
853
+ testing results. Mov. Disord. Off. J. Mov. Disord. Soc. 23(15), 2129–2170 (2008)
854
+ 17. Harralson, H.H., Teulings, H.L., Farley, B.G.: Handwriting variability in movement
855
+ disorder patients and effects of fatigue. In: Proceedings of the Fourteenth Biennial
856
+ Conference of the International Graphonomics Society, pp. 103–107 (2009)
857
+ 18. Heinzel, S., et al.: Update of the MDS research criteria for prodromal Parkinson’s
858
+ disease. Mov. Disord. 34(10), 1464–1470 (2019)
859
+ 19. Impedovo, D., Pirlo, G.: Dynamic handwriting analysis for the assessment of neu-
860
+ rodegenerative diseases: a pattern recognition perspective. IEEE Rev. Biomed.
861
+ Eng. 12, 209–220 (2018)
862
+ 20. Kushki, A., Schwellnus, H., Ilyas, F., Chau, T.: Changes in kinetics and kinemat-
863
+ ics of handwriting during a prolonged writing task in children with and without
864
+ dysgraphia. Res. Dev. Disabil. 32(3), 1058–1064 (2011)
865
+ 21. Letanneux, A., Danna, J., Velay, J.L., Viallet, F., Pinto, S.: From micrographia to
866
+ Parkinson’s disease dysgraphia. Mov. Disord. 29(12), 1467–1475 (2014)
867
+ 22. McKeith, I.G., et al.: Research criteria for the diagnosis of prodromal dementia
868
+ with Lewy bodies. Neurology 94(17), 743–755 (2020)
869
+ 23. Monvoisin-Joly, T., Furcieri, E., Chabran, E., Blanc, F.: Writing in prodromal and
870
+ mild dementia with Lewy bodies: an exploratory and preliminary study. Geriatrie
871
+ et Psychologie Neuropsychiatrie du Vieillissement 19(3), 341–351 (2021)
872
+ 24. Nagaratnam, N., Nagaratnam, K., O’Mara, D.: Intersecting pentagon copying and
873
+ clock drawing test in mild and moderate Alzheimer’s disease. J. Clin. Gerontol.
874
+ Geriatr. 5(2), 47–52 (2014)
875
+ 25. Nasreddine, Z.S., et al.: The Montreal Cognitive Assessment, MoCA: a brief screen-
876
+ ing tool for mild cognitive impairment. J. Am. Geriatr. Soc. 53(4), 695–699 (2005)
877
+
878
+
879
+
880
+
881
+ 26. Ojala, M., Garriga, G.: Permutation tests for studying classifier performance. In:
882
+ ICDM 2009: Ninth IEEE International Conference on Data Mining 2009, pp. 908–
883
+ 913, December 2009. https://doi.org/10.1109/ICDM.2009.108
884
+ 27. Paz-Villagra´n, V., Danna, J., Velay, J.L.: Lifts and stops in proficient and dys-
885
+ graphic handwriting. Hum. Mov. Sci. 33, 381–394 (2014)
886
+ 28. Phipson, B., Smyth, G.K.: Permutation P-values should never be zero: calculating
887
+ exact P-values when permutations are randomly drawn. Stat. Appl. Genet. Mol.
888
+ Biol. 9(1) (2010). https://doi.org/10.2202/1544-6115.1585
889
+ 29. Postuma, R.B., et al.: MDS clinical diagnostic criteria for Parkinson’s disease. Mov.
890
+ Disord. 30(12), 1591–1601 (2015)
891
+ 30. Preiss, M., et al.: Test verb´aln´ı fluence-vod´ıtka pro vˇseobecnou dospˇelou populaci.
892
+ Psychiatrie 6(2), 74–77 (2002)
893
+ 31. San Luciano, M., et al.: Digitized spiral drawing: a possible biomarker for early
894
+ Parkinson’s disease. PLoS ONE 11(10), e0162799 (2016)
895
+ 32. Saunders-Pullman, R., et al.: Validity of spiral analysis in early Parkinson’s disease.
896
+ Mov. Disord. Off. J. Mov. Disord. Soc. 23(4), 531–537 (2008)
897
+ 33. Thomas, M., Lenka, A., Kumar Pal, P.: Handwriting analysis in Parkinson’s dis-
898
+ ease: current status and future directions. Mov. Disord. Clin. Pract. 4(6), 806–818
899
+ (2017)
900
+ 34. Van Galen, G.P.: Handwriting: issues for a psychomotor theory. Hum. Mov. Sci.
901
+ 10(2–3), 165–191 (1991)
902
+ 35. Vessio, G.: Dynamic handwriting analysis for neurodegenerative disease assess-
903
+ ment: a literary review. Appl. Sci. 9(21), 4666 (2019)
904
+ 36. Warrington, E.K., James, M.: The visual object and space perception battery
905
+ (1991)
906
+ 37. Wechsler, D., et al.: WAIS-III WMS-III Technical Manual. Psychological Corpo-
907
+ ration, San Antonio (1997)
908
+ 38. Yamada, M., et al.: Diagnostic criteria for dementia with Lewy bodies: updates
909
+ and future directions. J. Mov. Disord. 13(1), 1 (2020)
910
+
bdFAT4oBgHgl3EQfXh2G/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
c9E3T4oBgHgl3EQfeQoX/vector_store/index.pkl ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7056be96b7a38790f18f3de186906d69351fcde7477114e5bbab3a3448d9adee
3
+ size 69260
edE3T4oBgHgl3EQfewrM/content/tmp_files/2301.04547v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
edE3T4oBgHgl3EQfewrM/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
edFRT4oBgHgl3EQfVDdZ/content/tmp_files/2301.13538v1.pdf.txt ADDED
@@ -0,0 +1,1095 @@
1
+ AMD: Adaptive Masked Distillation for Object
2
+ Detection
3
+ Guang Yang^a, Yin Tang^b, Jun Li^a,*, Jianhua Xu^a, Xili Wan^b
4
+ a School of Computer and Electronic Information, Nanjing Normal University, Nanjing, China
5
+ b School of Computer Science and Technology, Nanjing Tech University, Nanjing, China
6
+ Corresponding author: Jun Li (lijuncst@njnu.edu.cn)
7
+ Abstract—As a general model compression paradigm, feature-
8
+ based knowledge distillation allows the student model to learn
9
+ expressive features from the teacher counterpart. In this paper,
10
+ we mainly focus on designing an effective feature-distillation
11
+ framework and propose a spatial-channel adaptive masked distil-
12
+ lation (AMD) network for object detection. More specifically, in
13
+ order to accurately reconstruct important feature regions, we first
14
+ perform attention-guided feature masking on the feature map of
15
+ the student network, such that we can identify the important
16
+ features via spatially adaptive feature masking instead of random
17
+ masking in the previous methods. In addition, we employ a simple
18
+ and efficient module to allow the student network channel to
19
+ be adaptive, improving its model capability in object perception
20
+ and detection. In contrast to the previous methods, more crucial
21
+ object-aware features can be reconstructed and learned from
22
+ the proposed network, which is conducive to accurate object
23
+ detection. The empirical experiments demonstrate the superiority
24
+ of our method: with the help of our proposed distillation method,
25
+ the student networks report 41.3%, 42.4%, and 42.7% mAP
26
+ scores when RetinaNet, Cascade Mask-RCNN and RepPoints are
27
+ respectively used as the teacher framework for object detection,
28
+ which outperforms the previous state-of-the-art distillation meth-
29
+ ods including FGD and MGD.
30
+ Index Terms—Feature-based Knowledge Distillation, Object
31
+ Detection, Adaptive Masked Distillation, Object-Aware Features
32
+ I. INTRODUCTION
33
+ Recent years have witnessed successful and pervasive ap-
34
+ plications of Deep Convolutional Neural Networks (CNNs) in
35
+ various computer vision tasks. However, deep CNNs usually
36
+ cost a huge amount of computational resources in pursuit
37
+ of higher performance, which adversely affects their deploy-
38
+ ment in practical applications and leads to severe parameter
39
+ redundancy. It is therefore necessary to transfer the dark
40
+ knowledge learned in the complex networks (teacher) to
41
+ another lightweight network (student). This is also termed as
42
+ knowledge distillation [1] which allows the student model to
43
+ generate expressive features learned from the teacher model.
44
+ Thus, it is more preferable to deploy the student model
45
+ with compact network architecture sacrificing minimal loss of
46
+ performance.
47
+ The earliest distillation algorithms function mainly at the
48
+ output head. The representative examples include logit-based
49
+ distillation for classification and head-based distillation for
50
+ detection [2]. Recently, a more common distillation strategy
51
+ emerges as feature-based distillation mechanism. Since only
+ [Figure 1 graphic: a grid of region-wise attention scores with values ranging from 0.1 to 3.6; see the caption below]
87
+ Fig. 1: Different regions are quantified with varying attention
88
+ scores in the feature map of teacher model. The regions
89
+ with higher scores encode the region importance and should
90
+ outweigh the low-score regions in the feature masking.
91
+ the head or projector after the generated feature varies within
92
+ different networks, the feature-based distillation approaches
93
+ can potentially be employed in a variety of tasks. There-
94
+ fore, it has become a prominent line of research for both
95
+ model compression and performance improvement due to
96
+ its simplicity and efficacy. In object detection, in particular,
97
+ a variety of feature-based distillation approaches have been
98
+ developed. The earlier research, such as FitNet [3], performs
99
+ distillation at the global level. FGFI [4] operates by distilling
100
+ the features of high IoU between ground truth and anchors.
101
+ FGD [5] was developed to separate distillation of foreground
102
+ and background. Recent research suggests it is preferable for
103
+ the student model to reconstruct and learn expressive features
104
+ from the teacher model in the first place instead of following
105
+ the teacher for generating competitive representations. For
106
+ instance, MGD [6] was proposed to randomly mask pixels in
107
+ the feature map of student network, leading to reconstructed
108
+ features of the teacher model via a simple block.
109
+ Although MGD further improves the feature distillation
110
+ by reconstructing the features of masked areas, the masked
111
+ regions are generated in a random manner. This random
112
+ operation fails to identify the region-specific importance, and
113
+ is likely to cause the student model to generate features of
114
+ the teacher in unimportant regions. As illustrated in Fig. 1,
115
+ arXiv:2301.13538v1 [cs.CV] 31 Jan 2023
116
+
117
+ Spatial Attention
118
+ Mask
119
+ Feature
120
+ Mask
121
+ Backbone
122
+ (Neck)
123
+ SE block
124
+ 3×3
125
+ 3×3
126
+ Distillation
127
+ Teacher
128
+ Spatial Attention
129
+ R
130
+ 1× H× W
131
+ R
132
+ C×H× W
133
+ R
134
+ C×1× 1
135
+ R
136
+ C×H× W
137
+ R
138
+ C×H× W
139
+ Avg pooling
140
+ RELU
141
+ FC
142
+ FC
143
+ Sigmoid
144
+ Backbone
145
+ (Neck)
146
+ SE block
147
+ ReLU
148
+ Student
149
+ Teacher
150
+ Generation
151
+ Block
152
+ .
153
+ Fig. 2: The proposed AMD distillation framework. It first learns the adaptive Region-of-Interest (RoI) via attention-guided
154
+ feature masking, generating the spatial mask clue from the teacher model imposed on the student feature. Furthermore, we
155
+ apply the simple and efficient SE layer to the feature of the teacher model, leading to the channel adaptive clues. The auxiliary
156
+ clues are then fused with the output from the generation block via a Hadamard product, such that the generated feature from
157
+ the student model is channel adaptive.
158
+ the importance of different regions in the feature map of
159
+ a teacher model can be quantified using the region-specific
160
+ attention scores. Only the regions with higher scores play
161
+ critical role in feature masking while the low-score regions
162
+ should be downplayed.
163
+ To alleviate the above-mentioned drawback, we propose an
164
+ adaptive masked distillation (AMD) framework which enjoys
165
+ object-aware spatial and channel adaptivity. On the one hand,
166
+ we perform attention-guided spatial masking instead of ran-
167
+ dom masking on the feature map of the student network. More
168
+ specifically, we first learn a spatial attention map from the
169
+ feature map of the teacher model, producing a region-specific
170
+ mask. Then, the feature of the student network is adaptively
171
+ masked by using this attention map. Benefiting from this
172
+ selective feature masking, it allows subsequent generation
173
+ block to focus on those adaptively masked important areas,
174
+ leading to robust and expressive representations. On the other
175
+ hand, to further explore the object-awareness capability, we
176
+ leverage a simple and effective SE layer [7] for modeling the
177
+ channel attention of the resulting feature of the teacher model.
178
+ The learned clue and the output from the generation block
179
+ of students will be fused via a Hadamard product, achieving
180
+ desirable object-aware channel adaptivity.
181
+ To summarize, the contributions of this paper are threefold.
182
+ • First, we develop a spatially adaptive feature masking
183
+ mechanism for the student model, such that the region-
184
+ specific importance can be encoded in the features recon-
185
+ structed and learned from the teacher network.
186
+ • Second, we further explore the channel adaptivity by
187
+ introducing a simple and efficient SE module to improve
188
+ the object-aware capability of the student model.
189
+ • Third, we evaluate our proposed feature distillation net-
190
+ work AMD using various detection frameworks includ-
191
+ ing one-stage detector RetinaNet [8], two-stage detec-
192
+ tor Faster-RCNN [9], and anchor free model RepPoint
193
+ [10]. Extensive experimental results demonstrate that
194
+ our method can help to learn features with sufficient
195
+ descriptive capability and achieve significant performance
196
+ gains over the previous state-of-the-art methods.
197
+ The remainder of this paper is structured as follows. After
198
+ reviewing the related work in Section II, we elaborate on our
199
+ method in Section III. Next, we conduct extensive experi-
200
+ mental evaluations in Section IV before the paper is finally
201
+ concluded in Section V.
202
+ II. RELATED WORK
203
+ In this section, we comprehensively review the recent ad-
204
+ vance in object detection and knowledge distillation, both of
205
+ which are closely related to our method.
206
+ A. Object Detection
207
+ As one fundamental vision task, object detection aims to
208
+ determine the category and location of the objects in an
209
+ image. Over recent years, the success of CNNs has enormously
210
+ advanced the research in object detection. In general, the
211
+ detectors based on deep CNNs can be classified into three
212
+ categories including anchor-based detectors [9, 11], anchor-
213
+ free detectors [12] and end-to-end detectors [13]. In particular,
214
+ anchor-based detection models are divided into two-stage
215
+ [9, 14–16] and one-stage detectors [11, 17, 18]. The former
216
+ detection method, represented by R-CNN like [9, 19] algo-
217
+ rithms, has a higher detection accuracy, whereas its inference
218
+ speed is usually unsatisfactory due to expensive computational
219
+ costs incurred by region proposal network (RPN). As a result,
220
+
221
+ it is impractical for some real-time scenarios. In contrast, one-
222
+ stage detectors directly perform classification and regression
223
+ on the anchors without generating proposals beforehand. Thus,
224
+ they run faster with guaranteed detection performance.
225
+ While recent deep networks achieve high detection accu-
226
+ racy, they usually rely on complex backbone structure and
227
+ significant computational resources [13, 20–22]. In this sense,
228
+ designing lightweight and efficient backbone networks has
229
+ emerged as a major line of research in object detection. In
230
+ particular, knowledge distillation, which can transfer sufficient
231
+ descriptive power from a large network to a small network, is
232
+ beneficial for designing lightweight backbone with maintained
233
+ performance close to the large network.
234
+ B. Knowledge Distillation
235
+ Recently, knowledge distillation has received increasing
236
+ attention in model compression, since it is capable of retaining
237
+ compact model structure with promoted performance. Hinton
238
+ et al. [1] first came up with the concept of knowledge
239
+ distillation by introducing the soft label of the teacher network
240
+ as part of the loss of the student network, allowing the student
241
+ network to learn probability distribution fitting of the teacher
242
+ model for classification task. Moreover, Romero et al. [3]
243
+ demonstrated that semantic information in the intermediate
244
+ layer can also be learned as dark knowledge by student
245
+ networks. Thus, knowledge distillation can therefore be widely
246
+ applied to a wide range of downstream tasks. Chen et al. [2]
247
+ distilled the neck feature, classification head, and regression
248
+ head by setting up three loss functions, respectively. Tang
249
+ et al. [23] carefully designed the distillation weights and
250
+ distillation loss functions such that they are automatically
251
+ adjusted between samples for the single-stage object detector.
252
+ Li et al. [24] used region proposals of the larger network to
253
+ help the smaller network learn higher semantic information.
254
+ Zheng et al. [25] transferred the knowledge distillation of the
255
+ classification head to the location head of object detection,
256
+ leading to a new distillation mechanism termed Localization
257
+ Distillation (LD). LD makes logit mimicking become a better
258
+ alternative to feature imitation, and reveals the knowledge
259
+ of object category and object location should be handled
260
+ separately. Dai et al. [26] developed GID framework which
261
+ selects distillation areas based on differences between the
262
+ student and teacher networks. Yang et al. proposed FGD [5]
263
+ which separates the foreground and background, enabling the
264
+ student model to learn from the teacher network areas of
265
+ interest and global knowledge via local and global distillation
266
+ respectively. Besides, MGD [6] imposes random masking on
267
+ the feature map of the student model, and then generates
268
+ the feature map reconstructing from the teacher network.
269
+ However, the uncertainty of random masking may introduce
270
+ additional noise, producing biased feature map with compro-
271
+ mised representation capability.
272
+ III. THE PROPOSED APPROACH
273
+ Recently, a large number of distillation methods have been
274
+ carefully designed for various model architectures and tasks.
275
+ Typically, the feature maps used for distillation usually have
276
+ high-level semantics and spatial information about adjacent
277
+ pixels. Therefore, learning these features from the teacher
278
+ model can significantly improve the performance of the stu-
279
+ dent model. Mathematically, basic feature distillation can be
280
+ formulated as:
281
+ L_{fea} = \frac{1}{CHW} \sum_{k=1}^{C} \sum_{i=1}^{H} \sum_{j=1}^{W} \left( F^{T}_{k,i,j} - f\left(F^{S}_{k,i,j}\right) \right)^{2} \qquad (1)
301
+ where C, H, and W denote the channel, height, and width of
302
+ the feature map, respectively. F T and F S denote the feature
303
+ generated from the teacher model and its counterpart from the
304
+ student model. f represents the adaptation layer that aligns the
305
+ shape of F S and F T .
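+ To make Eq. (1) concrete, the following is a minimal PyTorch-style sketch of this baseline feature-imitation loss; the class name, the tensor shapes and the choice of a 1×1 convolution for the adaptation layer f are illustrative assumptions rather than a published implementation.

import torch
import torch.nn as nn

class FeatureImitationLoss(nn.Module):
    """Eq. (1): mean squared error between the teacher feature and the aligned student feature."""

    def __init__(self, student_channels: int, teacher_channels: int):
        super().__init__()
        # f(.): adaptation layer aligning the student feature shape with the teacher's
        self.align = nn.Conv2d(student_channels, teacher_channels, kernel_size=1)

    def forward(self, feat_s: torch.Tensor, feat_t: torch.Tensor) -> torch.Tensor:
        # feat_s: (N, C_s, H, W); feat_t: (N, C_t, H, W); average over channels and locations
        return torch.mean((feat_t - self.align(feat_s)) ** 2)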
306
+ Recent research suggests learning and reconstructing the
307
+ features of the teacher model is a desirable alternative to
308
+ feature imitation [6]. More specifically, expressive features
309
+ can be generated from the masked regions on the feature
310
+ map of the student network. However, previous state-of-the-
311
+ art method mainly performs random feature masking without
312
+ identifying the importance of different regions on the feature
313
+ map. In this paper, we attempt to make the student model
314
+ generate features corresponding to the important areas on the
315
+ feature map of the teacher network. Towards this end, we
316
+ propose a spatial-channel adaptive masked distillation strategy
317
+ termed AMD. In contrast to the random masking strategy in
318
+ the previous method, we perform feature masking via region-
319
+ aware attention for identifying the important areas in the
320
+ feature map of the teacher network. In order to improve the
321
+ object-aware capability, we further introduce a simple and
322
+ efficient SE module such that the resulting features are channel
323
+ adaptive. The framework of our proposed method is illustrated
324
+ in Fig. 2.
325
+ A. Spatially adaptive feature masking
326
+ Using random pixels to recover the complete feature map,
327
+ MGD allows the masked features of the student model to
328
+ generate features of the teacher model. Thus, it is beneficial for
329
+ the student network to obtain a better representation. However,
330
+ the region-specific importance is discarded due to the random
331
+ masking in MGD. To alleviate this drawback, we carefully
332
+ design the region-aware feature masking with the help of
333
+ spatial attention. To begin with, we calculate the absolute mean
334
+ value of the teacher network along the channel dimension:
335
+ G^{S}(F) = \frac{1}{C} \sum_{k=1}^{C} \left| F^{T}_{k} \right| \qquad (2)
344
+ where C denotes the channel number of the feature. F T is the
345
+ feature of the teacher. GS(F) is the spatial representation map.
346
+ Then, the spatial attention mask resulting from the teacher
347
+ model can be formulated as:
348
+ A^{S}(F) = H \cdot W \cdot \mathrm{softmax}\left( G^{S}(F)/T \right) \qquad (3)
353
+ where T is a hyper-parameter introduced in [1] to change the
354
+ probability distribution such that the shape of the resulting AS
355
+
356
+ [Figure 3 panels: FGD (mAP 40.7), MGD (mAP 41.0), Ours (mAP 41.3)]
362
+ Fig. 3: Visualisation of the feature maps obtained by different distillation methods. Teacher detector is RetinaNet-ResNeXt101
363
+ while student detector is RetinaNet-ResNet50.
364
+ is 1×H ×W. The attention score for each location represents
365
+ the level of interest in the teacher network. Furthermore, the
366
+ mask value is set to 0 when the attention score is greater than
367
+ λ and the rest are set to 1. This can be expressed as:
368
+ M_{i,j} = \begin{cases} 0, & \text{if } A^{S}_{i,j} > \lambda \\ 1, & \text{otherwise} \end{cases} \qquad (4)
+ where A^{S}_{i,j} is the spatial attention score at the point with
377
+ coordinates (i, j) on the feature map of the teacher network.
378
+ λ is a hyper-parameter to control the number of pixels in the
379
+ mask. Next, we cover the feature map of the student model
380
+ with the mask M, which can be formulated as follows:
381
+ F^{S}_{mask} = F^{S} \cdot M \qquad (5)
384
+ In a nutshell, with the help of this attention-guided feature
385
+ masking, we can mask out the student feature map according
386
+ to the important regions of interest on the teacher counterpart,
387
+ and the resulting feature will contain more important semantic
388
+ information.
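+ As a rough sketch of Eqs. (2)-(5) (not the released code), the attention-guided masking can be written as below; the function names are assumptions, and the default values T = 0.5 and λ = 1 follow the one-stage hyper-parameter setting reported later in Sect. IV-A.

import torch

def spatial_attention_mask(feat_t: torch.Tensor, temperature: float, lam: float) -> torch.Tensor:
    """Eqs. (2)-(4): binary mask derived from the teacher's spatial attention."""
    n, _, h, w = feat_t.shape
    g_s = feat_t.abs().mean(dim=1)                                     # G^S(F), shape (N, H, W)
    a_s = h * w * torch.softmax((g_s / temperature).flatten(1), dim=1)
    a_s = a_s.reshape(n, h, w)                                         # A^S(F), rescaled by H*W
    # M = 0 where the teacher attends strongly (score > lambda), 1 elsewhere
    return (a_s <= lam).float().unsqueeze(1)                           # (N, 1, H, W)

def mask_student_feature(feat_s: torch.Tensor, feat_t: torch.Tensor,
                         temperature: float = 0.5, lam: float = 1.0) -> torch.Tensor:
    """Eq. (5): F^S_mask = F^S * M, so the high-attention regions have to be regenerated."""
    return feat_s * spatial_attention_mask(feat_t, temperature, lam)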
389
+ B. Channel adaptive clues generation
390
+ Different from single-object recognition tasks such as
391
+ image classification, object detection is a dense prediction task
392
+ focusing on detecting multiple objects. Except for the effective
393
+ receptive field (ERF), the capability of capturing the object
394
+ information in different scales can also bring a significant
395
+ performance fluctuation for a detector, which is not considered
396
+ in the previous work [5, 6, 13]. Therefore, we utilize a simple
397
+ and lightweight SE layer [7] to learn the channel adaptive clue
398
+ from the teacher feature. The resulting channel adaptive clue
399
+ will be applied to enhance the student’s feature, and further
400
+ improve the object-awareness capability:
401
+ F^{T}_{clue} = \sigma\left( W_{L1}\left( W_{L2}\left( F^{T}_{avg}; \theta_{1} \right); \theta_{2} \right) \right),
+ G^{S}(F^{S}_{mask}) = W_{C1}\left( \mathrm{ReLU}\left( W_{C2}(F^{S}_{mask}; \theta_{1}) \right); \theta_{2} \right) \odot F^{T}_{clue}, \qquad (6)
+ where F^{T}_{clue} \in \mathbb{R}^{1 \times 1 \times C} denotes the learned channel adaptive
429
+ clue for the student feature. It is fused with the output
430
+ from the generation block via a Hadamard product denoted
431
+ as ⊙. The WL(·; θ) and WC(·; θ) are weight matrices of
432
+ linear projection and convolution layer for SE and generation
433
+ modules, respectively.
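+ A minimal sketch of how such a channel-adaptive clue could be fused with a two-convolution generation block is given below; the module name and the SE reduction ratio of 16 are assumptions made for illustration only.

import torch
import torch.nn as nn

class ChannelAdaptiveGeneration(nn.Module):
    """Eq. (6): SE-style channel clue from the teacher, fused with the student's generated feature."""

    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        # SE branch on the teacher feature: avg pooling -> FC -> ReLU -> FC -> sigmoid
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction, kernel_size=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, kernel_size=1),
            nn.Sigmoid(),
        )
        # Generation block on the masked student feature: two stacked 3x3 convolutions
        self.generation = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
        )

    def forward(self, feat_s_masked: torch.Tensor, feat_t: torch.Tensor) -> torch.Tensor:
        clue = self.se(feat_t)                         # F^T_clue, shape (N, C, 1, 1)
        return self.generation(feat_s_masked) * clue   # Hadamard product, broadcast over H and W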
434
+ Benefiting from this design, our model further explores the
435
+ object-aware potential, resulting in a significant improvement
436
+ over those vanilla counterparts, i.e., models with no channel-
437
+ adaptive design. More interestingly, we observe that our AMD
438
+ can achieve a remarkable mAP improvement in the case of
439
+ detecting small objects, demonstrating the effectiveness of our
440
+ proposed method. We also provide the visualization results of
441
+ the feature map derived from different distillation models as
442
+ shown in Fig 3. It can be easily observed that the object feature
443
+ produced from our AMD is more distinguishable than those
444
+ of the other methods.
445
+ C. Loss function
446
+ Based on the proposed distillation method, we design the
447
+ following distillation loss for AMD:
448
+ L_{fea} = \sum_{k=1}^{C} \sum_{i=1}^{H} \sum_{j=1}^{W} \left( F^{T}_{k,i,j} - G^{S}(F^{S}_{mask}) \right)^{2} \qquad (7)
464
+ where C, H, and W respectively denote the channel number,
465
+ height and width of the feature map. F^{S}_{mask} denotes the
467
+ masked student feature map. Thus, the overall loss function
468
+ is as follows:
469
+ L_{overall}\left( F^{T}, F^{S} \right) = \alpha \cdot L_{fea} + L_{original} \qquad (8)
474
+ where α is a hyper-parameter to balance distillation loss and
475
+ original loss, and Loriginal is the original loss of the detection
476
+ task.
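+ Putting Eqs. (7) and (8) together, a hedged sketch of the training objective is shown below; the function names are illustrative, and the default α corresponds to the one-stage setting reported in Sect. IV-A.

import torch

def amd_distillation_loss(gen_feat_s: torch.Tensor, feat_t: torch.Tensor) -> torch.Tensor:
    """Eq. (7): squared error between the teacher feature and the regenerated student feature."""
    return ((feat_t - gen_feat_s) ** 2).sum()

def overall_loss(loss_original: torch.Tensor, loss_fea: torch.Tensor,
                 alpha: float = 2.5e-7) -> torch.Tensor:
    """Eq. (8): original detection loss plus the weighted distillation term."""
    return loss_original + alpha * loss_fea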
477
+ IV. EXPERIMENT
478
+ A. Experimental Setting
479
+ To verify the effectiveness of our AMD for object detection,
480
+ we evaluate our method on MS COCO2017 [27] benchmark
481
+ dataset, which contains 80 object categories and over 160k
482
+ images. We use 120k training images for training and 5k
483
+ validation images for testing. For performance measures, we
484
+ use Average Precision (AP) and Average Recall (AR) to
485
+ evaluate the performance of different object detectors. Three
486
+ mainstream detectors including the anchor-based one-stage
487
+ detector RetinaNet [8], the two-stage detector Faster-RCNN
488
+ [9], and the anchor-free detector RepPoint [10] are involved
489
+ in our comprehensive experiments. In addition, ResNeXt101
490
+ and ResNet50 are respectively used as the backbone of the
491
+ teacher network and its student counterpart.
492
+ We also conduct a series of ablation studies to explore the effects of individual components on the performance of our AMD framework. In implementation,
501
+
502
+ TABLE I: Comparison of our method with other distillation methods for object detection on COCO.
503
+ Teacher | Student | mAP | APS | APM | APL | mAR | ARS | ARM | ARL
+ RetinaNet ResNeXt101 (41.0) | RetinaNet-Res50 | 37.4 | 20.6 | 40.7 | 49.7 | 53.9 | 33.1 | 57.7 | 70.2
+ | FKD [28] | 39.6 (+2.2) | 22.7 | 43.3 | 52.5 | 56.1 (+2.2) | 36.8 | 60.0 | 72.1
+ | FGD [5] | 40.7 (+3.3) | 22.9 | 45.0 | 54.7 | 56.8 (+2.9) | 36.5 | 61.4 | 72.8
+ | MGD [6] | 41.0 (+3.6) | 23.4 | 45.3 | 55.7 | 57.0 (+3.1) | 37.2 | 61.7 | 72.8
+ | AMD (ours) | 41.3 (+3.9) | 23.9 | 45.4 | 55.7 | 57.4 (+3.5) | 38.2 | 61.7 | 73.5
+ RepPoints ResNeXt101 (44.2) | RepPoints-Res50 | 38.6 | 22.5 | 42.2 | 50.4 | 55.1 | 34.9 | 59.4 | 70.3
+ | FKD [28] | 40.6 (+2.0) | 23.4 | 44.6 | 53.0 | 56.9 (+1.8) | 37.3 | 60.9 | 71.4
+ | FGD [5] | 42.0 (+3.4) | 24.0 | 45.7 | 55.6 | 58.2 (+3.1) | 37.8 | 62.2 | 73.3
+ | MGD [6] | 42.3 (+3.7) | 24.4 | 46.2 | 55.9 | 58.4 (+3.3) | 40.4 | 62.3 | 73.9
+ | AMD (ours) | 42.7 (+4.1) | 24.8 | 46.5 | 56.3 | 58.8 (+3.7) | 40.6 | 62.4 | 74.1
+ Cascade Mask RCNN ResNeXt101 (47.3) | Faster RCNN-Res50 | 38.4 | 21.5 | 42.1 | 50.3 | 52.0 | 32.6 | 55.8 | 66.1
+ | FKD [28] | 41.5 (+3.1) | 23.5 | 45.0 | 55.3 | 54.4 (+2.4) | 34.0 | 58.2 | 69.9
+ | FGD [5] | 42.0 (+3.6) | 23.8 | 46.4 | 55.5 | 55.4 (+3.4) | 35.5 | 60.0 | 70.0
+ | MGD [6] | 42.1 (+3.7) | 23.7 | 46.4 | 56.1 | 55.5 (+3.5) | 35.4 | 60.0 | 70.5
+ | AMD (ours) | 42.4 (+4.0) | 24.1 | 46.5 | 56.2 | 55.8 (+3.8) | 35.3 | 60.0 | 70.8
658
+ all the experiments are conducted on a server with one
659
+ RTX3090 GPU using MMdetection toolbox [29] and Py-
660
+ torch framework [30]. Besides, the hyper-parameters are
661
+ empirically set to (α = 2.5 × 10^{-7}, λ = 1, T = 0.5) and (α = 4 × 10^{-6}, λ = 1.2, T = 0.5) for the one-stage models
670
+ and the two-stage models respectively. During the training
671
+ process, SGD optimizer is used for training all the detectors
672
+ within 24 epochs. Meanwhile, momentum is set as 0.9 whilst
673
+ weight decay is set to 0.0001. Moreover, single-scale training
674
+ strategy is utilized in our experiments.
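+ For reference, a hedged sketch of the optimizer configuration described above is given below; the learning rate is not stated in the paper and is therefore a placeholder assumption.

import torch

def build_optimizer(model: torch.nn.Module, lr: float = 0.01) -> torch.optim.Optimizer:
    # SGD with momentum 0.9 and weight decay 0.0001, as reported in Sect. IV-A
    return torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4)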
675
+ B. Results
676
+ In our comparative studies, we carry out three groups of
677
+ experiments to evaluate different distillation methods with the
678
+ three popular detectors involved. The corresponding experi-
679
+ mental results are shown in Table I.
680
+ In the first group of experiments, RetinaNet is used as the
681
+ detection framework for both the teacher and the student.
682
+ The corresponding experimental results demonstrate that our
683
+ distillation method provides significant performance boosts of
684
+ 3.9% in mAP over the baseline student network by reporting
685
+ the highest accuracy at 41.3%. This result consistently outper-
686
+ forms the state-of-the-art methods FGD and MGD by 0.6%
687
+ and 0.3%, while it even surpasses the teacher model achieving
688
+ 41.0% mAP. Similar performance improvement can also be ob-
689
+ served with respect to mAR metric. The experimental setting
690
+ in the second group is analogous to the first one except that the
691
+ RetinaNet framework is replaced with RepPoints. Consistent
692
+ with the results in the first group, dramatic performance gains
693
+ of 4.1% in mAP and 3.7% in mAR are reported, and similar
694
+ performance superiority to the competing distillation methods
695
+ is also demonstrated. The results reveal that our method can
696
+ adaptively learn more important information from the teacher
697
+ and significantly contribute to the improvement of the student
698
+ model.
699
+ To further assess the generalization capability of our pro-
700
+ posed method, we make use of different detection frameworks
701
+ for the teacher and student models. To be specific, the more
702
+ powerful detector Cascade Mask-RCNN is used as the teacher
703
+ network while the Faster-RCNN for the student model. As
704
+ shown in Table I, our method boosts the baseline student
705
+ model from 38.4% to 42.4% in mAP and from 52.0% to 55.8%
706
+ in mAR, outperforming MGD by 0.3% in both mAP and mAR. It
707
+ sufficiently suggests our method is independent of the specific
708
+ detector and shows consistent advantages in cross-framework
709
+ scenarios.
710
+ C. Ablation Study
711
+ In this section, we conduct extensive ablation experiments
712
+ to explore the effect of different configurations on the pro-
713
+ posed AMD. Consistent with the above setting, the ablation
714
+ experiments with different configurations are conducted based
715
+ on the three popular detectors, i.e., RetinaNet, Faster-RCNN,
716
+ and RepPoint.
717
+ As shown in Table II, when RetinaNet is used for the
718
+ detection framework for both the teacher and the student, we
719
+ explore two primary modules in our AMD model, namely
720
+ the spatially adaptive masking (Ada-Mask) and the channel
721
+ adaptive clues generation (Ada-Channel). It is observed that
722
+ the complete AMD model including both the Ada-Mask and
723
+ Ada-Channel components achieves the best results. Further-
724
+ more, when we remove either component, there is a clear
725
+ performance drop in particular in the small-object detection
726
+ scenario (0.3%↓ w/o Ada-Mask and 0.5%↓ w/o Ada-Channel).
727
+ This implies that our AMD method can improve object-
728
+ awareness capability which is crucial for dense prediction
729
+ tasks.
730
+ When the RetinaNet is replaced with the RepPoint, similar
731
+ results can be obtained. As displayed in Table III, both the
732
+ Ada-Mask and Ada-Channel components play critical roles
733
+ in our AMD model. Specifically, single Ada-Mask module
734
+ reports 24.4%, 46.3% and 56.0% in APS, APM and APL
735
+ scores. With the help of additional channel adaptive clues, fur-
736
+ ther performance gains of 0.4%, 0.2% and 0.3% are reported
737
+ for the respective metrics.
738
+ Furthermore, we also perform ablation studies in cross-
739
+ framework scenarios. Specifically, the Cascade Mask-RCNN
740
+ is used as the teacher network, while the Faster-RCNN as the
741
+ student counterpart. As shown in Table IV, the complete AMD
742
+ model achieves the highest accuracy. In particular, the highest
743
+
744
+ TABLE II: Ablation studies using RetinaNet [8] framework for both the teacher and the student. The backbone of the teacher
745
+ network is ResNeXt-101 whilst its student counterpart is ResNet-50. Ada-Mask and Ada-channel respectively denote spatially
746
+ adaptive masking and channel adaptive clue generation module. They constitute two main components in our proposed AMD
747
+ model.
748
+ Student: RetinaNet + Res50
+ Ada-Mask | Ada-Channel | AP^b | AP^b_50 | AP^b_75 | APS | APM | APL
+ ✓ | ✓ | 41.3 | 61.0 | 44.1 | 23.9 | 45.4 | 55.7
+ – | ✓ | 41.0 | 61.0 | 43.8 | 23.7 | 45.3 | 55.6
+ ✓ | – | 41.2 | 60.8 | 44.0 | 23.4 | 45.2 | 55.6
781
+ TABLE III: Ablation studies using RepPoint [10] framework for both the teacher and the student. ResNeXt-101 and ResNet-50
782
+ are respective backbones.
783
+ Student: RepPoint + Res50
+ Ada-Mask | Ada-Channel | AP^b | AP^b_50 | AP^b_75 | APS | APM | APL
+ ✓ | ✓ | 42.7 | 63.5 | 46.5 | 24.8 | 46.5 | 56.3
+ – | ✓ | 42.4 | 63.2 | 46.4 | 24.6 | 46.5 | 56.1
+ ✓ | – | 42.4 | 63.3 | 46.2 | 24.4 | 46.3 | 56.0
816
+ TABLE IV: Ablation studies in a cross-framework scenario. The Cascade Mask-RCNN [31] is employed for the teacher
817
+ framework, while the Faster R-CNN is for the student counterpart.
818
+ Student: Faster-RCNN + Res50
+ Ada-Mask | Ada-Channel | AP^b | AP^b_50 | AP^b_75 | APS | APM | APL
+ ✓ | ✓ | 42.4 | 63.1 | 46.2 | 24.1 | 46.5 | 56.2
+ – | ✓ | 42.1 | 62.8 | 46.0 | 23.8 | 46.4 | 56.3
+ ✓ | – | 42.3 | 63.0 | 46.2 | 23.6 | 46.6 | 56.3
851
+ APS score 24.1% is reported, outperforming the other settings
852
+ w/o either Ada-Mask or Ada-Channel. This indicates that our
853
+ AMD model benefits small-object detection with improved
854
+ object-awareness capability.
855
+ TABLE V: Comparison of different generation blocks. For
856
+ MBConv [32], we use 5 × 5 depthwise convolution.
857
+ Student: RetinaNet-Res50
+ Generation Block | MBConv | 3 × 3 Dense Conv * 1 | 3 × 3 Dense Conv * 2
+ mAP | 41.0 | 41.2 | 41.3
866
+ In addition to the above ablation studies, we also discuss the
867
+ effect of different generation blocks on the performance of our
868
+ method. As illustrated in Table V, three different generation
869
+ blocks are compared within the RetinaNet framework. The
870
+ results reveal that a slightly inferior performance is reported
871
+ by the advanced MBConv [32]. In contrast, a better result
872
+ is achieved by simply stacking two vanilla convolutional
873
+ layers. We assume that the channel adaptive clues learned
874
+ from the teacher network is not compatible with MBConv
875
+ block, because MBConv somewhat encodes the channel clues
876
+ from the student model. This incompatibility results from the
877
+ difference of the channel clues between the teacher and the
878
+ student network.
879
+ To gain a deeper insight into the effect of the Ada-Channel
880
+ module on feature generation, we explore the following two
881
+ TABLE VI: Comparison of different locations of Ada-channel.
882
+ After and Within denote that we apply the channel adaptive
883
+ clues after the generation block and between the two convo-
884
+ lution layers, respectively.
885
+ Student: Faster-RCNN + Res50
+ Location | After | Within
+ mAP | 42.4 | 42.2
892
+ cases with Cascade Mask-RCNN and Faster-RCNN respec-
893
+ tively used as the teacher and the student. In the first case,
894
+ Ada-Channel follows the generation block, and the two com-
895
+ ponents function separately. In the other case, Ada-Channel
896
+ is embedded within two consecutive convolution layers of the
897
+ generation block, which implies that two modules are coupled.
898
+ As shown in Table VI, decoupling the two components brings
899
+ an improvement of 0.2% in mAP, suggesting that the genera-
900
+ tion process working on the masked feature of the student is
901
+ best kept decoupled from externally injected clues, even informative ones.
902
+ D. Parameter Analysis
903
+ In our AMD method, the hyper-parameter λ in Eq. 4
904
+ controls the coverage of feature mask. A larger λ value
905
+ indicates that only the points with higher attention scores of
906
+ the teacher model are masked, and most of the pixel points
907
+ are in the object-specific ground-truth region. In contrast, it
908
+
909
+ is likely that masked points appear in the background region
910
+ when decreasing λ. In our experiments, we discuss the effect
911
+ of λ using RepPoints as the detection framework. It is observed
912
+ from Fig. 4 that the highest mAP 42.7% and mAR 58.8%
913
+ are reported when λ = 1.0, suggesting it helps the model to
914
+ better compromise between encoding low-score and high-score
915
+ regions.
916
+ [Figure 4 plot: λ = 0.7 / 1.0 / 1.5 → mAP 42.5 / 42.7 / 42.3, mAR 58.4 / 58.8 / 58.5 (%); axes: λ vs. Accuracy (%)]
934
+ Fig. 4: Parameter λ analysis on one-stage RepPoints frame-
935
+ work.
936
+ V. CONCLUSION
937
+ In this paper, we focus on the topic of feature-based masked
938
+ distillation and propose spatial-channel adaptive masked dis-
939
+ tillation termed AMD for object detection. On the one hand,
940
+ we perform spatially adaptive feature masking to encode
941
+ the region-specific importance, such that more important and
942
+ expressive features can be learned from the teacher net-
943
+ work. On the other hand, to improve the object-awareness
944
+ capability, we utilize the simple and efficient SE block to
945
+ generate informative channel-adaptive clues for the student
946
+ model. Extensive experiments demonstrate the superiority and
947
+ effectiveness of our method, showing that the proposed AMD
948
+ model not only significantly boosts the performance of the
949
+ baseline student model but also outperforms the other state-
950
+ of-the-art distillation approaches.
951
+ In our proposed AMD, the spatial attention map generated
952
+ from the feature of the teacher model lacks information
953
+ interaction. Our future work will focus on exploring alternative
954
+ strategies to enhance the interaction among different locations
955
+ on the attention map.
956
+ REFERENCES
957
+ [1] G. Hinton, O. Vinyals, J. Dean et al., “Distilling the knowledge in a neural network,” in Proceedings of the International Conference on Neural Information Processing Systems Workshop, 2014, pp. 1–9.
+ [2] G. Chen, W. Choi, X. Yu, T. Han, and M. Chandraker, “Learning efficient object detection models with knowledge distillation,” in Proceedings of the International Conference on Neural Information Processing Systems Workshop, 2017, pp. 1–10.
+ [3] A. Romero, N. Ballas, S. E. Kahou, A. Chassang, C. Gatta, and Y. Bengio, “Fitnets: Hints for thin deep nets,” in Proceedings of the International Conference on Learning Representations, 2015, pp. 1–13.
+ [4] T. Wang, L. Yuan, X. Zhang, and J. Feng, “Distilling object detectors with fine-grained feature imitation,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2019, pp. 4933–4942.
+ [5] Z. Yang, Z. Li, X. Jiang, Y. Gong, Z. Yuan, D. Zhao, and C. Yuan, “Focal and global knowledge distillation for detectors,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022, pp. 4643–4652.
+ [6] Z. Yang, Z. Li, M. Shao, D. Shi, Z. Yuan, and C. Yuan, “Masked generative distillation,” in Proceedings of the European Conference on Computer Vision, 2022, pp. 1–17.
+ [7] J. Hu, L. Shen, and G. Sun, “Squeeze-and-excitation networks,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 7132–7141.
+ [8] T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollár, “Focal loss for dense object detection,” in Proceedings of the IEEE International Conference on Computer Vision, 2017, pp. 2980–2988.
+ [9] S. Ren, K. He, R. Girshick, and J. Sun, “Faster r-cnn: Towards real-time object detection with region proposal networks,” in Proceedings of the International Conference on Neural Information Processing Systems, 2015, pp. 1–9.
+ [10] Z. Yang, S. Liu, H. Hu, L. Wang, and S. Lin, “Reppoints: Point set representation for object detection,” in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2019, pp. 9657–9666.
+ [11] C.-Y. Fu, W. Liu, A. Ranga, A. Tyagi, and A. C. Berg, “Dssd: Deconvolutional single shot detector,” arXiv preprint arXiv:1701.06659, 2017.
+ [12] Z. Tian, C. Shen, H. Chen, and T. He, “Fcos: Fully convolutional one-stage object detection,” in Proceedings of the IEEE International Conference on Computer Vision, 2019, pp. 9627–9636.
+ [13] N. Carion, F. Massa, G. Synnaeve, N. Usunier, A. Kirillov, and S. Zagoruyko, “End-to-end object detection with transformers,” in Proceedings of the European Conference on Computer Vision. Springer, 2020, pp. 213–229.
+ [14] S. Gidaris and N. Komodakis, “Object detection via a multi-region and semantic segmentation-aware cnn model,” in Proceedings of the IEEE International Conference on Computer Vision, 2015, pp. 1134–1142.
+ [15] T. Kong, A. Yao, Y. Chen, and F. Sun, “Hypernet: Towards accurate region proposal generation and joint object detection,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 845–853.
+ [16] K. He, G. Gkioxari, P. Dollár, and R. Girshick, “Mask r-cnn,” in Proceedings of the IEEE International Conference on Computer Vision, 2017, pp. 2961–2969.
+ [17] J. Redmon and A. Farhadi, “Yolov3: An incremental improvement,” arXiv preprint arXiv:1804.02767, 2018.
+ [18] Z. Ge, S. Liu, F. Wang, Z. Li, and J. Sun, “Yolox: Exceeding yolo series in 2021,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 1–7.
+ [19] R. Girshick, “Fast r-cnn,” in Proceedings of the IEEE International Conference on Computer Vision, 2015, pp. 1440–1448.
+ [20] Z. Liu, Y. Lin, Y. Cao, H. Hu, Y. Wei, Z. Zhang, S. Lin, and B. Guo, “Swin transformer: Hierarchical vision transformer using shifted windows,” in Proceedings of the IEEE International Conference on Computer Vision, 2021, pp. 10012–10022.
+ [21] H. Zhang, F. Li, S. Liu, L. Zhang, H. Su, J. Zhu, L. M. Ni, and H.-Y. Shum, “Dino: Detr with improved denoising anchor boxes for end-to-end object detection,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022, pp. 1–23.
+ [22] X. Zhu, W. Su, L. Lu, B. Li, X. Wang, and J. Dai, “Deformable detr: Deformable transformers for end-to-end object detection,” in Proceedings of the International Conference on Learning Representations, 2021, pp. 1–16.
+ [23] S. Tang, L. Feng, W. Shao, Z. Kuang, W. Zhang, and Y. Chen, “Learning efficient detector with semi-supervised adaptive distillation,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2019, pp. 1–9.
+ [24] Q. Li, S. Jin, and J. Yan, “Mimicking very efficient network for object detection,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017, pp. 6356–6364.
+ [25] Z. Zheng, R. Ye, P. Wang, D. Ren, W. Zuo, Q. Hou, and M.-M. Cheng, “Localization distillation for dense object detection,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2022, pp. 9407–9416.
+ [26] X. Dai, Z. Jiang, Z. Wu, Y. Bao, Z. Wang, S. Liu, and E. Zhou, “General instance distillation for object detection,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2021, pp. 7842–7851.
+ [27] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollár, and C. L. Zitnick, “Microsoft coco: Common objects in context,” in European Conference on Computer Vision. Springer, 2014, pp. 740–755.
+ [28] L. Zhang and K. Ma, “Improve object detection with feature-based knowledge distillation: Towards accurate and efficient detectors,” in International Conference on Learning Representations, 2020.
+ [29] K. Chen, J. Wang, J. Pang, Y. Cao, Y. Xiong, X. Li, S. Sun, W. Feng, Z. Liu, J. Xu et al., “Mmdetection: Open mmlab detection toolbox and benchmark,” arXiv preprint arXiv:1906.07155, 2019.
+ [30] A. Paszke, S. Gross, F. Massa, A. Lerer, J. Bradbury, G. Chanan, T. Killeen, Z. Lin, N. Gimelshein, L. Antiga et al., “Pytorch: An imperative style, high-performance deep learning library,” in Proceedings of the International Conference on Neural Information Processing Systems, 2019, pp. 1–12.
+ [31] Z. Cai and N. Vasconcelos, “Cascade r-cnn: high quality object detection and instance segmentation,” IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 43, no. 5, pp. 1483–1498, 2019.
+ [32] M. Tan and Q. Le, “Efficientnet: Rethinking model scaling for convolutional neural networks,” in Proceedings of the International Conference on Machine Learning. PMLR, 2019, pp. 6105–6114.
1095
+