jackkuo committed (verified)
Commit 26adadd · Parent(s): 54fa10c

Add files using upload-large-folder tool

Files changed (50) — this view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
  1. .gitattributes +1 -0
  2. 19E4T4oBgHgl3EQfzg22/content/tmp_files/2301.05275v1.pdf.txt +1640 -0
  3. 19E4T4oBgHgl3EQfzg22/content/tmp_files/load_file.txt +0 -0
  4. 3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf +0 -0
  5. 3NFIT4oBgHgl3EQf5iun/content/tmp_files/2301.11390v1.pdf.txt +162 -0
  6. 3NFIT4oBgHgl3EQf5iun/content/tmp_files/load_file.txt +165 -0
  7. 3dAzT4oBgHgl3EQf9P73/content/tmp_files/2301.01918v1.pdf.txt +1727 -0
  8. 3dAzT4oBgHgl3EQf9P73/content/tmp_files/load_file.txt +0 -0
  9. 4NFKT4oBgHgl3EQfRi0P/content/tmp_files/2301.11771v1.pdf.txt +977 -0
  10. 4NFKT4oBgHgl3EQfRi0P/content/tmp_files/load_file.txt +0 -0
  11. 69E0T4oBgHgl3EQfwAF2/content/tmp_files/2301.02626v1.pdf.txt +1615 -0
  12. 69E0T4oBgHgl3EQfwAF2/content/tmp_files/load_file.txt +0 -0
  13. 8tE2T4oBgHgl3EQflgdv/content/tmp_files/2301.03989v1.pdf.txt +1454 -0
  14. 8tE2T4oBgHgl3EQflgdv/content/tmp_files/load_file.txt +0 -0
  15. BNAzT4oBgHgl3EQfhv2_/content/tmp_files/2301.01490v1.pdf.txt +1210 -0
  16. BNAzT4oBgHgl3EQfhv2_/content/tmp_files/load_file.txt +0 -0
  17. DNE0T4oBgHgl3EQfQQC5/content/tmp_files/2301.02191v1.pdf.txt +1849 -0
  18. DNE0T4oBgHgl3EQfQQC5/content/tmp_files/load_file.txt +0 -0
  19. I9AyT4oBgHgl3EQffvhT/content/tmp_files/2301.00345v1.pdf.txt +1829 -0
  20. I9AyT4oBgHgl3EQffvhT/content/tmp_files/load_file.txt +0 -0
  21. ItE3T4oBgHgl3EQfXAqD/content/tmp_files/2301.04475v1.pdf.txt +3162 -0
  22. ItE3T4oBgHgl3EQfXAqD/content/tmp_files/load_file.txt +0 -0
  23. LNFAT4oBgHgl3EQfwR78/content/tmp_files/2301.08681v1.pdf.txt +0 -0
  24. LNFAT4oBgHgl3EQfwR78/content/tmp_files/load_file.txt +0 -0
  25. M9E1T4oBgHgl3EQftQXp/content/tmp_files/2301.03376v1.pdf.txt +1463 -0
  26. M9E1T4oBgHgl3EQftQXp/content/tmp_files/load_file.txt +0 -0
  27. ONE0T4oBgHgl3EQfjgHF/content/tmp_files/2301.02461v1.pdf.txt +1388 -0
  28. ONE0T4oBgHgl3EQfjgHF/content/tmp_files/load_file.txt +0 -0
  29. S9E2T4oBgHgl3EQfWwd9/content/tmp_files/2301.03837v1.pdf.txt +2033 -0
  30. S9E2T4oBgHgl3EQfWwd9/content/tmp_files/load_file.txt +0 -0
  31. SdE0T4oBgHgl3EQfUgDE/content/tmp_files/2301.02252v1.pdf.txt +0 -0
  32. SdE0T4oBgHgl3EQfUgDE/content/tmp_files/load_file.txt +0 -0
  33. U9AzT4oBgHgl3EQf1P4j/content/tmp_files/2301.01795v1.pdf.txt +0 -0
  34. U9AzT4oBgHgl3EQf1P4j/content/tmp_files/load_file.txt +0 -0
  35. VNAzT4oBgHgl3EQfJ_sw/content/tmp_files/2301.01088v1.pdf.txt +837 -0
  36. VNAzT4oBgHgl3EQfJ_sw/content/tmp_files/load_file.txt +435 -0
  37. WdAyT4oBgHgl3EQfu_n3/content/tmp_files/2301.00625v1.pdf.txt +990 -0
  38. WdAyT4oBgHgl3EQfu_n3/content/tmp_files/load_file.txt +0 -0
  39. XNE1T4oBgHgl3EQfJQOq/content/tmp_files/2301.02950v1.pdf.txt +1303 -0
  40. XNE1T4oBgHgl3EQfJQOq/content/tmp_files/load_file.txt +0 -0
  41. a9AyT4oBgHgl3EQfW_fi/content/tmp_files/2301.00176v1.pdf.txt +1072 -0
  42. a9AyT4oBgHgl3EQfW_fi/content/tmp_files/load_file.txt +0 -0
  43. d9FRT4oBgHgl3EQfUTfv/content/tmp_files/2301.13536v1.pdf.txt +759 -0
  44. d9FRT4oBgHgl3EQfUTfv/content/tmp_files/load_file.txt +416 -0
  45. dNFQT4oBgHgl3EQfjDY0/content/tmp_files/2301.13352v1.pdf.txt +2147 -0
  46. dNFQT4oBgHgl3EQfjDY0/content/tmp_files/load_file.txt +0 -0
  47. h9AzT4oBgHgl3EQf4v4M/content/tmp_files/2301.01847v1.pdf.txt +1578 -0
  48. h9AzT4oBgHgl3EQf4v4M/content/tmp_files/load_file.txt +0 -0
  49. hNE4T4oBgHgl3EQfrQ2w/vector_store/index.faiss +3 -0
  50. htE4T4oBgHgl3EQfrg2l/content/tmp_files/2301.05209v1.pdf.txt +1870 -0
.gitattributes CHANGED
@@ -256,3 +256,4 @@ k9FIT4oBgHgl3EQfriuF/content/2301.11332v1.pdf filter=lfs diff=lfs merge=lfs -text
 KdAyT4oBgHgl3EQff_ia/content/2301.00351v1.pdf filter=lfs diff=lfs merge=lfs -text
 79E2T4oBgHgl3EQflQc0/content/2301.03986v1.pdf filter=lfs diff=lfs merge=lfs -text
 k9FIT4oBgHgl3EQfriuF/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
+hNE4T4oBgHgl3EQfrQ2w/vector_store/index.faiss filter=lfs diff=lfs merge=lfs -text
19E4T4oBgHgl3EQfzg22/content/tmp_files/2301.05275v1.pdf.txt ADDED
@@ -0,0 +1,1640 @@
Approximate Balancing Weights for Clustered Observational Study Designs∗

Eli Ben-Michael†, Lindsay Page‡, Luke Keele§

January 16, 2023

arXiv:2301.05275v1 [stat.ME] 12 Jan 2023
Abstract

In a clustered observational study, a treatment is assigned to groups and all units within the group are exposed to the treatment. We develop a new method for statistical adjustment in clustered observational studies using approximate balancing weights, a generalization of inverse propensity score weights that solve a convex optimization problem to find a set of weights that directly minimize a measure of covariate imbalance, subject to an additional penalty on the variance of the weights. We tailor the approximate balancing weights optimization problem to both adjustment sets by deriving an upper bound on the mean square error for each case and finding weights that minimize this upper bound, linking the level of covariate balance to a bound on the bias. We implement the procedure by specializing the bound to a random cluster-level effects model, leading to a variance penalty that incorporates the signal-to-noise ratio and penalizes the weight on individuals and the total weight on groups differently according to the intra-class correlation.

Keywords: Balancing Weights, Clustered Observational Study, Clustered Data
∗This research is supported by the Institute of Education Sciences, U.S. Department of Education, through Grant R305D210014. The opinions expressed are those of the authors and do not represent views of the Institute or the U.S. Department of Education. One of the datasets used for this study was purchased with a grant from the Society of American Gastrointestinal and Endoscopic Surgeons. Although the AMA Physician Masterfile data is the source of the raw physician data, the tables and tabulations were prepared by the authors and do not reflect the work of the AMA. The Pennsylvania Health Cost Containment Council (PHC4) is an independent state agency responsible for addressing the problems of escalating health costs, ensuring the quality of health care, and increasing access to health care for all citizens. While PHC4 has provided data for this study, PHC4 specifically disclaims responsibility for any analyses, interpretations or conclusions. Some of the data used to produce this publication was purchased from or provided by the New York State Department of Health (NYSDOH) Statewide Planning and Research Cooperative System (SPARCS). However, the conclusions derived, and views expressed herein are those of the author(s) and do not reflect the conclusions or views of NYSDOH. NYSDOH, its employees, officers, and agents make no representation, warranty or guarantee as to the accuracy, completeness, currency, or suitability of the information provided here. This publication was derived, in part, from a limited data set supplied by the Florida Agency for Health Care Administration (AHCA), which specifically disclaims responsibility for any analysis, interpretations, or conclusions that may be created as a result of the limited data set.
†Carnegie Mellon University, Pittsburgh, PA. Email: ebenmichael@cmu.edu
‡Brown University, Providence, RI. Email: lindsay_page@brown.edu
§University of Pennsylvania, Philadelphia, PA. Email: luke.keele@gmail.com
1 Introduction

In a study of comparative effectiveness, researchers seek to understand whether a treatment has a causal effect on an outcome of interest for a set of study units. In the causal inference literature, treatment assignment is a critical element of the study design (Rubin, 2007, 2008). Two key components of treatment assignment are whether the intervention is randomized or not and whether it is grouped or not. First, when interventions are randomly assigned, differences between treated and control groups can be interpreted as causal effects. In contrast, when subjects select their own treatments, stronger assumptions are needed to identify causal effects. Second, interventions may be assigned to individual units or to intact groups. For example, given that students are grouped in schools, a treatment may be assigned to all students in some schools and withheld from all students in other schools. When treatment is randomly assigned and grouped, the design is often called a clustered randomized trial (CRT) (Raudenbush, 1997; Hedges and Hedberg, 2007). In many cases, however, treatments are grouped but non-randomly assigned. This design is referred to as the clustered observational study (COS) (Pimentel et al., 2018; Page et al., 2020). COS designs differ from non-clustered observational studies, both because they rely on different identification assumptions and because they require different methods for analysis (Ye et al., 2022).

In a COS design, as is true for any observational study, differences between treated and control outcomes may reflect initial differences in the treated and control groups rather than treatment effects (Cochran, 1965; Rubin, 1974). As such, analysis for COS designs requires statistical adjustment methods to account for observed confounders. Adjustment methods for COS designs, however, may need to remove treated and control differences in the distributions of covariates at the cluster level, the unit level, or perhaps both levels. Recent work has also shown that statistical adjustment in the COS design needs to reflect key substantive knowledge of how the clustered treatment is assigned and whether differential selection of clusters is present (Ye et al., 2022). In short, analysts require a statistical adjustment strategy that takes into account key aspects of the COS design.

In this article, we develop an approximate balancing weights estimator tailored to the COS context. Balancing weight estimators solve a convex optimization problem to find a set of weights that directly minimize a measure of covariate imbalance subject to an additional constraint or penalty on the complexity of the weights (Hainmueller, 2011; Zubizarreta, 2015; Ben-Michael et al., 2021). Approximate balancing weights are a generalization of the standard inverse propensity score estimator. Approximate balancing weight methodology, however, requires a number of key innovations to be used in the COS context. First, we write separate objective functions with different balance measures and variance penalties for the two possible estimands in a COS. The first estimand adjusts for both cluster- and unit-level covariates while the second adjusts for cluster-level covariates only. For both estimands, we derive a bound on the mean square error of a general weighting estimator, and find weights that minimize this upper bound, showing that the level of balance between the treated and re-weighted control group gives a bound on the bias. Next, we write the variance penalty as a cluster-level random effects model, and we show that the variance penalty is comprised of two parts. The first component is the signal-to-noise ratio, which measures the overall impact of the variance relative to bias. The second component is the intra-class correlation. These components correspond to hyper-parameters in the corresponding balancing optimization problem, and we develop a data-driven approach for selecting them. For cases where it is sufficient to condition on cluster-level covariates only, we show that it can be more efficient to further adjust for unit-level covariates when they are strongly predictive of the outcome or if the intra-class correlation is moderate to high. In applications with poor overlap or many covariates, it can be difficult to find weights that achieve good balance. For these cases, we develop two separate extensions: using an outcome model to perform additional bias correction, and adjusting the balancing optimization problem to find the maximally overlapping set between the treated and control group. Finally, we derive methods for variance estimation and show how to conduct inference based on asymptotic normality. In a series of simulations, we find that balancing weights are superior to multilevel matching — which is also tailored to the COS design — in terms of bias and variance reduction. We provide further comparisons with two empirical applications estimating the effects of Catholic schools and surgical training. Overall, we find that balancing weights produce superior balance relative to extant matching methods and have larger effective sample sizes.

Our article proceeds as follows. In Section 2, we review the details of the COS design. In Section 3 we develop approximate balancing weights for the COS design and discuss extensions and inference in Section 4. In Section 5, we evaluate our methods in a simulation study. In Section 6, we analyze the data from two COS designs: one from education and one from health services research. In Section 7, we conclude and discuss directions for future work.
2 The COS Design

First, we review the formal aspects of the COS design and how the process of differential selection leads to different estimands and adjustment strategies. See Ye et al. (2022) for a detailed treatment of identification issues in the COS design.

2.1 Notation

We consider a setup with $m$ clusters, with $n_\ell$ units in cluster $\ell$, and $n = \sum_{\ell=1}^m n_\ell$ total units. We denote the treatment status of cluster $\ell$ as $A_\ell \in \{0, 1\}$, $n_1 \equiv \sum_{\ell=1}^m A_\ell n_\ell$ as the total number of treated units, and $n_0 \equiv n - n_1$ as the total number of control units. Each collection of treatment status vectors $a = (a_1, \ldots, a_m) \in \{0,1\}^m$ is associated with a vector of potential cluster assignments, $J(a) = (J_1(a), \ldots, J_n(a)) \in \{1, \ldots, m\}^n$, where $J_i(a) \in \{1, \ldots, m\}$ denotes the cluster that unit $i$ would belong to under overall treatment allocation $a$. We denote the observed cluster assignments as $J = J(A)$. Each unit $i$ has a potential outcome $Y_i(a_{J_i(a)}, J(a))$ corresponding to the outcome that would be observed if its associated cluster has treatment status $a_{J_i}$ and the overall cluster assignment is $J(a)$. Note that here we have followed Ye et al. (2022) and assumed that a unit's potential outcome only depends on its own cluster's treatment status and not the treatment status of other clusters, except through the potential cluster assignments. We further assume that potential outcomes are independent across clusters — i.e., they are independent for units $i$ and $i'$ if $J_i(a) \neq J_{i'}(a)$ — but may be dependent within clusters. We denote the observed outcomes as $Y_i(A_{J_i}, J)$. We also assume that we observe unit-level covariates $X_i \in \mathcal{X}$ and that the cluster assignments lead to potential cluster-level covariates, $W_\ell(J(a)) \in \mathcal{W}$, which may include summaries of the unit-level covariates and so can depend on the potential cluster assignments. We let $W_\ell = W_\ell(J(A))$ denote the observed cluster-level covariates, and $W$ denote the collection of observed cluster-level covariates for all clusters. Taken together, our observed data consists of tuples $(X_i, J_i, W_{J_i}, A_{J_i}, Y_i)$.
Next, we define several conditional expectations of the observed outcomes that we will use throughout. First, we denote $m_w(a, w) \equiv E[Y_i \mid A_{J_i} = a, W_{J_i} = w]$ as the expected outcome for unit $i$, conditioned on its cluster having treatment status $a$ and covariates $w$. Next, we use $m_{wx}(a, w, x) \equiv E[Y_i \mid A_{J_i} = a, W_{J_i} = w, X_i = x]$ to denote the expected outcome if we add more information and additionally condition on unit-level covariates $x$. Next, let $e(w) = P(A_\ell = 1 \mid W_\ell = w)$ denote the propensity score, the probability that a cluster is treated, conditioning on cluster-level covariates, and $e(w, x) = P(A_\ell = 1 \mid W_\ell = w, X_i = x)$ denote the propensity score conditioning on cluster- and unit-level covariates.
2.2 Estimand, Designs, and Assumptions

Ye et al. (2022) delineate two primary COS designs. The first we denote as the Cluster-Unit Design (CUD), and the second we denote as the Cluster-Only Design (COD). Both designs focus on a common target causal estimand, the average treatment effect for the treated units under the observed cluster assignments $J$,

$$\tau \equiv \underbrace{E\left[Y_i(1, J) \mid A_{J_i} = 1\right]}_{\mu_1} - \underbrace{E\left[Y_i(0, J) \mid A_{J_i} = 1\right]}_{\mu_0}. \tag{1}$$

This estimand measures the effect of treatment for units in treated clusters, keeping the cluster assignments fixed. The first term, $\mu_1$, can be written as the expectation of the observed outcomes among units in the treated clusters, $\mu_1 = E[Y_i \mid A_{J_i} = 1]$. However, identifying and estimating $\mu_0$, the mean counterfactual outcome if those clusters had in fact been assigned to control, is more difficult. Next, we review the key assumptions required for identification of this target causal estimand under the two designs.
2.2.1 Cluster-Unit Design

The key distinction between the identification strategies for these two designs is whether the units within the clusters respond to treatment assignment to clusters. That is, under the CUD, the mix of units within clusters is changed by the fact that clusters are treated. Ye et al. (2022) refer to such bias as differential selection. Differential selection could occur if the student mix in a school changes in response to the school being treated. For example, parents may opt to enroll their child in a school implementing a whole-school curricular reform. For the CUD, we need to account for unit selection into the clusters, which can depend on the treatment assignments. Identification of $\mu_0$ under the CUD requires the following set of assumptions:

• For two cluster assignment vectors $j$ and $j'$ such that the treatment assignments are the same, $a_{j_i} = a_{j'_i}$, and the cluster-level covariates are unchanged, $W_{j_i} = W_{j'_i}$ for unit $i$, the potential outcomes are equal: $Y_i(a_{j_i}, j) = Y_i(a_{j'_i}, j')$.
• Denote $Y_i(a, w) = Y_i(a_{j_i} = a, W_{j_i} = w)$. For every unit $i$, cluster $\ell$, treatment value $a$, and cluster-level covariate value $w$, $A_{J_i} \perp Y_i(a, w) \mid W_{J_i}, X_i$.
• $e(w, x) > 0$ for all $(w, x) \in \mathcal{W} \times \mathcal{X}$.

Taken together, these assumptions form an ignorability assumption that includes both cluster-level covariates and unit-level covariates that drive treatment selection. Here, we identify $\mu_0$ as

$$\mu_0 = E[m_{wx}(0, W_{J_i}, X_i) \mid A_{J_i} = 1] \equiv \mu_{0wx}.$$
2.3 Cluster-Only Design

For the COD, treatment assignment is restricted to cases where either treatment assignment occurs after the unit-cluster pairing, or units are blinded to the cluster assignments before pairing. As such, treatment assignment only depends on cluster-level covariates. Identification of $\mu_0$ under the COD requires the following set of assumptions:

• Cluster assignments are not affected by treatment: $J_\ell(a) = J_\ell$ for all $a \in \{0,1\}^m$.
• For every unit $i$, cluster $\ell$, cluster assignment vector $j$, and treatment value $a$, $A_{J_i} \perp (J, X_i, Y_i(a, j)) \mid W_{J_i}$.
• $e(w) > 0$ for all $w \in \mathcal{W}$.

Here, conditioning on the cluster-level covariates is sufficient to remove confounding. As such, under the COD, the estimand for the expected outcome conditional on the cluster being assigned to the control condition and the cluster-level covariates is identified as:

$$\mu_0 = E[m_w(0, W_{J_i}) \mid A_{J_i} = 1] \equiv \mu_{0w}.$$

The CUD requires a stronger assumption about the form of the potential outcomes but a weaker assumption on the treatment selection process than the COD. However, the key distinction between these two identification strategies is the conditioning sets. For the CUD, we must condition on both unit- and cluster-level covariates. For the COD, we only need to condition on cluster-level covariates. As such, the COD is a special case of the CUD without unit-level covariates. Below, we consider the implications of conditioning on unit-level covariates when the assumptions of the COD hold, and we show that while unit-level covariates are unnecessary for identification, they may be beneficial in terms of increased precision. Next, we tailor approximate balancing weight methods to each design.
3 Approximate Balancing Weights for the COS Design

Recent work has developed matching methods—known as multilevel matching—that are specifically tailored to the COS design (Keele and Zubizarreta, 2017; Pimentel et al., 2018; Keele et al., 2021). Here, we outline approximate balancing weights as an alternative to multilevel matching. Approximate balancing weights are a generalization of the standard inverse propensity score (IPW) estimator that solve a convex optimization problem to find a set of weights that directly minimize a measure of covariate imbalance subject to an additional constraint or penalty on the complexity of the weights (Hainmueller, 2011; Zubizarreta, 2015). Like matching, and unlike IPW, balancing weights are designed to directly target covariate balance in the estimation process, as opposed to using the estimated probability of being selected for treatment. Like weighting, and unlike matching, balancing weights use more of the available data and so often have larger effective sample sizes.

Recent theoretical innovations have bolstered support for balancing weights. For example, it has been shown that balancing weights are implicitly estimates of the inverse propensity score, fit via a loss function that guarantees covariate balance (Zhao and Percival, 2016; Zhao, 2019; Wang and Zubizarreta, 2019; Chattopadhyay et al., 2020). Researchers have proposed non-parametric extensions that allow for flexible specifications of the outcome model or propensity score (Hirshberg et al., 2019; Hazlett, 2019). Finally, recent literature on weighting allows these estimators to target different quantities such as sample overlap (Li et al., 2018). See Ben-Michael et al. (2021) for a general overview of balancing weights.
Approximate balancing weights for the COS design, however, require the development of specific objective functions that are tailored to the identification conditions outlined above. Next, we develop objective functions that target measures of covariate imbalance and variance penalties that are specific to COS designs. As we will see, one key challenge in developing objective functions for COS designs is accounting for how clustering affects the variance of the weights. Below, we consider a generic weighting estimator of the form

$$\hat{\mu}_0(\gamma) = \frac{1}{n_1} \sum_{\ell=1}^{m} (1 - A_\ell) \sum_{J_i = \ell} \gamma_i Y_i,$$

where the weights $\gamma_i$ are independent of the outcomes. We will first consider conditioning on cluster- and unit-level covariates to target $\mu_{0wx}$. Then we will specialize this estimator to only condition on cluster-level covariates and target $\mu_{0w}$. For both cases, we inspect the mean square error of the generic weighted average of control units' outcomes, then find weights that minimize an upper bound. Under both settings, we will consider the finite sample estimands:

$$\tilde{\mu}_{0wx} \equiv \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} m_{wx}(0, W_{J_i}, X_i), \quad \text{and} \quad \tilde{\mu}_{0w} \equiv \frac{1}{n_1} \sum_{A_\ell = 1} n_\ell m_w(0, W_\ell),$$

which converge to our main estimands.
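To make the generic estimator concrete, here is a minimal sketch (in Python) of computing $\hat{\mu}_0(\gamma)$ from arrays of outcomes, weights, cluster labels, and cluster treatment statuses; the variable names and data layout are our own illustration, not from the paper.

```python
import numpy as np

def weighted_control_mean(y, gamma, cluster, a_cluster, n1):
    """Generic estimator mu_hat_0(gamma): the weighted sum of outcomes over
    units in control clusters, divided by the number of treated units n1."""
    control = np.array([a_cluster[c] == 0 for c in cluster])
    return np.sum(gamma[control] * y[control]) / n1

# Toy example: clusters 0 and 1 are control, cluster 2 is treated
y = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
gamma = np.array([1.5, 0.5, 1.0, 1.0, 0.0, 0.0])
cluster = np.array([0, 0, 1, 1, 2, 2])
a_cluster = {0: 0, 1: 0, 2: 1}
print(weighted_control_mean(y, gamma, cluster, a_cluster, n1=2))
```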
3.1 Conditioning on cluster- and unit-level covariates

To target $\mu_{0wx}$, we use the following decomposition for the estimation error of the weighting estimator:

$$\hat{\mu}_0(\gamma) - \tilde{\mu}_{0wx} = \frac{1}{n_1} \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i m_{wx}(0, W_\ell, X_i) - \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} m_{wx}(0, W_\ell, X_i) + \frac{1}{n_1} \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i \varepsilon_i. \tag{2}$$
To understand this decomposition, we compute the design-conditional bias and variance. First, since the weights are independent of the outcomes, under the ignorability condition for the CUD, the conditional bias corresponds to the first term in the decomposition:

$$E\left[\hat{\mu}_0(\gamma) - \tilde{\mu}_{0wx} \mid W, A, J, X\right] = \frac{1}{n_1} \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i m_{wx}(0, W_\ell, X_i) - \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} m_{wx}(0, W_\ell, X_i). \tag{3}$$

This bias is the post-weighting imbalance in a particular function of the cluster- and unit-level covariates: the conditional expectation function $m_{wx}(0, \cdot, \cdot)$. If we knew $m_{wx}$, then we could remove all bias by ensuring that this function is balanced. Unfortunately we do not know it, or else we would be able to perfectly impute the missing potential outcomes. Instead, following Hirshberg et al. (2019) and Ben-Michael et al. (2021), we consider a class of potential models $\mathcal{M}_{wx}$ and consider the worst-case bias over this class:

$$\text{imbalance}_{\mathcal{M}_{wx}}(\gamma) = \max_{m \in \mathcal{M}_{wx}} \left| \frac{1}{n_1} \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i m(0, W_\ell, X_i) - \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} m(0, W_\ell, X_i) \right|.$$

Here, since we are conditioning on both unit- and cluster-level covariates, the model class $\mathcal{M}_{wx}$ can be relatively complex. For example, it might include interactions between $W_{J_i}$ and $X_i$.
Next, the design-conditional variance corresponds to the second term:

$$V^{\text{unit}} \equiv \text{Var}\left(\hat{\mu}_0(\gamma) \mid W, A, J, X\right) = \frac{1}{n_1^2} \sum_{A_\ell = 0} \left[ \sum_{J_i = \ell} \gamma_i^2 \text{Var}(\varepsilon_i) + \sum_{J_i = \ell} \sum_{J_k = \ell, k \neq i} \gamma_i \gamma_k \text{Cov}(\varepsilon_i, \varepsilon_k) \right], \tag{4}$$

where $\varepsilon_i = Y_i - m_{wx}(0, W_{J_i}, X_i)$ is the residual in the observed outcomes after conditioning on cluster- and unit-level covariates. Note that due to potential correlation in the outcomes within clusters, the variance term includes a cross-term involving the product of pairs of weights on each unit, in addition to the typical squared-weight term used in settings where the weights are independent. Taken together, we try to find weights that minimize an upper bound on the design-conditional mean square error:

$$\min_{\gamma} \; \text{imbalance}_{\mathcal{M}_{wx}}(\gamma)^2 + \frac{1}{n_1^2} \sum_{A_\ell = 0} \left[ \sum_{J_i = \ell} \gamma_i^2 \text{Var}(\varepsilon_i) + \sum_{J_i = \ell} \sum_{J_k = \ell, k \neq i} \gamma_i \gamma_k \text{Cov}(\varepsilon_i, \varepsilon_k) \right]. \tag{5}$$
Implementing the optimization in Equation (5) requires making several modeling choices. First, we must choose the model class for the conditional expectation function, $\mathcal{M}_{wx}$. Second, we must specify the variances and covariances of the residuals. In making these choices, we will attempt to strike a balance between flexibility and practicality.
We begin by selecting the model class $\mathcal{M}_{wx}$. In the non-clustered treatment assignment setting, many model classes have been considered, from sparse models (Zubizarreta, 2015) to models with expanding basis functions (Wang and Zubizarreta, 2020) to reproducing kernel Hilbert spaces (Hirshberg et al., 2019). See Ben-Michael et al. (2021) for a recent review and discussion of the difficulty of balancing these model classes. To target $\mu_{0wx}$ in the COS design, we consider a model class that is linear in a transformation of the covariates, incorporating both cluster- and unit-level covariates, with $L^2$-bounded coefficients and an unbounded intercept, i.e.:

$$\mathcal{M}_{wx} = \Psi_{wx} \equiv \left\{ \alpha + \beta \cdot \psi(w, x) \mid \|\beta\|_2 \leq C_{wx}, \; \alpha \in \mathbb{R} \right\}.$$

These model classes naturally allow for a non-parametric extension to reproducing kernel Hilbert spaces with infinite-dimensional transformations via the “kernel” trick, e.g., defining a kernel $k((w_1, x_1), (w_2, x_2)) = \psi(w_1, x_1) \cdot \psi(w_2, x_2)$. Notably, these transformations, $\psi(w, x)$, can include interactions between the two levels of variables, allowing for the cluster context to affect the relationship between the outcome and the unit-level covariates.
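A useful consequence of this choice, which underlies the optimization problems below, is that the worst-case imbalance over $\Psi_{wx}$ has a closed form. Because the weights will be constrained to sum to $n_1$, the intercept $\alpha$ cancels; writing $v(\gamma)$ for the vector of weighted differences in the transformed covariates, the Cauchy–Schwarz inequality gives

$$\text{imbalance}_{\Psi_{wx}}(\gamma) = \max_{\|\beta\|_2 \leq C_{wx}} \left| \beta \cdot v(\gamma) \right| = C_{wx} \, \|v(\gamma)\|_2,$$

so minimizing the worst-case squared bias is equivalent to minimizing the squared $L^2$ norm of the feature imbalance.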
To specify the variances and covariances of the residuals, we use a random effects model. Momentarily abusing notation, we write the residual between unit $i$'s outcome and its expected control outcome conditional on cluster- and unit-level covariates as $Y_i - m_{wx}(W_{J_i}, X_i) = \delta_{J_i} + \varepsilon_i$, with a cluster-level random effect $\delta_{J_i}$ and an independent unit-level residual $\varepsilon_i$. We parameterize the variance with two terms: $\text{Var}(\delta_{J_i}) = \sigma^2 \rho$ and $\text{Var}(\varepsilon_i) = \sigma^2 (1 - \rho)$. Under the random-effects model, $\sigma^2$ represents the total residual variance, and $\rho$ is the intra-class correlation (ICC), which is a well-known measure of relatedness within clusters. Under this random effects model, the variance is

$$\frac{\sigma^2}{n_1^2} \sum_{A_\ell = 0} \left[ (1 - \rho) \sum_{J_i = \ell} \gamma_i^2 + \rho \left( \sum_{J_i = \ell} \gamma_i \right)^2 \right].$$
We then find the weights $\gamma_i$ that solve the following optimization problem:

$$\begin{aligned}
\min_{\gamma} \quad & \left\| \frac{1}{n_1} \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i \psi(W_\ell, X_i) - \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} \psi(W_\ell, X_i) \right\|_2^2 + \frac{\sigma^2}{C_{wx}^2} \frac{1}{n_1^2} \sum_{A_\ell = 0} \left[ (1 - \rho) \sum_{J_i = \ell} \gamma_i^2 + \rho \left( \sum_{J_i = \ell} \gamma_i \right)^2 \right] \\
\text{subject to} \quad & \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i = n_1 \quad \text{and} \quad L \leq \bar{\gamma}_\ell \leq U,
\end{aligned} \tag{6}$$
where we have included some additional optional upper and lower bounds on the weights. This objective function implements the general balancing weights problem in Equation (5) with the model class $\Psi_{wx}$ and a random-effects model. The variance penalty includes two hyperparameters. First, there is the noise-to-signal ratio, $\sigma^2 / C_{wx}^2$, which measures the overall impact of the variance relative to the bias in the MSE. If this ratio is small, then better balance will be prioritized over lower variance; if the ratio is large then the opposite is true. The second hyperparameter is the ICC, which determines the level of penalization for each cluster. If the ICC is small and units' outcomes are nearly uncorrelated, then the weight on each unit will be penalized the same. Conversely, if the ICC is large and the outcomes are very correlated within clusters, the penalization focuses on the total weight assigned to each cluster instead. We discuss setting these hyperparameters below. Note that this specification of the variance penalty is equivalent to that of Rubinstein et al. (2022), who consider region-level policy analysis via weighting.
The weights are also constrained to sum to the total number of treated units, and to be bounded between a lower bound $L$ and an upper bound $U$. The former constraint comes from the possibility of an unbounded intercept in the model class $\Psi_{wx}$; by ensuring the sum constraint we can be sure that the estimator is invariant to constant shifts in the outcome. The latter constraint acts as a form of regularization, and allows us to perform typically post-hoc adjustments such as weight trimming directly when finding our weights. If $L = 0$ and $U = \infty$, the estimator will be restricted from extrapolating away from the support of the control data (see Ben-Michael et al., 2021, for further discussion on the role of extrapolation). If we additionally set $U$ to be finite, we can prevent any weights from becoming extremely large, at the cost of balance.
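To illustrate, the following is a minimal sketch of the problem in Equation (6) using the cvxpy modeling language. The data layout and function signature are our own, and for simplicity the sketch bounds the unit-level weights directly rather than the cluster averages $\bar{\gamma}_\ell$:

```python
import cvxpy as cp
import numpy as np

def unit_balancing_weights(psi0, psi1, cluster0, noise_ratio, icc,
                           lower=0.0, upper=np.inf):
    """Sketch of Equation (6). psi0: (n0, d) transformed covariates for
    control units; psi1: (n1, d) for treated units; cluster0: control
    units' cluster labels; noise_ratio: sigma^2 / C_wx^2; icc: rho."""
    n0 = psi0.shape[0]
    n1 = psi1.shape[0]
    gamma = cp.Variable(n0)

    # Squared L2 imbalance between the weighted control average and the
    # treated average of the transformed covariates
    imbalance = cp.sum_squares(psi0.T @ gamma / n1 - psi1.sum(axis=0) / n1)

    # Random-effects variance penalty:
    # (1 - rho) * sum_i gamma_i^2 + rho * sum_ell (sum_{i in ell} gamma_i)^2
    penalty = (1 - icc) * cp.sum_squares(gamma)
    for ell in np.unique(cluster0):
        idx = np.where(cluster0 == ell)[0]
        penalty = penalty + icc * cp.square(cp.sum(gamma[idx]))

    objective = imbalance + noise_ratio * penalty / n1 ** 2
    constraints = [cp.sum(gamma) == n1, gamma >= lower]
    if np.isfinite(upper):
        constraints.append(gamma <= upper)
    cp.Problem(cp.Minimize(objective), constraints).solve()
    return gamma.value
```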
+
524
+ 3.2
525
+ Conditioning on cluster-level covariates only
526
+ Next, we consider estimating µ0w, which does not condition on unit-level covariates. This
527
+ weighting estimator ignores the unit-level information, with weights that are constant within
528
+ clusters, i.e. γclus
529
+ i
530
+ = ¯γJi for all units i. Here, the results are a special case of the CUD,
531
+ removing the unit-level covariates from the optimization procedure. The design-conditional
532
+ bias and variance in this special case are:
533
+ E
534
+
535
+ ˆµ0
536
+
537
+ γclus�
538
+ − ˜µ0w | W , A, J
539
+
540
+ = 1
541
+ n1
542
+
543
+ Aℓ=0
544
+ nℓ¯γℓmw(0, Wℓ) − 1
545
+ n1
546
+
547
+ Aℓ=1
548
+ nℓmw(0, Wℓ),
549
+ (7)
550
+ V clus ≡ Var
551
+
552
+ ˆµ0
553
+
554
+ γclus�
555
+ | W , A, J
556
+
557
+ = 1
558
+ n2
559
+ 1
560
+
561
+ Aℓ=0
562
+ ¯γ2
563
+ ℓ Var
564
+ ��
565
+ Ji=ℓ
566
+ ei | J
567
+
568
+ .
569
+ (8)
570
Here, the bias only depends on imbalance in a function of the cluster-level covariates, $m_w$. As above, we consider a model class $\mathcal{M}_w$ and upper bound the bias by the worst-case imbalance:

$$\text{imbalance}_{\mathcal{M}_w}(\bar{\gamma}) \equiv \max_{m \in \mathcal{M}_w} \left| \frac{1}{n_1} \sum_{A_\ell = 0} n_\ell \bar{\gamma}_\ell m(0, W_\ell) - \frac{1}{n_1} \sum_{A_\ell = 1} n_\ell m(0, W_\ell) \right|.$$

Comparing the variances $V^{\text{clus}}$ and $V^{\text{unit}}$, we see that restricting to cluster-level covariates only removes the cross term between weights in the same cluster. So $V^{\text{clus}}$ depends principally on the sum of the squared weights, weighted by the total variance of the outcomes in each cluster. Tailoring Equation (5) yields the following optimization problem to control the MSE:

$$\min_{\bar{\gamma}} \; \text{imbalance}_{\mathcal{M}_w}(\bar{\gamma})^2 + \frac{1}{n_1^2} \sum_{A_\ell = 0} \bar{\gamma}_\ell^2 \, \text{Var}\left(\sum_{J_i = \ell} e_i \mid J\right). \tag{9}$$
To implement this optimization procedure, we will use the same model class and variance model as above, removing the unit-level covariates. For the model class, we use the set of models that are linear in transformations of the cluster-level covariates:

$$\mathcal{M}_w = \Phi_w \equiv \left\{ \alpha + \beta \cdot \varphi(w) \mid \|\beta\|_2 \leq C_w, \; \alpha \in \mathbb{R} \right\}.$$

For the variance model, we again assume that the residual decomposes into a cluster-level residual and an independent unit-level residual: $Y_i - m_w(0, W_{J_i}) = e_i + d_{J_i}$, with $\text{Var}(e_i) = s^2(1 - r)$ and $\text{Var}(d_\ell) = s^2 r$. Here, $s^2$ represents the total variance in the residual, and $r$ is the ICC. Now, the variance can be written as

$$\frac{s^2}{n_1^2} \sum_{A_\ell = 0} \bar{\gamma}_\ell^2 \left( (1 - r) n_\ell + r n_\ell^2 \right).$$

Putting together the pieces, we find weights $\bar{\gamma}$ that solve the following optimization problem:

$$\begin{aligned}
\min_{\bar{\gamma}} \quad & \left\| \frac{1}{n_1} \sum_{A_\ell = 0} n_\ell \bar{\gamma}_\ell \varphi(W_\ell) - \frac{1}{n_1} \sum_{A_\ell = 1} n_\ell \varphi(W_\ell) \right\|_2^2 + \frac{s^2}{C_w^2} \frac{1}{n_1^2} \sum_{A_\ell = 0} \bar{\gamma}_\ell^2 \left( (1 - r) n_\ell + r n_\ell^2 \right) \\
\text{subject to} \quad & \sum_{A_\ell = 0} n_\ell \bar{\gamma}_\ell = n_1 \quad \text{and} \quad L \leq \bar{\gamma}_\ell \leq U.
\end{aligned} \tag{10}$$
The cluster-level balancing problem in Equation (10) and the unit-level problem in Equation (6) share many similarities, including the constraints on the weights and the noise-to-signal ratio.¹ However, the key difference is whether the weights can differ within clusters in order to balance the additional unit-level covariates. This is most apparent in how the ICC affects the variance penalty. When only cluster-level covariates are included, the ICC only appears in the role that the cluster size $n_\ell$ has in the variance penalty. When weights are allowed to differ by unit, however, this creates more options in how the weights are penalized.

¹The noise-to-signal ratio will in general be different when unit-level covariates are included.
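A companion sketch for the cluster-level problem in Equation (10), again with an illustrative data layout: one decision variable per control cluster and the $(1-r)n_\ell + r n_\ell^2$ penalty from the random effects model.

```python
import cvxpy as cp
import numpy as np

def cluster_balancing_weights(phi0, phi1, sizes0, sizes1, noise_ratio, icc_r,
                              lower=0.0, upper=np.inf):
    """Sketch of Equation (10). phi0, sizes0: cluster covariates and sizes
    n_ell for control clusters; phi1, sizes1: the same for treated clusters;
    noise_ratio: s^2 / C_w^2; icc_r: r."""
    m0 = phi0.shape[0]
    n1 = sizes1.sum()
    gbar = cp.Variable(m0)

    target = (sizes1[:, None] * phi1).sum(axis=0) / n1      # treated average
    weighted = phi0.T @ cp.multiply(sizes0, gbar) / n1      # weighted controls
    imbalance = cp.sum_squares(weighted - target)

    # Penalty weights (1 - r) * n_ell + r * n_ell^2 from the random effects model
    pen_wts = (1 - icc_r) * sizes0 + icc_r * sizes0 ** 2
    penalty = cp.sum(cp.multiply(pen_wts, cp.square(gbar)))

    objective = imbalance + noise_ratio * penalty / n1 ** 2
    constraints = [sizes0 @ gbar == n1, gbar >= lower]
    if np.isfinite(upper):
        constraints.append(gbar <= upper)
    cp.Problem(cp.Minimize(objective), constraints).solve()
    return gbar.value
```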
3.3 Hyperparameter selection

Both of the objective functions specified above include two terms that we consider hyperparameters: the overall noise-to-signal ratio ($s^2/C_w^2$ or $\sigma^2/C_{wx}^2$) and the ICC. The noise-to-signal ratio governs the bias-variance tradeoff. As it approaches zero, more and more emphasis is placed on achieving covariate balance; in the (unreasonable) noiseless limit, we would not need or want to penalize the variance. On the other hand, as the noise-to-signal ratio increases and the covariates are less predictive of the outcome, the optimization will prioritize variance more. In contrast, the ICC term determines the form of the variance penalty. In both cases, when the ICC is 0, the weights on each unit are penalized separately since units' outcomes are independent. When the ICC is 1, units' outcomes completely move together within a group, in which case the total weight placed on the group is penalized. Intermediate values of the ICC correspond to a mixture of these penalization schemes.

Note that the particular values of the noise-to-signal ratio and the ICC only enter the estimator through the variance penalties on the weights in Equations (6) and (10). This is why we view them as hyperparameters in the optimization problem rather than parameters to be estimated. However, we can use the data at hand to guide our choice of these hyperparameters. We do so by regressing the outcome on the covariates with a random intercept model. The random intercept model estimates cluster- and unit-level variance terms that can be used as an estimate for the ICC. To estimate the noise-to-signal ratio, we take the ratio of the estimated residual variance and the squared sum of the estimated regression coefficients from the fitted model. Below, we use this heuristic to choose the hyperparameters in our simulation studies and empirical analyses. Note that this procedure can induce a dependence between the outcomes and the weights through the hyperparameters, which can be avoided via sample splitting. Other forms of hyperparameter selection are possible, including cross-validation style approaches that evaluate balance on a held-out sample (Wang and Zubizarreta, 2020).
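As a sketch of this heuristic, the following fits a random intercept model with statsmodels and returns the two hyperparameters. The formula and column names are placeholders, and we read "squared sum of the estimated regression coefficients" as the squared $L^2$ norm of the non-intercept coefficients; other readings are possible.

```python
import statsmodels.formula.api as smf

def hyperparameters_from_random_intercepts(df, formula="y ~ x1 + x2 + x3",
                                           group_col="school"):
    """Fit a random intercept model and back out (noise_ratio, icc)."""
    fit = smf.mixedlm(formula, df, groups=df[group_col]).fit()
    tau2 = float(fit.cov_re.iloc[0, 0])     # cluster-level variance component
    sigma2 = float(fit.scale)               # unit-level residual variance
    icc = tau2 / (tau2 + sigma2)
    beta = fit.fe_params.drop("Intercept")  # fitted non-intercept coefficients
    noise_ratio = (tau2 + sigma2) / float((beta ** 2).sum())
    return noise_ratio, icc
```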
3.4 When should we include unit-level covariates in the Cluster-Only Design?

Given that under the COD it is sufficient to only include cluster-level covariates, one natural question for this design is whether it is useful to also include unit-level covariates. Including unit-level covariates may decrease the variance: balancing these covariates is akin to adjusting for baseline covariates that predict the outcome in randomized experiments. On the other hand, balancing unit-level covariates may lead to more extreme weights, and, depending on the ICC, this could lead to higher variance.

To characterize this trade-off, we consider the ratio of the design-conditional variance with and without including unit-level covariates, $V^{\text{unit}}$ and $V^{\text{clus}}$, respectively. In cases with excellent balance in both cluster- and unit-level covariates, this will give a measure of the expected difference in the overall mean squared error. To simplify the problem, we restrict our attention to the cluster-level random effects model, and assume that the ICC is unchanged when conditioning on unit-level covariates (i.e., $\rho = r$). Under these assumptions, the variance ratio is

$$\frac{V^{\text{unit}}}{V^{\text{clus}}} = \frac{\sigma^2}{s^2} \, \text{deff}(\rho), \tag{11}$$

where

$$\text{deff}(\rho) = \frac{(1 - \rho) \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i^2 + \rho \sum_{A_\ell = 0} \left( \sum_{J_i = \ell} \gamma_i \right)^2}{(1 - \rho) \sum_{A_\ell = 0} n_\ell \bar{\gamma}_\ell^2 + \rho \sum_{A_\ell = 0} \left( n_\ell \bar{\gamma}_\ell \right)^2}$$

is the design effect of including unit-level covariates.
+ conditioning on unit-level covariates, and serves to reduce the variance ratio by incorporat-
737
+ ing additional information. The more predictive the unit-level variables are, the lower we
738
+ expect this ratio to be. Counteracting this, we have the design effect deff(ρ). Rearranging
739
+ Equation (11), we see that in order for it to be beneficial to include unit-level covariates,
740
+ the variance after conditioning on unit-level covariates must decrease by at least the design
741
+ effect, σ2 < s2deff(ρ).
742
+ For any given value of ρ, we expect the cluster-level weights to be less extreme, because
743
+ they do not have to additionally balance the unit-level covariates, so typically deff(ρ) ≥ 1.
744
+ Whether the sum of the squared weights on the units or on the clusters matters more to the
745
+ design effect depends on the ICC. When ρ is small, there is little effect of clustering on the
746
+ variance, and so we have a standard tradeoff: including unit-level covariates will increase
747
+ efficiency as long as the effective sample size does not decrease by more than 1 − σ2/s2.
748
+ Conversely, when ρ is large, the sum of the squared weights on the clusters will dominate,
749
+ in which case the unit-level weights can differ substantially from their average cluster-level
750
+ weight without incurring much extra variance. In this case including unit-level covariates can
751
+ improve efficiency even if they are only somewhat predictive. Hedges and Hedberg (2007)
752
+ reported that from 41 clustered randomized experiments in education, the ICCs range from
753
+ 0.07 to 0.31, with an average value of 0.17. Small et al. (2008) report that ICCs in the
754
+ range of .002 to 0.03 in public health interventions that target clusters such as hospitals or
755
+ clinics. Thus, in education settings — where ICCs are larger, and we typically have strongly
756
+ predictive covariates such as prior test scores — it will likely be beneficial to include unit-
757
+ level covariates. In contrast, for public health settings it may depend much more on the
758
+ particular context and the set of unit-level covariates available.
759
+ 15
760
+
761
+ 4
762
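The comparison in Equation (11) is easy to compute for candidate weights. A small sketch, with illustrative array names:

```python
import numpy as np

def design_effect(gamma, cluster, gbar, sizes, rho):
    """deff(rho) from Equation (11). gamma: unit-level weights on control
    units; cluster: their cluster labels; gbar, sizes: cluster-level weights
    and cluster sizes for the control clusters."""
    unit_sq = np.sum(gamma ** 2)
    clus_sq = sum(np.sum(gamma[cluster == l]) ** 2 for l in np.unique(cluster))
    num = (1 - rho) * unit_sq + rho * clus_sq
    den = (1 - rho) * np.sum(sizes * gbar ** 2) + rho * np.sum((sizes * gbar) ** 2)
    return num / den

def unit_covariates_helpful(sigma2, s2, deff):
    # Beneficial when sigma^2 < s^2 / deff(rho): the residual-variance
    # reduction must outweigh the design effect
    return sigma2 < s2 / deff
```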
+ Extensions and Inference
763
+ 4.1
764
+ Dealing with poor balance
765
+ When there are a large number of cluster-level covariates relative to the total number of
766
+ clusters — or conversely, when there is a small number of clusters overall — it may be
767
+ difficult to find weights that achieve good balance.
768
+ Often this occurs when the overlap
769
+ between the treated and control cluster covariate distributions is limited (Keele et al., 2022).
770
+ This is true for the application in Section 6.1 below. We consider two different approaches
771
+ to account for this. First, we outline using an outcome model to correct for the bias due
772
+ to remaining imbalance. Second, we consider changing the estimand by also weighting the
773
+ treated units to find a maximally sized overlapping set between the treated and control units.
774
+ 4.1.1
775
+ Bias correction with an outcome estimator
776
+ Bias correction, sometimes also called augmentation, is a popular approach to reducing bias
777
+ due to imbalance.
778
+ We describe the bias-correction procedure when only including both
779
+ cluster- and unit-level covariates; restricting to cluster-level covariates alone will be analo-
780
+ gous. First we estimate the conditional expectation function to get estimates ˆmwx(0, WJi, Xi).
781
+ There are many possible estimation strategies, one choice being regularized regression. Then
782
+ we perform bias-correction by estimating µ0 as
783
+ ˆµbc
784
+ 0 (γ) ≡ ˆµ0(γ) + 1
785
+ n1
786
+
787
+ Aℓ=1
788
+
789
+ Ji=ℓ
790
+ ˆmwx(0, Wℓ, Xi) − 1
791
+ n1
792
+
793
+ Aℓ=0
794
+
795
+ Ji=ℓ
796
+ γi ˆmwx(0, Wℓ, Xi).
797
+ (12)
798
If we compare this to the bias in Equation (2), we see that $\hat{\mu}_0^{bc}(\hat{\gamma})$ uses the estimated model to estimate the bias due to imbalance after weighting. Then it attempts to remove the bias by subtracting the estimated bias off of the estimate. In the bias-variance decomposition above, bias correction changes the imbalance measure to be over the worst-case model error $\hat{m} - m$, which will generally be smaller than the imbalance in the worst-case model (Hirshberg and Wager, 2021). This bias correction is akin to what has been proposed for matching estimators (Abadie and Imbens, 2011) and augmented IPW (Robins et al., 1994), and has been used with a variety of balancing weights estimators (e.g., Athey et al., 2018; Ben-Michael et al., 2021).
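A sketch of the bias-corrected estimator in Equation (12) follows; `model` stands in for any fitted outcome regression with a scikit-learn style predict method, which is our assumption for illustration:

```python
import numpy as np

def bias_corrected_mu0(mu0_hat, model, X_treated, X_control, gamma, n1):
    """Equation (12): add the estimated bias correction to the weighting
    estimate. X_treated / X_control stack the (W, X) covariates row by row."""
    m_treated = model.predict(X_treated)  # m_hat(0, W, X) on treated units
    m_control = model.predict(X_control)  # m_hat(0, W, X) on control units
    correction = m_treated.sum() / n1 - np.sum(gamma * m_control) / n1
    return mu0_hat + correction
```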
4.1.2 Subset Weights: Finding a maximally overlapping set

One issue with bias correction based on an outcome model is that the additional use of modeling can lead to extrapolation away from the control units' data (Ben-Michael et al., 2021). One alternative to outcome modeling that can be attractive, especially if overlap is limited, is to focus on a more limited estimand than the ATT. Specifically, one such estimand is the average treatment effect for the overlap population (ATO). The ATO corresponds to the treatment effect for the marginal population that might or might not receive the treatment of interest rather than a known, a priori well-defined population such as the treated group. Li et al. (2018) developed model-based overlap weights for the ATO that continuously down-weight the units in the tails of the propensity score distribution. We develop an analog with balancing weights for the COS setting.
To do so, we will weight the treated units as well as the control units, leading to an estimator

$$\hat{\tau}(\gamma) = \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} \gamma_i Y_i - \frac{1}{n_0} \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i Y_i. \tag{13}$$

By weighting the treated units as well as the control units, we are no longer estimating the expected effect among the treated units. We are instead trimming the estimand. To understand what the new estimand is, consider the design-conditional expectation:
$$E[\hat{\tau}(\gamma) \mid W, J, X] = \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} \gamma_i m_{wx}(0, W_\ell, X_i) - \frac{1}{n_0} \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i m_{wx}(0, W_\ell, X_i) + \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} \gamma_i \tau(W_\ell, X_i),$$

where $\tau(w, x) = E[Y_i(1, J) - Y_i(0, J) \mid W_{J_i} = w, X_i = x]$ is the conditional average treatment effect (CATE). As before, we want to find weights that balance the conditional expected control outcome. Then $\hat{\tau}(\gamma)$ will be an unbiased estimator of a weighted average of conditional treatment effects for the treated group. We will once again try to find weights that minimize the worst-case balance across a model class $\mathcal{M}_{wx}$, now also weighting the treated units:
$$\text{imbalance}^o_{\mathcal{M}_{wx}}(\gamma) = \max_{m \in \mathcal{M}_{wx}} \left| \frac{1}{n_0} \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i m(0, W_\ell, X_i) - \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} \gamma_i m(0, W_\ell, X_i) \right|.$$
The design-conditional variance of $\hat{\tau}(\gamma)$ is

$$\text{Var}\left(\hat{\tau}(\gamma) \mid W, A, J, X\right) = \sum_{\ell = 1}^{m} \left( \frac{A_\ell}{n_1} + \frac{1 - A_\ell}{n_0} \right)^2 \left[ \sum_{J_i = \ell} \gamma_i^2 \text{Var}(\varepsilon_i) + \sum_{J_i = \ell} \sum_{J_k = \ell, k \neq i} \gamma_i \gamma_k \text{Cov}(\varepsilon_i, \varepsilon_k) \right],$$

where we generalize the definition of the residual to be $\varepsilon_i = Y_i - m_{wx}(A_{J_i}, W_{J_i}, X_i)$. With this, we once again try to find weights that minimize the imbalance and the variance. Focusing on the specialization to the constrained linear transformation model class $\Psi_{wx}$ and the random effects variance model, we find weights $\gamma_i$ that solve:
$$\begin{aligned}
\min_{\gamma} \quad & \left\| \frac{1}{n_0} \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i \psi(W_\ell, X_i) - \frac{1}{n_1} \sum_{A_\ell = 1} \sum_{J_i = \ell} \gamma_i \psi(W_\ell, X_i) \right\|_2^2 + \frac{\sigma^2}{C_{wx}^2} \sum_{\ell = 1}^{m} \left( \frac{A_\ell}{n_1} + \frac{1 - A_\ell}{n_0} \right)^2 \left[ (1 - \rho) \sum_{J_i = \ell} \gamma_i^2 + \rho \left( \sum_{J_i = \ell} \gamma_i \right)^2 \right] \\
\text{subject to} \quad & \sum_{A_\ell = 0} \sum_{J_i = \ell} \gamma_i = n_0, \quad \sum_{A_\ell = 1} \sum_{J_i = \ell} \gamma_i = n_1, \quad \text{and} \quad L \leq \bar{\gamma}_\ell \leq U.
\end{aligned} \tag{14}$$
This optimization problem tries to find weights on both treated and control units such that the weighted averages of their transformed covariates are similar. Hereafter, we refer to these weights as subset weights, since they weight the subset of the data for which the covariate distributions overlap. The variance penalty in Equation (14) serves to ensure that this weighted subset is pushed towards having a larger effective number of units. Finally, Equation (14) is also related to the Lagrangian dual of the SVM problem. Tarr and Imai (2021) show that the SVM dual minimizes the same measure of imbalance, but includes a penalty to ensure that the sum of the weights is not small, leading to a different measure of the “size” of the overlapping set.
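A minimal cvxpy sketch of the subset-weights problem (14), with our own data layout; the optional bounds on the cluster-average weights are omitted for brevity:

```python
import cvxpy as cp
import numpy as np

def subset_weights(psi, a_unit, cluster, noise_ratio, icc):
    """Sketch of Equation (14). psi: (n, d) transformed covariates for all
    units; a_unit: each unit's cluster treatment status; cluster: labels."""
    n = psi.shape[0]
    n1 = int(a_unit.sum())
    n0 = n - n1
    gamma = cp.Variable(n, nonneg=True)

    # Signed normalization: +1/n1 for treated units, -1/n0 for controls
    sign = np.where(a_unit == 1, 1.0 / n1, -1.0 / n0)
    imbalance = cp.sum_squares(psi.T @ cp.multiply(sign, gamma))

    penalty = 0
    for ell in np.unique(cluster):
        idx = np.where(cluster == ell)[0]
        scale = (1.0 / n1 if a_unit[idx[0]] == 1 else 1.0 / n0) ** 2
        penalty = penalty + scale * ((1 - icc) * cp.sum_squares(gamma[idx])
                                     + icc * cp.square(cp.sum(gamma[idx])))

    constraints = [cp.sum(gamma[np.where(a_unit == 0)[0]]) == n0,
                   cp.sum(gamma[np.where(a_unit == 1)[0]]) == n1]
    cp.Problem(cp.Minimize(imbalance + noise_ratio * penalty),
               constraints).solve()
    return gamma.value
```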
4.2 Variance Estimation and Uncertainty Quantification

We now turn to constructing asymptotically valid confidence intervals for $\mu_0$ by estimating the variance of $\hat{\mu}_0(\hat{\gamma}) - \mu_0$ and relying on asymptotic normality. When estimating the variance, it is important to account for dependence within clusters. One method for variance estimation in this context is the cluster-robust sandwich estimator (Huber, 1967; White, 1980), which we adapt here. Focusing on the CUD first, we estimate the conditional expectation function $\hat{m}_{wx}(0, w, x)$, then compute unit-level residuals $\hat{\varepsilon}_i \equiv Y_i - \hat{m}_{wx}(0, W_{J_i}, X_i)$. This leads to a plug-in estimator for the variance:
$$\hat{V}^{\text{unit}} \equiv \frac{1}{n_1^2} \sum_{A_\ell = 0} \left[ \sum_{J_i = \ell} \gamma_i^2 \hat{\varepsilon}_i^2 + \sum_{J_i = \ell} \sum_{J_k = \ell, k \neq i} \gamma_i \gamma_k \hat{\varepsilon}_i \hat{\varepsilon}_k \right].$$
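Because the inner double sum over a cluster, once the $i = k$ diagonal terms are included, factors as a square, $\hat{V}^{\text{unit}}$ can be computed cluster by cluster. A short sketch, together with the normal-approximation interval from Theorem 1 below:

```python
import numpy as np
from scipy.stats import norm

def v_unit_hat(gamma, resid, cluster, n1):
    """Plug-in cluster-robust variance: per cluster, the double sum equals
    (sum_i gamma_i * eps_hat_i)^2 once the i = k diagonal is included."""
    v = 0.0
    for ell in np.unique(cluster):
        mask = cluster == ell
        v += np.sum(gamma[mask] * resid[mask]) ** 2
    return v / n1 ** 2

def confidence_interval(mu0_hat, v_hat, alpha=0.05):
    z = norm.ppf(1 - alpha / 2)  # 1 - alpha/2 standard normal quantile
    half = z * np.sqrt(v_hat)
    return mu0_hat - half, mu0_hat + half
```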
Assumption ?? in the Supplementary Materials lists regularity conditions for asymptotic inference, based on conditions from Hansen and Lee (2019). Importantly, these regularity conditions allow the cluster sizes to grow, but limit the number of clusters.

Here we highlight the dependence of the variances on the sample size by indexing them by $n$. To state the asymptotic normality result below, define

$$\mu_\ell \equiv \sum_{J_i = \ell} \sum_{J_k = \ell} \hat{\gamma}_i \hat{\gamma}_k E\left[ \varepsilon_i \hat{m}(0, W_\ell, X_k) \mid W, A, J, X \right]$$

as the covariance between the true residuals and the estimated model predictions in cluster $\ell$.
Theorem 1. Under the regularity conditions in Assumption ??, with constant lower and upper bounds $L$ and $U$ in Equation (6), if $\text{imbalance}_{\mathcal{M}_{wx}}(\hat{\gamma}) = o_p\big(1/\sqrt{V_n^{\text{unit}}}\big)$, then

$$\frac{1}{\sqrt{V_n^{\text{unit}}}} \left( \hat{\mu}(\hat{\gamma}) - \tilde{\mu}_{0wx} \right) \Rightarrow N(0, 1).$$

Furthermore, if $\sup_{w,x} |\hat{m}(0, w, x) - m(0, w, x)| = o_p(1)$ and $\frac{1}{n_1^2} \sum_{A_\ell = 0} \mu_\ell \to 0$, then

$$\frac{\hat{V}^{\text{unit}}}{V_n^{\text{unit}}} \to 1$$

in probability, and consequently,

$$\frac{1}{\sqrt{\hat{V}^{\text{unit}}}} \left( \hat{\mu}(\hat{\gamma}) - \tilde{\mu}_{0wx} \right) \Rightarrow N(0, 1).$$
Theorem 1 implies that we can construct approximate $1 - \alpha$ confidence intervals as $\hat{\mu}_0(\hat{\gamma}) \pm z_{1-\alpha/2} \sqrt{\hat{V}^{\text{unit}}}$, where $z_{1-\alpha/2}$ is the $1 - \alpha/2$ quantile of the standard normal distribution. The key assumption is that the weights $\hat{\gamma}$ can achieve good balance, in the sense that the worst-case imbalance $\text{imbalance}_{\mathcal{M}_{wx}}(\hat{\gamma})$ converges to zero faster than $1/\sqrt{V^{\text{unit}}}$. This rate depends on the correlation within clusters. If units are uncorrelated, it will scale with the total number of units; if units are perfectly correlated then it will scale with the number of clusters; see Hansen and Lee (2019) for further discussion on rates of convergence with clustered data. Whether the weights can actually achieve this level of balance depends on the model class. Hirshberg et al. (2019) show that for reproducing kernel Hilbert spaces with i.i.d. data, the imbalance will be small enough, while Hirshberg and Wager (2021) show that the bias-corrected estimator will have small enough bias in more general settings. See Ben-Michael et al. (2021) for further discussion. Finally, Theorem 1 assumes that the estimated model $\hat{m}$ is consistent and that the covariance between the true residuals and the estimated model predictions converges to zero, ensuring that the variance estimator is consistent. The latter can be guaranteed by using sample splitting or cross-fitting, or by using models with restricted complexity.
+ Below, we estimate ˆm via regularized weighted least squares on the control units, using the weights from Equation (10) above. If we only include an intercept and exclude all of the covariates in the regression, then these variance estimates are equivalent to the cluster-robust sandwich estimates of a weighted mean. However, this approach may be conservative, as it disregards the variance reduction due to balancing the covariates.
+ Finally, by including a separate regression for the treated units, we can also compute a plug-in estimate of the variance for the maximal overlap estimator ˆτ(γ) above. In addition, to construct a variance estimate for the COD we can follow the same procedure: (i) estimate the conditional expectation function ˆmw(0, w); (ii) estimate the residuals ˆεi ≡ Yi − ˆmw(0, WJi); and (iii) create a plug-in estimate for the variance:
+ ˆV^clus ≡ (1/n₁²) Σ_{ℓ: Aℓ=0} γ̄ℓ² Σ_{i: Ji=ℓ} Σ_{k: Jk=ℓ} ˆεi ˆεk.
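+ Because the cluster-level weights are constant within each cluster, the double sum again collapses to a squared residual sum; a short illustrative sketch under the same assumed names, with gamma_bar mapping each control cluster to its weight:
+ def v_clus(gamma_bar, eps, cluster, control_clusters, n1):
+     # Cluster-level plug-in variance for the COD weights.
+     total = 0.0
+     for ell in control_clusters:
+         total += gamma_bar[ell] ** 2 * np.sum(eps[cluster == ell]) ** 2
+     return total / n1 ** 2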
+ 5  Simulation Study
+ Next, we conduct a simulation study to understand the performance of balancing weights using a simulation design from Keele et al. (2021) developed to evaluate multilevel matching. First, we describe the data generation process (DGP), which we alter slightly to manipulate the level of overlap between the treated and control distributions. The DGP partially depends on empirical data from a summer school reading intervention that follows the COS template. In the data, there are 18 treated schools with 1,367 students, and 26 control schools with 2,060 students. There are 5 student-level variables and 9 school-level variables. The student-level variables are reading and math test scores and indicator variables for race, ethnicity, and sex. The school-level covariates include the percentage of students who receive free/reduced price lunch, who are English language learners, and who are proficient in math and reading based on state standardized tests. The school-level covariates also include the share of teachers who are novice (e.g., in their first year), the rate of year-to-year staff turnover, and student average daily attendance.
+ For this DGP, we first fit a school-level propensity score model where we regressed the observed treatment indicator on the following set of school-level variables: the percentage of students receiving free/reduced price lunch, the percentage of students who are English language learners, the percentage of teachers who are novice, and student average daily attendance. We denote this estimated propensity score as ˆe(w). We define the latent probability of treatment as:
+ Z* = ˆe(w)/c + Unif(−0.5, 0.5),
+ where c controls the level of overlap between treated and control clusters. Observed treatment status is generated via the following model: Zj = 1(Z* > 0.25).
+ Next, we fit an outcome model for the observed data. Here, we regressed reading scores on the student-level covariates, with the basis expanded to include interactions between race and test scores. After model fitting, we save ˆβ0, the intercept from this regression. We use τ to denote the true treatment effect in the simulation, and set it to be 0.3 of a standard deviation of the raw outcome measure. We denote student-level reading scores with Rij, student-level math scores with Mij, and the percentage of students proficient in math and reading in each school with Pj. Next, we generate potential outcomes under control as:
+ y0 = ˆβ0 + 2.5 Rij + 2.5 Mij + 1.9 Pj + v1,
+ where v1 is a draw from a normal distribution that is mean zero with a standard deviation of 12. We selected these parameter values so that misspecification would produce a bias of approximately 0.3 standard deviations on a standardized scale. A bias of this magnitude is large enough to completely obscure the true treatment effect. Next, we generated potential outcomes under treatment as y1 = y0 + τ, and we generated simulated outcomes as Ỹij = Zj y1 + (1 − Zj) y0. Note that in this DGP, selection into treatment at the school level is only a function of school-level covariates, but the potential outcomes under control are a function of the student-level test scores and, to a lesser extent, the overall quality of the school as measured by the percentage of proficient students. This DGP also induces a correlation within clusters. The average ICC across simulations was 0.29 with a standard deviation of 0.04. We judge this to be a common data structure in educational COS designs. Using this DGP, we conduct two different simulation studies.
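+ As a compact illustration of this DGP, the following sketch generates treatments and outcomes; e_hat, c, beta0_hat, the covariate arrays R, M, P, and the student-to-school index are placeholders for the quantities estimated from the reading-intervention data, and tau is passed on the raw outcome scale (0.3 of a standard deviation in the paper):
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+
+ def simulate(e_hat, c, beta0_hat, R, M, P, school, tau):
+     # School-level latent score and treatment assignment
+     z_star = e_hat / c + rng.uniform(-0.5, 0.5, size=e_hat.shape)
+     Z = (z_star > 0.25).astype(int)
+     # Student-level potential outcomes; v1 ~ N(0, 12^2)
+     y0 = beta0_hat + 2.5 * R + 2.5 * M + 1.9 * P[school] + rng.normal(0, 12, size=R.shape)
+     y1 = y0 + tau
+     Zi = Z[school]                  # expand school treatment to students
+     return Zi * y1 + (1 - Zi) * y0, Z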
+ 5.1  Simulation study 1
+ In the first study, we conduct a comparative analysis of adjustment methods for COS designs. First, we produce a naive estimate of the treatment effect as the difference in means without any covariate adjustment, and an estimate using multilevel matching as implemented in Pimentel et al. (2018). Next, we implement our balancing weights in two ways: first, as balancing weights by solving Equation (6), and second, as subset balancing weights by solving Equation (14). We set the hyperparameters via the heuristic in Section 3.3, using the estimated coefficients and variance components from a multilevel regression model. In this simulation, we focus on how the performance changes as we vary the level of overlap by setting c to values of 1, 2.5, 7.5, and 10. When c = 1 this will induce poor overlap, and when c = 10 there is excellent overlap, with intermediate values increasing the level of overlap. For each scenario, we repeated the simulation 1,000 times, and we report the bias and root mean-squared error (RMSE). In our results, we standardize the bias, dividing it by the standard deviation of the control group's outcomes from the original data.
+ In the first panel of Figure 1, we plot the bias as a function of overlap for all four estimation methods. We observe that matching and both types of weights reduce bias compared to an unadjusted estimate. However, when overlap is poor, both sets of weights remove substantially more bias than multilevel matching. The performance of matching could be improved by trimming treated observations. However, with the balancing weights, we can reduce the bias compared to matching without having to change the estimand. The subset weights also alter the estimand, but allow for an unbiased treatment effect estimate.
+ [Figure 1: Bias and RMSE for matching and two different weighting estimators by overlap condition. Left panel: standardized bias by degree of overlap (poor, low, medium, good); right panel: RMSE by degree of overlap. Methods shown: Unadjusted, ML Matching, Weighting, Subset Weighting.]
+ We next compare the methods in terms of RMSE. Both matching and weighting discard data. Matching discards some of the control schools and units, while weighting gives some of the control schools and students zero weight. One open question is whether one method or the other does so in a more efficient fashion. In the second panel of Figure 1, we plot the RMSE as a function of overlap for all the estimation methods. In this scenario, the difference in performance between matching and balancing weights is clear. While the subset weights have the lowest RMSE, the difference between the two weighting methods is minor. However, both forms of balancing weights outperform matching across all overlap scenarios by nearly a factor of 10. Balancing weights thus manage to reduce bias while also retaining a much larger effective sample size, which improves efficiency. Overall, we find that balancing weights are a clear improvement over matching. Balancing weights have lower bias than matching when overlap is poor and also are considerably more efficient.
+ 5.2  Simulation study 2
+ In the second simulation, we focus on the performance of the proposed plug-in variance estimator, relative to using the standard weighted cluster-robust sandwich estimator, for the balancing weights solving Equation (6). In this simulation, we fixed the overlap parameter at 10 and vary the number of clusters. We control the number of clusters by resampling clusters with replacement from the original data, and then generate outcomes and treatments following the DGP above. We used cluster sample sizes of 50, 100, 150, 200 and 250. For each scenario, we repeated the simulation 1,000 times, and we report the average standard error estimate and the length of the 95% confidence interval.
+ [Figure 2: Comparative performance for two different variance estimators. Left panel: average standard errors by number of clusters (50–250); right panel: confidence interval length by number of clusters. Estimators shown: Plug-in SE, Sandwich SE.]
+ Figure 2 shows the results for the good overlap scenario. In the first panel, we compare the magnitude of the two variance estimates. As we expect, our proposed plug-in method produces smaller standard errors by removing variance due to the covariates. The difference between the two methods is largest when the number of clusters is small. These smaller standard errors also produce narrower confidence intervals. We also measured nominal coverage of the confidence intervals for both methods and found that both methods led to over-coverage of the true effect. This result is consistent with the general finding that the sandwich variance estimator for weighting methods is conservative. The pattern from the good overlap scenario is very similar for the other two overlap settings, so we report those results in the supplementary materials.
+ 6  Applications
+ Next, we present results from two different empirical applications. The first, from education, compares the performance of Catholic and public schools. The overlap between Catholic and public schools is known to be poor (Keele et al., 2022), which allows us to study the performance of the subset weights in a context for which they were designed. The second application, from health services research, compares different residency programs for surgeons. Here, overlap is much better, but the sample sizes may prove computationally challenging for multilevel matching.
+ 6.1  Catholic Schools
+ Our analysis is a replication of Keele et al. (2022), which used multilevel matching and highlighted the limited amount of overlap between Catholic and public schools. Here, we compare results based on our proposed weighting methods to those based on multilevel matching. The data are a public release of the 1982 High School and Beyond survey and include records for 7,185 high school students from 160 schools. Of these schools, 70 are Catholic schools and are thus considered treated in this application, while the remainder are public high schools and thus serve as a reservoir of controls. The average number of students sampled per Catholic school is approximately 50 and ranges from 20 to 67, while the average number of students sampled per public school is 41 and ranges from 14 to 61. The data contain covariates on both students and schools. Student-level covariates are: an indicator for whether or not the student is female; an indicator for whether a student belongs to a particular racial/ethnic group; and a scale for socioeconomic status (SES). Three of the school-level measures are school-level averages of these student-level measures. Three additional school-level covariates are: total enrollment; the percentage of students on an academic track; and a measure of disciplinary climate. The disciplinary climate variable is a composite measure created from a factor score on measures of the number of attacks on teachers, fights, and other disciplinary incidents. This variable ranges from -1.7 to 2.7.
+ In our analysis, we computed COS balancing weights for the ATT and subset weights for the overlapping set. Using a random effects model, we estimated the two components of the hyperparameter: the estimated ICC is 0.036, and the estimated signal-to-noise ratio is 1.2. We also include two of the multilevel matches implemented in Keele et al. (2022). The first of these matches retains all the Catholic schools, and the second trimmed 10 Catholic schools to improve the balance and increase overlap. Table 1 contains a comparison of how well each method balanced the baseline covariates as measured by the standardized difference: the difference in weighted/matched Catholic and public school means divided by the pooled standard deviation before adjustment. The lack of overlap is apparent in the size of the standardized differences in the school-level covariates; several of the standardized differences are larger than 0.50, and three exceed 1. We observe that balancing weights outperform matching in terms of balance. While the standardized differences for the COS balancing weights are still fairly large for two covariates, they are less than half of those obtained via multilevel matching. Moreover, if we are willing to alter the estimand, the subset weights are able to nearly exactly balance the Catholic and public school distributions.
+ Table 1: Balance Table Comparing Catholic and Public Schools on Baseline Covariate Distributions.
+ Covariate                      Unweighted  Balancing  Subset   Matching  Matching –
+                                            Weights    Weights            Trimmed
+ Student SES                       0.48       0.00      0.00      0.33      0.13
+ % Students Minority              -0.14       0.26      0.00      0.24      0.22
+ % Students Female                -0.00      -0.10     -0.00      0.04     -0.09
+ Enrollment                       -0.80      -0.17     -0.01     -0.58     -0.66
+ % Students on Academic Track      1.52       0.32      0.00      1.27      0.90
+ Disciplinary Climate Scale       -1.64      -0.47     -0.01     -0.92     -0.92
+ School SES Average                1.17       0.15      0.00      0.79      0.31
+ Note: Cell entries are standardized differences, the difference in means divided by the pooled standard deviation.
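+ For reference, a small sketch of the standardized-difference computation behind Table 1, using one common convention for the pooled standard deviation (the average of the two group variances); the array names are illustrative:
+ import numpy as np
+
+ def std_diff(x, treat, w=None):
+     # (Weighted) treated-minus-control mean gap divided by the
+     # pooled standard deviation before adjustment.
+     w = np.ones_like(x, dtype=float) if w is None else w
+     m1 = np.average(x[treat == 1], weights=w[treat == 1])
+     m0 = np.average(x[treat == 0], weights=w[treat == 0])
+     sd = np.sqrt((x[treat == 1].var(ddof=1) + x[treat == 0].var(ddof=1)) / 2)
+     return (m1 - m0) / sd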
+ To better understand the differences between the COS weights and the COS subset weights, we provide descriptive statistics on the largest weights. For the balancing weights, the vast majority of the weights are between 0 and 1, but there are 63 weights that are larger than 10, and the largest weight has a value of 92. For the COS subset weights, the largest weight is 14. This should reduce the likelihood of unstable behavior in the treatment effect estimates due to large weights. Next, we measure the effective sample sizes to understand the loss of information due to weighting. In our data, before adjustment there are 5,273 students. The effective sample size for the balancing weights is 1,710, and 1,036 for the subset weights. For comparison, the effective sample size for the match with all treated schools is 570, and 392 for the match that trimmed treated schools.
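+ The effective sample sizes reported here are consistent with the usual Kish formula, the squared sum of the weights divided by the sum of the squared weights; we assume that convention in this one-line sketch:
+ def effective_sample_size(w):
+     w = np.asarray(w, dtype=float)
+     return w.sum() ** 2 / (w ** 2).sum()   # Kish effective sample size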
+ Next, we review the point estimates for the Catholic school effect. Note that the outcome is a standardized test score, so estimates are measured in standard deviations. First, the unadjusted effect is 0.43 with a 95% CI of (0.31, 0.55). Next, the Catholic school effect estimated by balancing weights is -0.08. The 95% confidence interval using the sandwich variance estimator is (-0.76, 0.59), and the confidence interval based on our proposed plug-in estimator is (-0.47, 0.31). The Catholic school estimate based on subset weights is 0.07. The 95% confidence interval using the sandwich variance estimator is (-0.29, 0.44), and the confidence interval based on our proposed plug-in estimator is (-0.08, 0.24). Note that these estimates are not directly comparable, as they target different estimands, but we observe in both cases small point estimates with confidence intervals that include zero. Notably, the plug-in variance estimator produces shorter confidence intervals, consistent with the simulation results. For comparison, we next report the estimates based on multilevel matching. The estimate based on multilevel matching is 0.26 (95% CI: 0.12, 0.40), and the estimate based on matching with trimming is 0.13 (95% CI: -0.06, 0.31). The estimates based on matching are closer to those based on weighting if we include additional bias reduction via outcome modeling. This suggests that weighting was much more successful than matching at removing bias without reference to outcomes.
+ Finally, while the subset balancing weights were able to balance the data better even with poor overlap, we had to change the estimand. To aid in the interpretation of this estimand, we plot the covariate means for Catholic and public schools before and after subset weighting in Figure 3. That is, we compare the raw Catholic school means to the weighted Catholic school means, and the same for public schools. We observe that Catholic and public schools do not differ much in terms of gender and racial mix. While Catholic and public schools differ some in terms of SES, the key difference is in terms of disciplinary climate. For this covariate, we observe the largest difference between the weighted and unweighted estimates for both Catholic and public schools. In the overlap population of schools, Catholic schools have a much stricter disciplinary climate and public schools have a more permissive disciplinary climate.
+ [Figure 3: Covariate means for Catholic and Public schools before and after subset weighting. Covariates shown: % Students Female, % Students Minority, % Students on Academic Track, Disciplinary Climate Scale, School SES Average, Student SES. Contrasts: Unweighted − Catholic, Subset Wgt. − Catholic, Unweighted − Public, Subset Wgt. − Public.]
+ 6.2  Surgical Training
+ One important question in health services research is whether certain aspects of surgical training have an effect on patient outcomes (Asch et al., 2009; Bansal et al., 2016; Zaheer et al., 2017; Sullivan et al., 2012). Sellers et al. (2018) studied whether surgeons from university-based residency programs produce superior patient outcomes compared with surgeons trained in community-based residency programs. The original study used data based on all-payer hospital discharge claims from New York, Florida and Pennsylvania from 2012–2013. In the data, surgeons were classified as having attended a university-based (UBR) or non-university-based (NUBR) residency based on the residency program listed in the American Medical Association Masterfile. The data contain covariates for surgeons, including age, sex, and years of training completion, and covariates for patients, such as sociodemographic and clinical characteristics, including 31 comorbidities based on Elixhauser indices (Elixhauser et al., 1998). The primary outcome was postoperative complications. Complications were identified using ICD-9 diagnosis codes and collapsed into a binary variable indicating the development of 1 or more complications. They compared surgeon performance between UBR and NUBR surgeons for patients that underwent one of 44 common operations performed by general surgeons in an inpatient setting (Sellers et al., 2018).
+ In this application, there are 498 treated surgeons and 1,201 control surgeons. Overall, there are 86,305 patients operated on by UBR surgeons, and 193,307 patients operated on by NUBR surgeons. The number of patients treated by each surgeon varied from five to 1,074 over the two-year period. In the UBR application, standardized differences before weighting are relatively small, one indication that overlap is good for this data set. As such, we only target the ATT estimand and do not use the subset weights. We again set hyperparameter values based on estimates from a random effects model. In this data, the estimated ICC is 0.016, and the noise-to-signal ratio is 0.221. Figure 4 contains a balance plot for the subset of covariates with the largest imbalances. For each of these covariates, weighting improves any imbalance relative to the unadjusted data, giving close to exact balance.
+ In the full data, there are 279,611 patients. After weighting, the effective sample size is 262,672. Here, the loss of sample size is relatively small. We also attempted to implement a multilevel match on a desktop computer with 32 GB of RAM. However, we received an error that R was unable to allocate enough memory to complete the match. For the balancing weights, we are able to estimate weights in less than a minute. As such, balancing weights have clear computational advantages for COS applications with larger sample sizes.
+ Next, we estimate treatment effects. First, we estimate the unadjusted treatment effect via a regression model with clustering at the surgeon level. We find that for UBR surgeons, the estimated percentage of cases with a complication is 1.35 percentage points lower, and the 95% confidence interval does not contain zero (95% CI: -2.14, -0.55). Once we account for confounding via weighting, the estimated treatment effect is -0.85 percentage points. We estimated 95% confidence intervals using both the cluster-robust sandwich variance estimator (95% CI: -1.62, -0.09) and our proposed plug-in variance estimator (95% CI: -1.53, -0.18). Because the balance is excellent, further bias reduction via an outcome model does not meaningfully change the estimate. In sum, we find that UBR surgeons do indeed appear to cause fewer complications, even when accounting for observed confounding. However, the magnitude of the treatment effect is smaller once we control for observed covariates.
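+ For the unadjusted, surgeon-clustered regression, a sketch using statsmodels (the data frame df and the column names complication_pct, ubr, and surgeon_id are illustrative, not the study's actual variable names):
+ import statsmodels.formula.api as smf
+
+ # Outcome in percentage points; standard errors clustered on surgeon
+ fit = smf.ols("complication_pct ~ ubr", data=df).fit(
+     cov_type="cluster", cov_kwds={"groups": df["surgeon_id"]}
+ )
+ print(fit.params["ubr"], fit.conf_int().loc["ubr"])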
+ [Figure 4: Balance plot, UBR vs. NUBR surgeons, for the set of covariates with the largest baseline imbalances. Covariates shown: Years Experience, White, Urgent Admission, Surgeon Age, Self Insurance, Other Racial Cat., No. of Procedures, No. of Essential Procedures, No. of Complex Procedures, No. Comorbidities, Medicare, Medicaid, Male, Hispanic, Emergency Admission, Elective Admission, Commercial Insur., Age, African-American; x-axis: Standardized Difference; contrasts: Weighted, Unweighted.]
+ 7  Conclusion
+ We introduced an approximate balancing weight estimator for designs where treatments are administered to entire clusters, such as schools or hospitals. To do so, we considered two potential estimands, one adjusting for both unit-level and cluster-level covariates and another adjusting for cluster-level covariates alone, that have different identification assumptions associated with them. For both of these estimands, we find weights to minimize an upper bound on the mean square error of the resulting weighting estimator. When adjusting for cluster-level covariates alone, the weights are constant within clusters, while the weights vary across units within clusters when including unit-level covariates. This affects the overall variance, as units' outcomes can be correlated within clusters. We showed that when it is sufficient to adjust only for cluster-level covariates in order to estimate the ATT, there can be efficiency gains to including unit-level covariates as well, depending on the predictive strength of those covariates and the level of correlation between units' outcomes within clusters. We also considered two adaptations to deal with cases where it is impossible to find weights that achieve good covariate balance: (i) bias-correction via an outcome model and (ii) changing the estimand and finding an overlapping weighted subset of the data. We then showed how to construct confidence intervals that are asymptotically valid under certain conditions. In a simulation study, we demonstrated that our proposed weighting estimator outperformed multilevel matching. This was especially true in terms of using more of the sample, which resulted in higher efficiency and lower RMSE. In two empirical applications, we found balancing weights also had several practical advantages over multilevel matching.
+ There are several avenues for future work. First, while we propose a heuristic for choosing the hyperparameters from the observed data, an important question is how to choose these hyperparameters in a rigorous, data-driven way that keeps the weights independent of the outcomes. Second, we can consider resampling approaches to uncertainty quantification, such as the block weighted bootstrap proposed by Cui et al. (2022). Finally, many COS designs have a longitudinal structure, where data are available at multiple granularities over time. For example, many policy changes happen at the state level, but county- and municipality-level data series exist for the outcome of interest. Exploring these cases and extending our analysis to such settings will be important areas for future research.
+ Bibliography & References Cited
+ Abadie, A. and G. W. Imbens (2011). Bias-corrected matching estimators for average treatment effects. Journal of Business and Economic Statistics 29(1), 1–11.
+ Asch, D. A., S. Nicholson, S. Srinivas, J. Herrin, and A. J. Epstein (2009). Evaluating obstetrical residency programs using patient outcomes. JAMA 302(12), 1277–1283.
+ Athey, S., G. W. Imbens, and S. Wager (2018). Approximate residual balancing: debiased inference of average treatment effects in high dimensions. Journal of the Royal Statistical Society: Series B (Statistical Methodology) 80(4), 597–623.
+ Bansal, N., K. D. Simmons, A. J. Epstein, J. B. Morris, and R. R. Kelz (2016). Using patient outcomes to evaluate general surgery residency program performance. JAMA Surgery 151(2), 111–119.
+ Ben-Michael, E., A. Feller, and E. Hartman (2021). Multilevel calibration weighting for survey data.
+ Ben-Michael, E., A. Feller, D. A. Hirshberg, and J. R. Zubizarreta (2021). The balancing act in causal inference. arXiv preprint arXiv:2110.14831.
+ Ben-Michael, E., A. Feller, and J. Rothstein (2021). The Augmented Synthetic Control Method. Journal of the American Statistical Association 116(536), 1789–1803.
+ Chattopadhyay, A., C. H. Hase, and J. R. Zubizarreta (2020). Balancing versus modeling approaches to weighting in practice. Statistics in Medicine, in press.
+ Cochran, W. G. (1965). The planning of observational studies of human populations. Journal of the Royal Statistical Society, Series A 128(2), 234–265.
+ Cui, C., S. Yang, B. J. Reich, and D. A. Gill (2022). Matching estimators of causal effects in clustered observational studies with application to quantifying the impact of marine protected areas on biodiversity.
+ Elixhauser, A., C. Steiner, D. R. Harris, and R. M. Coffey (1998). Comorbidity measures for use with administrative data. Medical Care 36(1), 8–27.
+ Hainmueller, J. (2011). Entropy balancing for causal effects: A multivariate reweighting method to produce balanced samples in observational studies. Political Analysis 20, 25–46.
+ Hansen, B. E. and S. Lee (2019). Asymptotic theory for clustered samples. Journal of Econometrics 210(2), 268–290.
+ Hazlett, C. (2019). Kernel balancing: A flexible non-parametric weighting procedure for estimating causal effects. Statistica Sinica.
+ Hedges, L. V. and E. C. Hedberg (2007). Intraclass correlation values for planning group-randomized trials in education. Educational Evaluation and Policy Analysis 29(1), 60–87.
+ Hirshberg, D. A., A. Maleki, and J. Zubizarreta (2019). Minimax linear estimation of the retargeted mean. arXiv preprint arXiv:1901.10296.
+ Hirshberg, D. A. and S. Wager (2021). Augmented minimax linear estimation. The Annals of Statistics 49(6), 3206–3227.
+ Huber, P. J. (1967). The behavior of maximum likelihood estimates under nonstandard conditions. In Proceedings of the Fifth Berkeley Symposium on Mathematical Statistics and Probability, Volume 1, pp. 221–233. Berkeley, CA: University of California Press.
+ Keele, L., M. Lenard, and L. Page (2021). Matching methods for clustered observational studies in education. Journal of Research on Educational Effectiveness 14(3), 696–725.
+ Keele, L., M. Lenard, and L. Page (2022). Overlap violations in clustered observational studies of educational interventions. Journal of Research on Educational Effectiveness, 1–18.
+ Keele, L. J. and J. Zubizarreta (2017). Optimal multilevel matching in clustered observational studies: A case study of the effectiveness of private schools under a large-scale voucher system. Journal of the American Statistical Association 112(518), 547–560.
+ Li, F., K. L. Morgan, and A. M. Zaslavsky (2018). Balancing covariates via propensity score weighting. Journal of the American Statistical Association 113(521), 390–400.
+ Page, L. C., M. Lenard, and L. Keele (2020). The design of clustered observational studies in education. AERA Open 6(3), 1–14.
+ Pimentel, S. D., L. C. Page, M. Lenard, and L. J. Keele (2018). Optimal multilevel matching using network flows: An application to a summer reading intervention. Annals of Applied Statistics 12(3), 1479–1505.
+ Raudenbush, S. W. (1997). Statistical analysis and optimal design for cluster randomized trials. Psychological Methods 2(2), 173.
+ Robins, J. M., A. Rotnitzky, and L. P. Zhao (1994). Estimation of regression coefficients when some regressors are not always observed. Journal of the American Statistical Association 89(427), 846–866.
+ Rubin, D. B. (1974). Estimating causal effects of treatments in randomized and nonrandomized studies. Journal of Educational Psychology 66(5), 688–701.
+ Rubin, D. B. (2007). The design versus the analysis of observational studies for causal effects: parallels with the design of randomized trials. Statistics in Medicine 26(1), 20–36.
+ Rubin, D. B. (2008). For objective causal inference, design trumps analysis. The Annals of Applied Statistics 2(3), 808–840.
+ Rubinstein, M., A. Haviland, and D. Choi (2022). Balancing weights for estimated region-level data: the effect of Medicaid expansion on the uninsurance rate among states that did not expand Medicaid.
+ Sellers, M. M., L. J. Keele, C. E. Sharoky, C. Wirtalla, E. A. Bailey, and R. R. Kelz (2018). Association of surgical practice patterns and clinical outcomes with surgeon training in university- or nonuniversity-based residency program. JAMA Surgery 153(5), 418–425.
+ Small, D. S., T. R. Ten Have, and P. R. Rosenbaum (2008). Randomization inference in a group-randomized trial of treatments for depression: Covariate adjustment, noncompliance, and quantile effects. Journal of the American Statistical Association 103(481), 271–279.
+ Sullivan, M. C., G. Sue, E. Bucholz, H. Yeo, R. H. Bell Jr, S. A. Roman, and J. A. Sosa (2012). Effect of program type on the training experiences of 248 university, community, and US military-based general surgery residencies. Journal of the American College of Surgeons 214(1), 53–60.
+ Tarr, A. and K. Imai (2021). Estimating average treatment effects with support vector machines.
+ Wang, Y. and J. R. Zubizarreta (2020). Minimal dispersion approximately balancing weights: asymptotic properties and practical considerations. Biometrika 107(1), 93–105.
+ White, H. (1980). A heteroskedasticity-consistent covariance matrix estimator and a direct test for heteroskedasticity. Econometrica 48(4), 817–838.
+ Ye, T., T. Westling, L. Page, and L. Keele (2022). Nonparametric identification of causal effects in clustered observational studies with differential selection. Unpublished manuscript.
+ Zaheer, S., S. D. Pimentel, K. D. Simmons, L. E. Kuo, J. Datta, N. Williams, D. L. Fraker, and R. R. Kelz (2017). Comparing international and United States undergraduate medical education and surgical outcomes using a refined balance matching methodology. Annals of Surgery 265(5), 916–922.
+ Zhao, Q. (2019). Covariate balancing propensity score by tailored loss functions. Annals of Statistics 47(2), 965–993.
+ Zhao, Q. and D. Percival (2016). Entropy balancing is doubly robust. Journal of Causal Inference.
+ Zubizarreta, J. R. (2015). Stable weights that balance covariates for estimation with incomplete outcome data. Journal of the American Statistical Association 110(511), 910–922.
19E4T4oBgHgl3EQfzg22/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf ADDED
Binary file (69.7 kB). View file
 
3NFIT4oBgHgl3EQf5iun/content/tmp_files/2301.11390v1.pdf.txt ADDED
@@ -0,0 +1,162 @@
+ arXiv:2301.11390v1 [gr-qc] 26 Jan 2023
+ Current problems and recent advances in wormhole physics
+ (Editorial for a Special issue of Universe journal)
+ Kirill A. Bronnikov a,b,c,1, Sergey V. Sushkov d,2
+ a Center of Gravitation and Fundamental Metrology, VNIIMS, Ozyornaya ul. 46, Moscow 119361, Russia
+ b Peoples' Friendship University of Russia (RUDN University), 6 Miklukho-Maklaya St, Moscow, 117198, Russia
+ c National Research Nuclear University "MEPhI", Kashirskoe sh. 31, Moscow 115409, Russia
+ d Institute of Physics, Kazan Federal University, Kremliovskaya str. 16a, Kazan 420008, Russia
+ 1 e-mail: kb20@yandex.ru
+ 2 e-mail: sergey_sushkov@mail.ru
+ Wormholes are hypothetical space-time tunnels with nontrivial topologies capable of connecting either two distant regions of the same universe or two different universes [1–3]. From the theoretical point of view, the possibility of their existence is problematic but cannot be ruled out. If wormholes do exist, many unusual phenomena can be expected. Among them, probably the most exciting ones are shortcuts providing interstellar, intergalactic or inter-universe trips and even time machines.
+ At present, wormholes are extremely attractive and popular objects for research; it is sufficient to mention that the word "wormhole" is found in the titles of 1614 articles on the resource arXiv.org for all years and 175 articles for the last 12 months (as of 19.01.2023). Although many mathematical and physical properties of these objects have been discovered and studied in the recent decades, there remain multiple unsolved problems and opportunities of interest to be explored.
+ The present Special Issue "Recent Advances in Wormhole Physics" is aimed at highlighting some recent results in selected areas of wormhole physics. The issue is a collection of fourteen papers [4–17] that fairly well characterizes the diversity of subjects and methods of wormhole physics.
+ Very roughly, wormhole studies may be classified as follows:
+ • A search for wormhole solutions in general relativity and other theories of gravity, and investigations of their properties and conditions of their existence.
+ • Studies of mathematical, physical, metaphysical and philosophical consequences of possible wormhole existence.
+ • Assuming that wormholes do exist in the Universe, studies of their astronomical and astrophysical manifestations, in particular, their possible observational distinctions from black holes.
+ Needless to say, many studies combine some or all of these trends. Nevertheless, it can be more or less conditionally observed that the first trend is represented by the papers [4,13,14,16], the second one by [5,7,11,15], and the third one by [6,8–10,12,17].
+ Two of the papers are brief reviews [4,9]. Thus, Takafumi Kokubu and Tomohiro Harada [4] consider different physical aspects of thin-shell wormholes, both in GR and in Einstein–Gauss–Bonnet gravity, including spherical, planar and hyperbolic symmetries of space-time and the stability issues. Cosimo Bambi and Dejan Stojkovic [9] describe astronomically observable effects of wormholes as possible substitutes of black holes, including gravitational lensing and shadows, possible orbiting star trajectories, accretion disk spectra, and gravitational waves emitted at mergers of compact objects, of which one or both are wormholes.
+ Some of the papers evidently go beyond the traditional subjects of wormhole physics, concerning classical traversable wormholes. Thus, Sergey Bondarenko [5] considers their quantum counterparts, called quantum wormholes, and, in particular, their possible role in the origin of a small cosmological constant favored by observations. Elias Zafiris and Albrecht von Müller [15] discuss the possible role of Planck-scale wormholes in the so-called "ER=EPR" conjecture in quantum entanglement. Alexander Kirillov and Elena Savelova [16] discuss the conditions under which Planck-scale virtual wormholes may be converted into macroscopic and observable ones. The present authors together with Pavel Kashargin [13] discuss wormholes that can emerge in the framework of general relativity without any exotic matter in nonstatic space-times.
+ The outstanding progress of observational astronomy and cosmology, above all the discovery of gravitational waves and the pictures obtained by the Event Horizon Telescope, is apparently reflected in the wealth of studies devoted to possible observable effects due to wormholes. In the present issue, we see discussions of high-energy particle collisions in wormhole space-times [6], gravitational lensing by wormholes with unusual topologies [8], peculiar features of accretion flows [10] and nearby stellar orbits [12,17], and possible manifestations of a fractal distribution of primordial wormholes [11].
+ It should be noted that, with the whole diversity of wormhole studies presented in this Special Issue, some areas turned out to be almost unmentioned. These are axially and cylindrically symmetric wormholes with or without rotation (though some effects of rotation are discussed in [6]), so we would here refer to the reviews [18,19] and references therein. These are also stability studies concerning all kinds of perturbations of static or stationary wormholes (note that Ref. [4] only discusses the stability of thin-shell wormholes with respect to shell motion), so please see, e.g., [20,21] and references therein for more general studies.
+ Concluding, we would like to say that the study of wormholes is far from being complete, and one can expect many new interesting physical and mathematical results in this relevant and promising area of physics.
+ Funding
+ S.V.S. is supported by the RSF grant No. 21-12-00130. Partially, this work was done in the framework of the Russian Government Program of Competitive Growth of the Kazan Federal University. K.B. acknowledges partial support from the Ministry of Science and Higher Education of the Russian Federation, Project "Fundamental properties of elementary particles and cosmology" No. 0723-2020-0041, and from Project No. FSSF-2023-0003.
+ Conflict of interest
+ The authors declare that they have no conflicts of interest.
+ References
+ [1] Morris, M.S.; Thorne, K.S. Wormholes in spacetime and their use for interstellar travel: A tool for teaching general relativity. Am. J. Phys. 1988, 56, 395.
+ [2] Visser, M. Lorentzian Wormholes: From Einstein to Hawking; American Institute of Physics: Woodbury, NY, USA, 1995; 412p.
+ [3] Lobo, F.S.N. Exotic solutions in General Relativity: Traversable wormholes and "warp drive" spacetimes. In Classical and Quantum Gravity Research; Nova Science Publishers: New York, NY, USA, 2008; pp. 1–78; arXiv: 0710.4474.
+ [4] Takafumi Kokubu and Tomohiro Harada, Thin-Shell Wormholes in Einstein and Einstein–Gauss–Bonnet Theories of Gravity. Universe 2020, 6(11), 197; https://doi.org/10.3390/universe6110197.
+ [5] Sergey Bondarenko, CPTM Discrete Symmetry, Quantum Wormholes and Cosmological Constant Problem. Universe 2020, 6(8), 121; https://doi.org/10.3390/universe6080121.
+ [6] Oleg B. Zaslavskii, New Scenarios of High-Energy Particle Collisions Near Wormholes. Universe 2020, 6(12), 227; https://doi.org/10.3390/universe6120227.
+ [7] Brandon Mattingly, Abinash Kar, Matthew Gorban, William Julius, Cooper K. Watson, MD Ali, Andrew Baas, Caleb Elmore, Jeffrey S. Lee, Bahram Shakerin, Eric W. Davis and Gerald B. Cleaver, Curvature Invariants for the Alcubierre and Natário Warp Drives. Universe 2021, 7(2), 21; https://doi.org/10.3390/universe7020021.
+ [8] Kimet Jusufi, Determining the Topology and Deflection Angle of Ringholes via Gauss–Bonnet Theorem. Universe 2021, 7(2), 44; https://doi.org/10.3390/universe7020044.
+ [9] Cosimo Bambi and Dejan Stojkovic, Astrophysical Wormholes. Universe 2021, 7(5), 136; https://doi.org/10.3390/universe7050136.
+ [10] Rosaliya M. Yusupova, Ramis Kh. Karimov, Ramil N. Izmailov and Kamal K. Nandi, Accretion Flow onto Ellis–Bronnikov Wormhole. Universe 2021, 7(6), 177; https://doi.org/10.3390/universe7060177.
+ [11] Alexander A. Kirillov, Elena P. Savelova and Polina O. Vladykina, Possible Effects of the Fractal Distribution of Relic Wormholes. Universe 2021, 7(6), 178; https://doi.org/10.3390/universe7060178.
+ [12] Zdenek Stuchlik and Jaroslav Vrba, Epicyclic Oscillations around Simpson–Visser Regular Black Holes and Wormholes. Universe 2021, 7(8), 279; https://doi.org/10.3390/universe7080279.
+ [13] Kirill A. Bronnikov, Pavel E. Kashargin and Sergey V. Sushkov, Magnetized Dusty Black Holes and Wormholes. Universe 2021, 7(11), 419; https://doi.org/10.3390/universe7110419.
+ [14] Júlio C. Fabris, Tales Augusto Oliveira Gomes and Denis Campos Rodrigues, Black Hole and Wormhole Solutions in Einstein–Maxwell–Scalar Theory. Universe 2022, 8(3), 151; https://doi.org/10.3390/universe8030151.
+ [15] Elias Zafiris and Albrecht von Müller, The "ER = EPR" Conjecture and Generic Gravitational Properties: A Universal Topological Linking Model of the Correspondence between Tripartite Entanglement and Planck-Scale Wormholes. Universe 2022, 8(3), 189; https://doi.org/10.3390/universe8030189.
+ [16] Alexander A. Kirillov and Elena P. Savelova, On Possible Origin of an Artificial Wormhole. Universe 2022, 8(8), 428; https://doi.org/10.3390/universe8080428.
+ [17] Ramis Kh. Karimov, Ramil N. Izmailov and Kamal K. Nandi, On a Class of Harko–Kovacs–Lobo Wormholes. Universe 2022, 8(10), 540; https://doi.org/10.3390/universe8100540.
+ [18] Kirill A. Bronnikov, Nilton Santos and Anzhong Wang, Cylindrical systems in general relativity (review). Class. Quantum Grav. 37, 113002 (2020); arXiv: 1901.06561.
+ [19] Burkhard Kleihaus and Jutta Kunz, Rotating Wormholes. In: Lobo, F. (ed.), Wormholes, Warp Drives and Energy Conditions, Fundamental Theories of Physics, vol. 189; Springer, Cham. https://doi.org/10.1007/978-3-319-55182-1_3.
+ [20] Kirill A. Bronnikov, Scalar fields as sources for wormholes and regular black holes. Particles 2018, 1, 5; arXiv: 1802.00098.
+ [21] Francesco Cremona, Livio Pizzocchero and Olivier Sarbach, Gauge-invariant spherical linear perturbations of wormholes in Einstein gravity minimally coupled to a self-interacting phantom scalar field. Phys. Rev. D 101, 104061 (2020); arXiv: 1911.13103.
3NFIT4oBgHgl3EQf5iun/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,165 @@
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf,len=164
2
+ page_content='arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
3
+ page_content='11390v1 [gr-qc] 26 Jan 2023 Current problems and recent advances in wormhole physics (Editorial for a Special issue of Universe journal) Kirill A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
4
+ page_content=' Bronnikov,a,b,c,1 Sergey V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
5
+ page_content=' Sushkovd,2 a Center of Gravitation and Fundamental Metrology, VNIIMS, Ozyornaya ul.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
6
+ page_content=' 46, Moscow 119361, Russia b Peoples’ Friendship University of Russia (RUDN University), 6 Miklukho-Maklaya St, Moscow, 117198, Russia c National Research Nuclear University “MEPhI”, Kashirskoe sh.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
7
+ page_content=' 31, Moscow 115409, Russia d Institute of Physics, Kazan Federal University, Kremliovskaya str.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
8
+ page_content=' 16a, Kazan 420008, Russia Wormholes are hypothetical space-time tunnels with nontrivial topologies capable of connecting either two distant regions of the same universe or two different universes [1–3].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
9
+ page_content=' From the theoretical point of view, the possibility of their existence is problematic but cannot be ruled out.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
10
+ page_content=' If wormholes do exist, many unusual phenomena can be expected.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
11
+ page_content=' Among them, probably the most exciting ones are shortcuts providing interstellar, intergalactic or inter-universe trips and even time machines.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
12
+ page_content=' At present, wormholes are extremely attractive and popular objects for research, it is sufficient to mention that the word “wormhole” is found in the titles of 1614 articles on the resource arXiv.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
13
+ page_content='org for all years and 175 articles for the last 12 months (as of 19.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
14
+ page_content='01.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
15
+ page_content='2023).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
16
+ page_content=' Although many mathematical and physical properties of these objects have been discovered and studied in the recent decades, there remain multiple unsolved problems and opportunities of interest to be explored.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
17
+ page_content=' The present Special Issue “Recent Advances in Wormhole Physics” is aimed at enlighten- ing some recent results in selected areas of wormhole physics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
18
+ page_content=' The issue is a collection of fourteen papers [4–17] that fairly well characterizes the diversity of subjects and methods of wormhole physics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
19
+ page_content=' Very roughly, wormhole studies may be classified as follows: A search for wormhole solutions in general relativity and other theories of gravity, investiga- tions of their properties and conditions of their existence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
20
+ page_content=' Studies of mathematical, physical, metaphysical and philosophical consequences of possible wormhole existence.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
21
+ page_content=' Assuming that wormholes do exist in the Universe, studies of their astronomical and as- trophysical manifestations, in particular, their possible observational distinctions from black holes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
22
+ page_content=' Needless to say that many studies combine some or all of these trends.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/3NFIT4oBgHgl3EQf5iun/content/2301.11390v1.pdf'}
23
Nevertheless, it can be more or less conditionally observed that the first trend is represented by the papers [4,13,14,16], the second one by [5,7,11,15], and the third one by [6,8-10,12,17]. Two of the papers are brief reviews [4,9]. Thus, Takafumi Kokubu and Tomohiro Harada [4] consider different physical aspects of thin-shell wormholes, both in GR and in Einstein-Gauss-Bonnet gravity, including spherical, planar and hyperbolic symmetries of space-time and the stability issues. Cosimo Bambi and Dejan Stojkovic [9] describe astronomically observable effects of wormholes as possible substitutes for black holes, including gravitational lensing and shadows, possible orbiting star trajectories, accretion disk spectra, and gravitational waves emitted at mergers of compact objects, of which one or both are wormholes.

Some of the papers evidently go beyond the traditional subjects of wormhole physics, which concern classical traversable wormholes. Thus, Sergey Bondarenko [5] considers their quantum counterparts, called quantum wormholes, and, in particular, their possible role in the origin of a small cosmological constant favored by observations. Elias Zafiris and Albrecht von Müller [15] discuss the possible role of Planck-scale wormholes in the so-called "ER = EPR" conjecture in quantum entanglement. Alexander Kirillov and Elena Savelova [16] discuss the conditions under which Planck-scale virtual wormholes may be converted into macroscopic and observable ones. The present authors, together with Pavel Kashargin [13], discuss wormholes that can emerge in the framework of general relativity, without any exotic matter, in nonstatic space-times.

The outstanding progress of observational astronomy and cosmology, above all the discovery of gravitational waves and the images obtained by the Event Horizon Telescope, is apparently reflected in the wealth of studies devoted to possible observable effects of wormholes. In the present issue, we see discussions of high-energy particle collisions in wormhole space-times [6], gravitational lensing by wormholes with unusual topologies [8], peculiar features of accretion flows [10] and nearby stellar orbits [12,17], and possible manifestations of a fractal distribution of primordial wormholes [11].

It should be noted that, with the whole diversity of wormhole studies presented in this Special Issue, some areas turned out to be almost unmentioned. These are axially and cylindrically symmetric wormholes with or without rotation (though some effects of rotation are discussed in [6]); for these we refer the reader to the reviews [18,19] and references therein. These are also stability studies concerning all kinds of perturbations of static or stationary wormholes (note that Ref. [4] only discusses the stability of thin-shell wormholes with respect to shell motion); see, e.g., [20,21] and references therein for more general studies.

Concluding, we would like to say that the study of wormholes is far from being complete, and one can expect many new interesting physical and mathematical results in this relevant and promising area of physics.
Funding

S.V.S. is supported by the RSF grant No. 21-12-00130. Partially, this work was done in the framework of the Russian Government Program of Competitive Growth of the Kazan Federal University. K.B. acknowledges partial support from the Ministry of Science and Higher Education of the Russian Federation, Project "Fundamental properties of elementary particles and cosmology" No. 0723-2020-0041, and from Project No. FSSF-2023-0003.

Conflict of interest

The authors declare that they have no conflicts of interest.
References

[1] Morris, M.S.; Thorne, K.S. Wormholes in spacetime and their use for interstellar travel: A tool for teaching general relativity. Am. J. Phys. 1988, 56, 395.
[2] Visser, M. Lorentzian Wormholes: From Einstein to Hawking; American Institute of Physics: Woodbury, NY, USA, 1995; 412p.
[3] Lobo, F.S.N. Exotic solutions in General Relativity: Traversable wormholes and "warp drive" space-times. In Classical and Quantum Gravity Research; Nova Science Publishers: New York, NY, USA, 2008; pp. 1-78; arXiv: 0710.4474.
[4] Takafumi Kokubu and Tomohiro Harada, Thin-Shell Wormholes in Einstein and Einstein-Gauss-Bonnet Theories of Gravity. Universe 2020, 6(11), 197; https://doi.org/10.3390/universe6110197 - 26 Oct 2020.
[5] Sergey Bondarenko, CPTM Discrete Symmetry, Quantum Wormholes and Cosmological Constant Problem. Universe 2020, 6(8), 121; https://doi.org/10.3390/universe6080121 - 11 Aug 2020.
[6] Oleg B. Zaslavskii, New Scenarios of High-Energy Particle Collisions Near Wormholes. Universe 2020, 6(12), 227; https://doi.org/10.3390/universe6120227 - 30 Nov 2020.
[7] Brandon Mattingly, Abinash Kar, Matthew Gorban, William Julius, Cooper K. Watson, MD Ali, Andrew Baas, Caleb Elmore, Jeffrey S. Lee, Bahram Shakerin, Eric W. Davis and Gerald B. Cleaver, Curvature Invariants for the Alcubierre and Natário Warp Drives. Universe 2021, 7(2), 21; https://doi.org/10.3390/universe7020021 - 20 Jan 2021.
[8] Kimet Jusufi, Determining the Topology and Deflection Angle of Ringholes via Gauss-Bonnet Theorem. Universe 2021, 7(2), 44; https://doi.org/10.3390/universe7020044 - 16 Feb 2021.
[9] Cosimo Bambi and Dejan Stojkovic, Astrophysical Wormholes. Universe 2021, 7(5), 136; https://doi.org/10.3390/universe7050136 - 08 May 2021.
[10] Rosaliya M. Yusupova, Ramis Kh. Karimov, Ramil N. Izmailov and Kamal K. Nandi, Accretion Flow onto Ellis-Bronnikov Wormhole. Universe 2021, 7(6), 177; https://doi.org/10.3390/universe7060177 - 02 Jun 2021.
[11] Alexander A. Kirillov, Elena P. Savelova and Polina O. Vladykina, Possible Effects of the Fractal Distribution of Relic Wormholes. Universe 2021, 7(6), 178; https://doi.org/10.3390/universe7060178 - 03 Jun 2021.
[12] Zdenek Stuchlik and Jaroslav Vrba, Epicyclic Oscillations around Simpson-Visser Regular Black Holes and Wormholes. Universe 2021, 7(8), 279; https://doi.org/10.3390/universe7080279 - 01 Aug 2021.
[13] Kirill A. Bronnikov, Pavel E. Kashargin and Sergey V. Sushkov, Magnetized Dusty Black Holes and Wormholes. Universe 2021, 7(11), 419; https://doi.org/10.3390/universe7110419 - 03 Nov 2021.
[14] Júlio C. Fabris, Tales Augusto Oliveira Gomes and Denis Campos Rodrigues, Black Hole and Wormhole Solutions in Einstein-Maxwell-Scalar Theory. Universe 2022, 8(3), 151; https://doi.org/10.3390/universe8030151 - 27 Feb 2022.
[15] Elias Zafiris and Albrecht von Müller, The "ER = EPR" Conjecture and Generic Gravitational Properties: A Universal Topological Linking Model of the Correspondence between Tripartite Entanglement and Planck-Scale Wormholes. Universe 2022, 8(3), 189; https://doi.org/10.3390/universe8030189 - 18 Mar 2022.
[16] Alexander A. Kirillov and Elena P. Savelova, On Possible Origin of an Artificial Wormhole. Universe 2022, 8(8), 428; https://doi.org/10.3390/universe8080428 - 19 Aug 2022.
[17] Ramis Kh. Karimov, Ramil N. Izmailov and Kamal K. Nandi, On a Class of Harko-Kovacs-Lobo Wormholes. Universe 2022, 8(10), 540; https://doi.org/10.3390/universe8100540 - 18 Oct 2022.
[18] Kirill A. Bronnikov, Nilton Santos and Anzhong Wang, Cylindrical systems in general relativity (review). Class. Quantum Grav. 37, 113002 (2020); arXiv: 1901.06561.
[19] Burkhard Kleihaus and Jutta Kunz, Rotating Wormholes. In: Lobo, F. (ed.), Wormholes, Warp Drives and Energy Conditions; Fundamental Theories of Physics, vol. 189. Springer, Cham; https://doi.org/10.1007/978-3-319-55182-1_3.
[20] Kirill A. Bronnikov, Scalar fields as sources for wormholes and regular black holes. Particles 2018, 1, 5; arXiv: 1802.00098.
[21] Francesco Cremona, Livio Pizzocchero and Olivier Sarbach, Gauge-invariant spherical linear perturbations of wormholes in Einstein gravity minimally coupled to a self-interacting phantom scalar field. Phys. Rev. D 101, 104061 (2020); arXiv: 1911.13103.
3dAzT4oBgHgl3EQf9P73/content/tmp_files/2301.01918v1.pdf.txt ADDED
@@ -0,0 +1,1727 @@
Plant Species Richness Prediction from DESIS Hyperspectral Data: A Comparison Study on Feature Extraction Procedures and Regression Models

Yiqing Guo (a), Karel Mokany (a), Cindy Ong (b), Peyman Moghadam (c), Simon Ferrier (a), Shaun R. Levick (d)

(a) CSIRO Land and Water, Acton, ACT 2601, Australia
(b) CSIRO Energy, Kensington, WA 6151, Australia
(c) CSIRO Data61, Pullenvale, QLD 4069, Australia
(d) CSIRO Land and Water, Winnellie, NT 0822, Australia

Abstract

The diversity of terrestrial vascular plants plays a key role in maintaining the stability and productivity of ecosystems. Monitoring species compositional diversity across large spatial scales is challenging and time consuming. Airborne hyperspectral imaging has shown promise for measuring plant diversity remotely, but to operationalise these efforts over large regions we need to advance satellite-based alternatives. The advanced spectral and spatial specification of the recently launched DESIS (the DLR Earth Sensing Imaging Spectrometer) instrument provides a unique opportunity to test the potential for monitoring plant species diversity with spaceborne hyperspectral data. This study provides a quantitative assessment of the ability of DESIS hyperspectral data to predict plant species richness in two different habitat types in southeast Australia. Spectral features were first extracted from the DESIS spectra, then regressed against on-ground estimates of plant species richness, with a two-fold cross validation scheme to assess the predictive performance. We tested and compared the effectiveness of Principal Component Analysis (PCA), Canonical Correlation Analysis (CCA), and Partial Least Squares analysis (PLS) for feature extraction, and Kernel Ridge Regression (KRR), Gaussian Process Regression (GPR), and Random Forest Regression (RFR) for species richness prediction. The best prediction results were r = 0.76 and RMSE = 5.89 for the Southern Tablelands region, and r = 0.68 and RMSE = 5.95 for the Snowy Mountains region. Relative importance analysis for the DESIS spectral bands showed that the red-edge, red, and blue spectral regions were more important for predicting plant species richness than the green bands and the near-infrared bands beyond red-edge. We also found that the DESIS hyperspectral data performed better than Sentinel-2 multispectral data in the prediction of plant species richness. Our results provide a quantitative reference for future studies exploring the potential of spaceborne hyperspectral data for plant biodiversity mapping.

Keywords: hyperspectral, remote sensing, vascular plant, biodiversity, species richness, DESIS (the DLR Earth Sensing Imaging Spectrometer)

Preprint submitted to ISPRS Journal of Photogrammetry and Remote Sensing, January 6, 2023 (arXiv:2301.01918v1 [cs.LG], 5 Jan 2023).
1. Introduction

Plant biodiversity is of critical importance to the stability of terrestrial ecosystems (Frankel et al., 1995). Anthropogenic activities, such as inappropriate cropping, deforestation, overgrazing, and construction, in conjunction with climate change, have been leading to substantial degradation and loss of natural habitats, posing imminent threats to vulnerable plant species (Ceballos et al., 2015; Tollefson, 2019). Consequently, species extinctions are occurring much faster than the natural background rate (Ceballos et al., 2015). Conservation activities have been undertaken in many places around the world aiming to reduce the current rate of extinction (Leclère et al., 2020; Mokany et al., 2020).

Spatial mapping of plant biodiversity helps with a better understanding of the distribution and temporal trends of plant species richness, facilitating effective policy making in environmental conservation and restoration (Stevenson et al., 2021; Myers et al., 2021; De Palma et al., 2021). Considerable effort has been made to collect samples of plant species richness through in-situ surveys (Kattge et al., 2020). Despite the ever increasing amount of data, it has been recognised that the completeness and representativeness of biodiversity samples still remain a major challenge for the compilation of up-to-date biodiversity maps with fine resolution and wide coverage (König et al., 2019; Kattge et al., 2020). This data gap is understandable, as field expeditions are labour and time consuming, and sometimes infeasible if the location is remote or hard to access (Wang and Gamon, 2019; Guo et al., 2018). Moreover, inconsistencies among collection campaigns in their sampling strategies and ground plot sizes, confounded by human subjectivity and bias, further hamper the use of ground sampling data in downstream scientific research (Wang and Gamon, 2019).

Spaceborne remote sensing has long been deemed a promising and cost-effective tool for mapping plant biodiversity, mainly due to its ability to capture data over large areas and in a timely manner (Skidmore et al., 2021; Wang and Gamon, 2019; Bush et al., 2017; Pettorelli et al., 2016). Among different types of remote sensing data, hyperspectral data are of particular interest for the biodiversity community, as they contain rich features in the spectral domain that can be utilised to explore the underlying relationship with plant biodiversity on the ground (Ghiyamat and Shafri, 2010; Carlson et al., 2007; Guo et al., 2022). Previous studies have shown that plant biodiversity is linked to remotely sensed spectral measurements because of a well-founded interrelationship between plant species richness and primary productivity (Wang et al., 2016; Grace et al., 2016). It is hypothesised that a high diversity of plant species enhances primary production of the community, as a result of complementary functions provided by the diversified species composition. These interrelationships have the potential to enable researchers to use hyperspectral measurements, and their derived spectral indices, as remotely sensed measures of vegetation productivity and estimates of species richness.

Most research into the relationships between biodiversity and hyperspectral data has made use of hand-held or airborne hyperspectral sensors, which are more readily available than spaceborne hyperspectral data-streams (e.g., Peng et al. (2018), Asner (2008), Féret and Asner (2014), and Hacker et al. (2020)). Spaceborne hyperspectral imaging is relatively rare, with no active satellites in orbit since the Hyperion mission (which was active from 2000 to 2017). However, in preparation for the upcoming launch of EnMAP (Guanter et al., 2015), the DLR launched an exploratory system to the International Space Station, embedded into the Multi-User-System for Earth Sensing (MUSES) platform, in 2018. The DLR Earth Sensing Imaging Spectrometer (DESIS) (Eckardt et al., 2015; Mafanya et al., 2022) provides a unique opportunity to test the potential for monitoring plant species diversity with spaceborne hyperspectral data. It delivers hyperspectral images with 235 spectral bands over the visible and near-infrared regions of 400-1000 nm, with a spectral resolution of 2.55 nm and a spatial resolution of 30 m (Eckardt et al., 2015; Alonso et al., 2019; Krutz et al., 2019). The high resolutions in both spectral and spatial domains make DESIS a promising data source for estimating biodiversity from space. However, there is so far a lack of quantitative studies assessing the ability of DESIS hyperspectral data to predict plant species richness values on the ground. It is worth noting the added challenge that spaceborne hyperspectral imagery such as DESIS has a larger pixel size (30 m) than typical airborne imagery (ranging from a few centimeters to several meters). As the signal of a large pixel tends to comprise a mix of multiple species, many techniques previously used to quantify richness with hyperspectral data, which detect individual species and then sum up to get richness (e.g., Asner (2008); Féret and Asner (2014)), cannot be applied.

The original bands in hyperspectral imagery are not orthogonal but rather highly collinear with each other, presenting a high degree of redundancy of information. Such redundancy can be removed to some extent by transforming the original spectral bands into an orthogonal space of lower dimensionality. Among popular algorithms for dimensionality reduction are Principal Component Analysis (PCA) (Xu et al., 2019; Jia and Richards, 1999), Canonical Correlation Analysis (CCA) (Zhao et al., 2014b; Richards and Jia, 2006), and Partial Least Squares analysis (PLS) (Feilhauer et al., 2015; Hacker et al., 2020; Wang et al., 2019). These dimensionality reduction techniques reduce information redundancy by removing multicollinearity among spectral bands, enabling the extraction of spectral features from the original spectral data of hundreds of bands. In contrast to feature selection, which chooses a subset of the original bands (or spectral indices computed from a subset of the original bands), feature extraction techniques such as PCA, CCA, and PLS are able to make use of information in all bands by transforming them into compact yet informative features. Compared with pre-defined vegetation indices such as the Ratio Vegetation Index (RVI) and Normalised Difference Vegetation Index (NDVI), spectral features generated with feature extraction have shown better performance in extracting useful information from hyperspectral measurements (Zhao et al., 2014b). Following the extraction of spectral features, regression analysis can then be conducted to explore potential relationships between the extracted features and target biological variables. Commonly applied regression algorithms include Kernel Ridge Regression (KRR), Gaussian Process Regression (GPR), and Random Forest Regression (RFR). Statistical regression based on extracted spectral features has been shown to be effective in addressing biological problems with hyperspectral remote sensing data. For example, in a study to detect unintended herbicide damage in crops with hyperspectral measurements, dimensionality reduction was conducted with CCA in order to extract useful information to discriminate between healthy and damaged crops (Zhao et al., 2014b). In another study aiming to retrieve foliar traits from hyperspectral data, PLS was used to reduce the spectral dimensionality, with adequate retrieval accuracies being achieved for 10 out of the 11 functional traits (Hacker et al., 2020). These studies demonstrate that feature extraction and statistical regression can be effective tools in addressing biological problems with hyperspectral measurements.

In this study, we aimed to assess the potential for DESIS hyperspectral data to predict on-ground plant species richness in two regions of southeast New South Wales, Australia: the Southern Tablelands and Snowy Mountains. Our approach focused on spectral feature extraction from the DESIS spectra, and subsequent regression against field-measured plant species richness. We tested combinations of different feature extraction procedures (PCA, CCA, and PLS) and regression models (GPR, KRR, and RFR) for the predictive performance of species richness with DESIS data. Through quantitative analyses, we sought to address primarily the following important questions: (1) How much variation in plant species richness can be explained with DESIS data? (2) Which parts of the spectrum had the most explanatory power? (3) Could similar results be achieved with more readily available multi-spectral imagery such as Sentinel-2?
2. Materials and Methods

While DESIS hyperspectral data contain rich spectral information, modelling is needed to link such information to field-based measurements of plant species richness. Here we followed a two-step approach whereby spectral features were first extracted from DESIS spectra and then correlated to species richness through regression. In this section, we start with describing the DESIS spectra and in-situ richness samples, followed by introducing the methods for feature extraction, regression, and accuracy assessment.
2.1. Study site and field data

This study focused on two different habitat types in southeast New South Wales, Australia, namely the Southern Tablelands (34°12'26"-34°39'07"S, 150°05'57"-150°40'51"E) and Snowy Mountains (35°43'58"-36°16'30"S, 148°23'16"-148°39'02"E), as shown in Fig. 1. Both regions are located within the climate zone of Cfb (oceanic climates), according to the Köppen-Geiger climate classification system.

The Southern Tablelands region is located to the southwest of Sydney (Fig. 1). It is characterised by high-altitude plains with a rich biodiversity. There are more than 1200 plant species within the Southern Tablelands, of which 30 are listed as threatened (Fallding, 2002). A large part of the landscape has been transformed into suburbs for residential developments and pastures for grazing purposes. Considering the high degree of human interference and habitat alteration, conservation efforts have been undertaken in order to preserve endangered plant species by improving habitat connectivity and condition.

[Figure 1: Locations of in-situ plant species richness samples collected in field experiments. Map of southeast Australia showing the Southern Tablelands near Sydney and the Snowy Mountains near Canberra; scale bar 100 km.]

The Snowy Mountains region is located to the southwest of Canberra (Fig. 1). It encompasses the highest mountain ranges of the Australian Alps, serving as an important habitat for alpine-exclusive species. There are 212 species of vascular plants, of which 21 are endemic (Pickering et al., 2008). Due to its unique status in Australia's ecosystem, the plant biodiversity in the Snowy Mountains has drawn consistent interest from the research community (e.g. Körner (1995), Pickering et al. (2008), and Pickering and Green (2009)).

For on-ground measures of vascular plant species richness, we obtained plant community survey data from the NSW BioNet Vegetation Information System database (Government, 2019). Field surveys were conducted to collect species richness samples in 2016 and 2017 for the two regions, with a sampling plot area of 400 m2 (20 m x 20 m) at each surveying location. A significant bushfire event occurred during the 2019-2020 summer, with some of the sampling points situated within the affected areas. These bushfire-affected samples were excluded from the data set, based on the National Indicative Aggregated Fire Extent Datasets (NIAFED) provided by the Australian Government Department of Agriculture, Water and the Environment. After the exclusion, a total of 44 and 29 samples were used for analysis in this study for the Southern Tablelands and Snowy Mountains regions, respectively. The locations of these samples are shown in Fig. 1, and their associated information is summarised in Table 1. For each sampling plot, the number of native vascular plant species was calculated and used as the response variable in our analyses.

Table 1: Information summary of the plant species richness samples collected in field observations.

                      | Southern Tablelands           | Snowy Mountains
  Number of Samples   | 44                            | 29
  Sampling Time       | Feb 19, 2017 - Dec 07, 2017   | Feb 24, 2016 - Dec 13, 2017
  Plot Area           | 400 m2 (20 m x 20 m)          | 400 m2 (20 m x 20 m)
  Geo-extent          | 34°12'26"-34°39'07"S,         | 35°43'58"-36°16'30"S,
                      | 150°05'57"-150°40'51"E        | 148°23'16"-148°39'02"E

The histograms of species richness distribution for sampling plots in the two regions are shown in Fig. 2. Generally, the sampling plots in the Southern Tablelands show a higher richness of species than the Snowy Mountains, with a mean richness value of 44.4 for the former and 23.5 for the latter. The difference in species richness can be mainly attributed to the fact that the Southern Tablelands is located at lower altitudes with a relatively warmer climate than the mountainous region of the Snowy Mountains.
2.2. Satellite Data

The DESIS spectrometer (Krutz et al., 2019) is embedded in the MUSES platform onboard the International Space Station at an altitude of approximately 400 km. It operates in a push-broom imaging mode featuring state-of-the-art radiometric and spectral specifications. It delivers hyperspectral images with 235 spectral bands over the visible and near-infrared regions of 400-1000 nm, with a spectral resolution of 2.55 nm and a spatial resolution of 30 m. The radiometric resolution for each band is 12 bit with 1 bit gain. The signal-to-noise ratio is 195 at the wavelength of 550 nm.

[Figure 2: Histograms of species richness distribution for sampling plots in the (a) Southern Tablelands, and (b) Snowy Mountains regions. Horizontal axes: species richness; vertical axes: count of plots.]

In our study, the DESIS Level-2A product was used. It consisted of surface reflectance images with atmospheric correction having been applied. The correction was conducted with DLR's PACO (Python Atmospheric COrrection) software (De los Reyes et al., 2018), where the MODTRAN® radiative transfer model (Berk, 2016) served as the module for simulating atmospheric effects. As inputs for the atmospheric simulation, aerosol optical thickness and water vapour content were retrieved per pixel using reflectance in the red and NIR bands, and in bands around the water absorption feature at 820 nm, respectively. DESIS spectra intersecting with locations of the species richness samples were queried within CSIRO's Earth Analytics and Science Innovation (EASI) platform. We selected spectra captured in January 2020 for our analysis. In order to moderate the random noise present in the original spectra, the spectral resolution was down-sampled from 2.55 nm into 10.2 nm bins under the assumption of a Gaussian-shaped spectral response function. The atmospherically affected bands at 759, 769, 933.4, 943.4, and 953.2 nm, and the low-quality bands at 402.8, 410.3, and 999.5 nm at the left and right ends of the spectrum were removed. A total of 52 bands were retained after the removal. Pixels flagged as cloud by the DLR Level-1 processing were masked out.

The Bidirectional Reflectance Distribution Function (BRDF) effect in DESIS data is non-negligible, given the 4.1° field of view in conjunction with the ±15° along-track pointing capability of the DESIS sensor, and the ±25° along-track and −45° to +5° cross-track tilting capability of the MUSES platform. Following the approach adopted in Green and Craig (1985) and Ong and Cudahy (2014) for correcting the BRDF effect in hyperspectral data, in our study each DESIS spectrum was mean-normalised, with each spectral band being divided by the mean value over all bands.
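To make these two pre-processing steps concrete, the following minimal Python sketch shows one way to implement them with NumPy. It is an illustration under stated assumptions rather than the processing chain actually used in the study: `spectra` is assumed to be an (n_samples, n_bands) reflectance array with band centres in `wavelengths`, and the Gaussian spectral response is approximated by a discrete weighted average.

    import numpy as np

    def gaussian_downsample(spectra, wavelengths, centres, fwhm=10.2):
        # Down-sample spectra (n_samples, n_bands) onto new band centres,
        # assuming a Gaussian-shaped spectral response function.
        sigma = fwhm / 2.355  # convert full width at half maximum to Gaussian sigma
        centres = np.asarray(centres, dtype=float)
        weights = np.exp(-0.5 * ((wavelengths[None, :] - centres[:, None]) / sigma) ** 2)
        weights /= weights.sum(axis=1, keepdims=True)  # normalise each response curve
        return spectra @ weights.T

    def mean_normalise(spectra):
        # Divide each spectrum by its mean over all bands to moderate BRDF
        # brightness differences (after Green and Craig, 1985).
        return spectra / spectra.mean(axis=1, keepdims=True)

Dropping the atmospherically affected and low-quality bands from the binned output then leaves the 52 bands used in the analysis.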
For comparison purposes, cloud-free Sentinel-2 multispectral data observed closest to the sensing time of the DESIS spectra were also downloaded. These Sentinel-2 spectra were downloaded as Level-2A surface reflectance, with atmospheric effects corrected. Each spectrum consisted of 12 bands covering the visible and near-infrared spectral regions. The Sentinel-2 data were re-sampled to a spatial resolution of 30 m to be consistent with that of the DESIS data. A comparison between the DESIS and Sentinel-2 spectra at one of the ground sampling plots is shown in Fig. 3. Both spectra cover the visible, near-infrared, and short-wave-infrared (SWIR) regions, including the red-edge region that is critical for vegetation mapping. The Sentinel-2 and DESIS spectra show a consistent shape in these spectral regions, with DESIS having a much denser band coverage. The SWIR region is covered by Sentinel-2 only, with its two bands. Though information in the SWIR is not contained in the DESIS data, it will be provided by the upcoming DLR mission EnMAP (for which DESIS has served as a preparation mission), as shown in Fig. 3.

[Figure 3: Comparison of the DESIS (before and after pre-processing) and Sentinel-2 spectra at one of the ground sampling plots. An EnMAP spectrum simulated for a random location is also shown for reference. Axes: wavelength (nm) vs. reflectance.]
2.3. Dimensionality Reduction for DESIS Hyperspectral Data

The DESIS hyperspectral data provide rich information in the abundant and spectrally continuous bands, but these bands are highly collinear. We performed dimensionality reduction to address this problem, testing three different approaches, namely Principal Component Analysis (PCA), Canonical Correlation Analysis (CCA), and Partial Least Squares analysis (PLS). These methods aim to find a linear transformation to project the DESIS spectra from the original space of m spectral bands to a new space of a reduced dimensionality defined by k uncorrelated components, with k being smaller than m.

Mathematically, the transformation for dimensionality reduction is written as:

    T = XW,                                                        (1)

where the input data for the transformation is X, an n x m matrix consisting of the original spectra, with n being the number of observed spectra and m being the number of spectral bands; the output of the transformation is T = [t_1, t_2, ..., t_k], an n x k matrix consisting of k components of X; and W = [w_1, w_2, ..., w_k] is an m x k matrix transforming X from the original m-dimensional space into a new space of k components. The weights in w_i are a measure of the relative contributions of the original bands to the transformed component t_i = Xw_i. The number of components, k, needs to be preset. In this study, k is selected as the one that achieves the highest cross-validation accuracy.

The PCA is an unsupervised algorithm that finds orthogonal components as the ones that explain the maximum variance in the spectral data, disregarding the target values of species richness. In contrast, CCA and PLS are supervised, with both spectral data and species richness values being taken into account in the computation of components. The difference between CCA and PLS is that CCA seeks to maximise the correlation between computed components and species richness values, while PLS aims to maximise the covariance between the two.
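As an illustration of this step, the sketch below (Python with scikit-learn; a minimal example, not the authors' code) extracts k components from a spectra matrix X. Here `X` and `y` are assumed to hold the pre-processed spectra and richness values from the previous subsections.

    from sklearn.decomposition import PCA
    from sklearn.cross_decomposition import CCA, PLSRegression

    # X: (n_samples, m_bands) pre-processed DESIS spectra; y: (n_samples,) richness
    k = 5  # number of retained components; the study selects k by cross-validation

    T_pca = PCA(n_components=k).fit_transform(X)                   # unsupervised: maximum variance
    T_pls = PLSRegression(n_components=k).fit(X, y).transform(X)   # supervised: maximum covariance with y

    # Caveat: with a single response variable, scikit-learn's CCA is capped at one
    # component, so a deflation-based CCA implementation would be needed for k > 1.
    T_cca = CCA(n_components=1).fit(X, y.reshape(-1, 1)).transform(X)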
2.4. Estimation of Species Richness with Regression Models

After reducing the dimensionality of the spectral data from the original m bands to k components, regression is conducted to predict species richness from the components, such that the mismatch between model-predicted and ground-truth species richness is minimised. The Kernel Ridge Regression (KRR), Gaussian Process Regression (GPR), and Random Forest Regression (RFR) algorithms are employed and compared, covering respectively the deterministic, Bayesian, and ensemble approaches to statistical regression. Here we focus on formulating our task of estimating species richness within the frameworks of these regression approaches.

The DESIS spectra \{x_i\}_{i=1}^{n} can be represented by their components \{t_i\}_{i=1}^{n} using one of the dimensionality reduction methods described in Subsection 2.3. The KRR transforms \{t_i\}_{i=1}^{n} into a feature space of high (potentially infinite) dimensionality via a function \phi(t_i). A linear model in the high-dimensional feature space is non-linear when projected back to the original space, thus enabling the capture of non-linear relationships in the data. The number of parameters for a linear model increases with the space dimensionality, posing a risk of over-fitting. In order to constrain the model complexity, a regularisation term is adopted in KRR to penalise the norm of the coefficient vector b. The optimisation problem is:

    \arg\min_{b} \sum_{i=1}^{n} (t_i^T b - y_i)^2 + \lambda \|b\|_2^2,        (2)

where \lambda is the regularisation parameter that controls the relative importance of the regularisation term \|b\|_2^2.

The input data for Eq. 2 are the transformed components \{t_i\}_{i=1}^{n}. Solving Eq. 2 does not involve calculating \phi(t_i) explicitly. Instead, it only requires computation of the inner product k(t_i, t_j) = \phi(t_i)\phi(t_j), where t_i and t_j are pairs from the input data \{t_i\}_{i=1}^{n}. In this study, the kernel function k(t_i, t_j) is specified as a combination of a dot-product kernel k_d(t_i, t_j), a radial-basis function kernel k_r(t_i, t_j), and a white kernel k_w(t_i, t_j):

    k(t_i, t_j) = k_d(t_i, t_j) + k_r(t_i, t_j) + k_w(t_i, t_j),
    k_d(t_i, t_j) = t_i \cdot t_j + \sigma^2,
    k_r(t_i, t_j) = \exp\!\left( -\frac{\|t_i - t_j\|_2^2}{2 l^2} \right),
    k_w(t_i, t_j) = \delta \ \text{if } t_i = t_j, \ \text{else } 0,          (3)

where \sigma, l, and \delta are hyperparameters that need to be selected with grid search. The dot-product and radial-basis function kernels account for the linearity and non-linearity of the data, respectively, while the white kernel explains the noise in the data. For a new spectrum x with its extracted components t, the predicted species richness is \hat{y} = \sum_{i=1}^{n} \alpha_i k(t_i, t), where \alpha_i is the ith element of (K + \lambda I)^{-1} y, with K_{i,j} = k(t_i, t_j).
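A minimal sketch of this KRR formulation, assuming component matrices `T_train`/`T_test` and richness values `y_train` from the previous steps, is given below. The composite kernel of Eq. 3 is computed explicitly and passed to scikit-learn's KernelRidge as a precomputed kernel matrix; the parameter values are illustrative placeholders.

    import numpy as np
    from sklearn.kernel_ridge import KernelRidge

    def composite_kernel(A, B, sigma=1.0, length=1.0, delta=1.0):
        # Dot-product + RBF + white kernel of Eq. 3; the white term is applied
        # only to exactly coincident points (i.e. the training diagonal).
        dot = A @ B.T + sigma ** 2
        sq_dist = ((A[:, None, :] - B[None, :, :]) ** 2).sum(axis=-1)
        rbf = np.exp(-sq_dist / (2.0 * length ** 2))
        white = delta * (sq_dist == 0.0)
        return dot + rbf + white

    lam = 1.0  # regularisation parameter lambda of Eq. 2 (placeholder value)
    K_train = composite_kernel(T_train, T_train)
    krr = KernelRidge(alpha=lam, kernel="precomputed").fit(K_train, y_train)
    y_pred = krr.predict(composite_kernel(T_test, T_train))  # rows: test, columns: train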
In contrast to KRR, which is formulated in a deterministic form, GPR is a Bayesian approach to regression. The underlying function correlating the DESIS components and species richness is assumed to be distributed probabilistically as a Gaussian Process (GP):

    f(t) \sim GP(m(t), k(t, t')),                                  (4)

where m(t) is the mean function, which is often set to 0, and k(t, t') is the covariance function, which can be specified as a kernel function. The input data for Eq. 4 are the transformed components \{t_i\}_{i=1}^{n}. The covariance function k(t, t') for GPR is set in the same way as that for KRR (Eq. 3), i.e., k(t, t') = k(t_i, t_j), where t_i and t_j are pairs from the input data \{t_i\}_{i=1}^{n}. In contrast to KRR, in which the optimal kernel parameters are found by grid search, the parameters of the covariance function in GPR can be automatically determined by gradient ascent on the marginal likelihood function. After the posterior likelihood is determined for the GP, the predicted species richness for a new spectrum is \hat{y} = \sum_{i=1}^{n} \alpha_i k(t_i, t), where \alpha_i is the ith element of (K + \epsilon^2 I)^{-1} y, with K_{i,j} = k(t_i, t_j) and \epsilon being a pre-set parameter explaining the noise in the data.
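The same kernel structure is available natively in scikit-learn's Gaussian process module, where the hyperparameters are tuned internally by maximising the log marginal likelihood, matching the description above. A minimal sketch (illustrative, not the authors' code; `T_train`, `y_train`, and `T_test` as before):

    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import DotProduct, RBF, WhiteKernel

    # Dot-product + RBF + white kernel, as in Eq. 3; hyperparameters are
    # optimised by gradient ascent on the marginal likelihood during fit().
    kernel = DotProduct() + RBF(length_scale=1.0) + WhiteKernel(noise_level=1.0)
    gpr = GaussianProcessRegressor(kernel=kernel, normalize_y=True)
    gpr.fit(T_train, y_train)
    y_pred, y_std = gpr.predict(T_test, return_std=True)  # predictive mean and std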
+ In addition to KRR and GPR, which belong to the deterministic and Bayesian regression categories,
+ respectively, we also tested the ensemble method of RFR. A random forest combines a number of
+ decision tree regressors and takes the average regression result over all trees as the final estimate.
+ Due to the ensemble structure, RFR tends to produce robust regression results with high resistance
+ to overfitting and data noise. The training of a random forest regressor minimises the following
+ optimisation function:
+ 
+ arg min_{θ_j}  (1/d) Σ_{j=1}^{d} Σ_{i=1}^{n} [h(t_i; θ_j) − y_i]^2 ,        (5)
+ 
+ where h(·) is the decision tree regressor with θ_j being the parameters of the jth tree, and d is the
+ number of decision trees, which was set to 100 in this study.
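+ A corresponding sketch with scikit-learn (again with the assumed placeholder arrays T, y, and
+ T_new) is:
+ 
+ from sklearn.ensemble import RandomForestRegressor
+ 
+ rfr = RandomForestRegressor(n_estimators=100)   # d = 100 trees, as in Eq. 5
+ rfr.fit(T, y)
+ y_hat = rfr.predict(T_new)                      # average of the individual tree predictions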
460
+ 2.5. Determination of Hyperparameters
461
+ Hyperparameters that need to be pre-set included the number of components k in the dimen-
462
+ sionality reduction methods PCA, CCA, and PLS, and the kernel parameters σ, l, and δ in the
463
+ non-linear regression method KRR. The number of components k determines how much informa-
464
+ tion to retain after dimensionality reduction. An optimal selection of k is able to reduce data
465
+ redundancy without excessively discarding useful information in the original DESIS spectra. In
466
+ this study, we tested different k values ranging from 1 to 10. The optimal k value was selected
467
+ based on the accuracy of species richness prediction, and the amount of variance in the spectral
468
+ data that the retained components could explain. The tunable hyperparameters σ, l, and δ for
469
+ the kernel function define the non-linearity structure of the regression model KRR. These kernel
470
+ parameters were selected based on grid search. A grid of values was tested with each parameter
471
+ varying from 10^-5 to 10^5 on a logarithmic scale. The combination of σ, l, and δ that produced
472
+ the best performance in species richness prediction was selected.
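+ A minimal sketch of such a grid search is shown below; evaluate_krr is a hypothetical helper,
+ assumed to return the validation RMSE of a KRR model fitted with the given kernel parameters
+ on the placeholder arrays T and y.
+ 
+ import itertools
+ import numpy as np
+ 
+ grid = np.logspace(-5, 5, 11)                       # 10^-5 ... 10^5 on a logarithmic scale
+ best = (np.inf, None)
+ for sigma, l, delta in itertools.product(grid, repeat=3):
+     rmse = evaluate_krr(T, y, sigma, l, delta)      # hypothetical evaluation helper
+     if rmse < best[0]:
+         best = (rmse, (sigma, l, delta))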
473
+ 2.6. Accuracy Assessment and Analyses
474
+ A two-fold validation scheme was used in this study for assessing the modelling accuracy. For
475
+ each of the Southern Tablelands and Snowy Mountains regions, the whole data set was randomly
476
+ partitioned into two subsets (Subsets I and II), with Subset I dedicated to training and Subset II
477
479
+ for validation (Round I), followed by Subset II for training and Subset I for validation (Round II).
480
+ This procedure was repeated 100 times with the data set being partitioned differently each time.
481
+ Correlation diagrams were plotted between the ground-truth species richness and the predicted
482
+ values from the DESIS spectra. The coefficient of correlation (r) and Root-Mean-Square Error
483
+ (RMSE) were calculated to evaluate the performance of the models. Results with different feature
484
+ extraction procedures (PCA, CCA, and PLS) and regression models (KRR, GPR, and RFR) were
485
+ computed and compared.
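+ The validation scheme can be sketched as follows (a simplified illustration, assuming any fitted
+ regressor model and the placeholder arrays T and y; not the authors' exact code):
+ 
+ import numpy as np
+ from scipy.stats import pearsonr
+ 
+ def two_fold_assessment(model, T, y, repeats=100, seed=0):
+     rng = np.random.default_rng(seed)
+     r_all, rmse_all = [], []
+     for _ in range(repeats):                           # 100 random re-partitions
+         idx = rng.permutation(len(y))
+         half = len(y) // 2
+         rounds = [(idx[:half], idx[half:]), (idx[half:], idx[:half])]  # Rounds I and II
+         for train, test in rounds:
+             model.fit(T[train], y[train])
+             pred = model.predict(T[test])
+             r_all.append(pearsonr(y[test], pred)[0])
+             rmse_all.append(np.sqrt(np.mean((pred - y[test]) ** 2)))
+     return np.mean(r_all), np.mean(rmse_all)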
486
+ 2.7. Band Importance Analysis
487
+ The DESIS data consist of spectral measurements over the visible and near-infrared bands
488
+ from 400 nm to 1000 nm. Importance analysis was conducted in order to analyse which spectral
489
+ bands provide more explanatory power than others in predicting plant species richness. We used
490
+ the vector length of contribution values of each band to all components used in the regression
491
+ weighted by the partial correlation coefficients as the importance index of the band:
492
+ I_i = sqrt( Σ_{j=1}^{k} (w_{i,j} · p_j)^2 ),        (6)
502
+ where I_i is the importance index for the ith band; w_{i,j} is the (i, j)th element of the weight matrix
+ W in Eq. 1, representing the contribution of the ith band to the jth component; p_j is the partial
+ correlation coefficient of the jth component with the target variable of species richness; k is the
+ number of components used in regression. In our study, the importance indices I_i are normalised
506
+ to relative values Ĩ_i with sum over all bands equal to one:
+ 
+ Ĩ_i = I_i / Σ_{i=1}^{m} I_i ,        (7)
513
+ where m is the number of spectral bands.
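+ Eqs. 6 and 7 translate directly into a short function; the sketch below assumes W is the (m x k)
+ weight matrix of Eq. 1 and p the vector of partial correlation coefficients.
+ 
+ import numpy as np
+ 
+ def band_importance(W, p):
+     # Eq. 6: vector length of component contributions weighted by partial correlations
+     I = np.sqrt(((W * p[None, :]) ** 2).sum(axis=1))
+     return I / I.sum()   # Eq. 7: relative importance, summing to one over all bands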
514
+ 2.8. Comparison with Multispectral Data
515
+ Spaceborne multispectral imagery such as Sentinel-2 is more readily available than hyperspectral.
+ Though Sentinel-2 images have fewer bands than the DESIS hyperspectral data, they are
+ delivered on a more stable and systematic basis. In this study, an analysis is conducted to see if
521
+ Sentinel-2 multispectral data are able to achieve comparable results. Through this analysis, we
522
+ hoped to examine how well Sentinel-2 could serve as a substitute for plant biodiversity mapping
523
+ in instances where hyperspectral data are unavailable.
524
+ The Sentinel-2 data set described in Section 2.2 was used for the comparison. In order to
526
+ conduct a fair comparison between hyperspectral and multispectral data with minimised differences
527
+ in instrumental specifications and acquisition conditions, a Sentinel-2-like synthetic data set was
528
+ simulated from the DESIS data. The simulation involved resampling the DESIS spectra using
529
+ the spectral response functions of Sentinel-2’s visible and near-infrared bands. Both the real and
530
+ synthetic Sentinel-2 sets were used for species richness prediction, with results being compared to
531
+ those achieved with DESIS data.
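+ The band resampling can be sketched as a response-function-weighted average; srf_by_band is an
+ assumed dictionary holding each Sentinel-2 spectral response function sampled on the DESIS
+ wavelength grid (the actual simulation details may differ).
+ 
+ import numpy as np
+ 
+ def resample_to_sentinel2(wavelengths, reflectance, srf_by_band):
+     out = []
+     for srf in srf_by_band.values():
+         out.append(np.trapz(reflectance * srf, wavelengths) / np.trapz(srf, wavelengths))
+     return np.array(out)   # band-equivalent reflectance for each simulated Sentinel-2 band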
532
+ 3. Results
533
+ 3.1. Spectral Reflectance Differences Between Species Richness Classes
534
+ The blue, green, and red curves in Fig. 4 show the DESIS spectra averaged over ground sam-
535
+ pling plots with species richness falling into the low, intermediate, and high tertiles, respectively.
536
+ Results for the Southern Tablelands and Snowy Mountains regions are displayed in Figs 4a and
537
+ b, respectively. For each region, it was observed that plots of higher richness showed a lower
539
+ reflectance in the visible range of 400 ∼ 680 nm. Considering that major absorption features of
540
+ chlorophyll are located within the visible region (Zhao et al., 2014b), the lower reflectance in this
541
+ spectral portion might indicate a higher concentration of chlorophyll. It was also observed that
542
+ plots of higher richness showed a higher reflectance in the near-infrared plateau of 780 ∼ 1000 nm
543
+ and a steeper red edge between 680 ∼ 780 nm, which might suggest a larger Leaf Area Index (LAI)
544
+ (Delegido et al., 2013) and a greater vegetation vigor (Boochs et al., 1990) for those plots. On the
545
+ basis of these observations, the spectral shape of high richness plots, as compared with spectra
546
548
+ of intermediate and low richness plots, may imply a generally richer vegetation. This is consis-
549
+ tent with findings reported in literature that high species richness enhances primary productivity
550
+ (Wang et al., 2016; Grace et al., 2016) and biomass (Malhi et al., 2020; Tilman et al., 1997).
551
+ 3.2. Hyperparameter Selection Results
552
+ Fig. 5 shows the r and RMSE values achieved with different numbers of components (ranging
553
+ from 1 to 10) being selected as features. The results were averaged over the two study regions.
554
+ It was observed that two components achieved the best performance for all three feature
+ extraction methods of PCA (Fig. 5a), CCA (Fig. 5b), and PLS (Fig. 5c). When only one
560
+ component was used with more information in the original spectral data being discarded, lower
561
+ r values and higher RMSE values were also observed, indicating a poorer performance compared
562
+ with that achieved by two components. When more than two components were retained, with a
+ higher degree of data redundancy present, weaker results were also observed. The eigenvalue,
564
+ percentage of explained variance, and cumulative percentage of explained variance with different
565
+ components are shown in Table 2. It was seen that with two components, the PCA, CCA, and PLS
566
+ could explain 93.81%, 89.34%, and 87.38% of variance in the DESIS data, respectively. Based on
567
+ these results, the number of components, k, was set to two in our experiments for dimensionality
568
+ reduction with PCA, CCA, and PLS. The first and second components are plotted in Fig. 6. The
569
+ first component depicted the general shape of the spectral brightness, while the second component
+ highlighted local spectral features of the spectrum.
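+ For example, the variance explained by the retained components can be inspected directly (a
+ sketch assuming X is the n x m matrix of DESIS spectra; shown here for PCA only):
+ 
+ from sklearn.decomposition import PCA
+ 
+ pca = PCA(n_components=10).fit(X)
+ cumulative = pca.explained_variance_ratio_.cumsum()   # compare with the cumulative % in Table 2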
571
573
+ Figure 4: Average DESIS reflectance spectra calculated from field sample plots with low, interme-
574
+ diate, and high species richness for the (a) Southern Tablelands and (b) Snowy Mountains regions.
575
+ [Figure 4 panels: reflectance (0.00–0.30) versus wavelength (400–1000 nm) for low, intermediate,
+ and high richness plots in each region.]
+ 
+ Figure 5: Impact of number of components on the estimation accuracy of plant species richness
617
+ with (a) Principal Component Analysis (PCA), (b) Canonical Correlation Analysis (CCA), and
618
+ (c) Partial Least Squares analysis (PLS).
619
+ [Figure 5 panels: coefficient of correlation (r) and RMSE versus number of components (1–10) for
+ each feature extraction method.]
+ 
+ Table 2: The eigenvalue, percentage of explained variance, and cumulative percentage of explained
+ variance for components produced with Principal Component Analysis (PCA), Canonical
+ Correlation Analysis (CCA), and Partial Least Squares analysis (PLS).
+ 
+ Component |  PCA: Eigenvalue, % of Variance, Cumulative %  |  CCA: Eigenvalue, % of Variance, Cumulative %  |  PLS: Eigenvalue, % of Variance, Cumulative %
+     1     |  44.35, 84.48, 84.48  |  40.43, 74.61, 74.61  |  40.96, 75.58, 75.58
+     2     |   4.90,  9.34, 93.81  |   7.98, 14.73, 89.34  |   6.39, 11.80, 87.38
+     3     |   2.22,  4.23, 98.04  |   4.16,  7.67, 97.01  |   4.94,  9.12, 96.50
+     4     |   0.44,  0.84, 98.87  |   0.61,  1.13, 98.14  |   0.70,  1.29, 97.79
+     5     |   0.22,  0.41, 99.29  |   0.48,  0.89, 99.03  |   0.41,  0.75, 98.55
+     6     |   0.10,  0.18, 99.47  |   0.12,  0.23, 99.26  |   0.23,  0.42, 98.97
+     7     |   0.04,  0.08, 99.55  |   0.10,  0.19, 99.45  |   0.14,  0.26, 99.22
+     8     |   0.04,  0.08, 99.63  |   0.08,  0.14, 99.59  |   0.11,  0.19, 99.42
+     9     |   0.03,  0.06, 99.69  |   0.06,  0.11, 99.69  |   0.08,  0.14, 99.56
+    10     |   0.02,  0.04, 99.73  |   0.04,  0.08, 99.77  |   0.06,  0.11, 99.67
815
+
816
+ Figure 6: The first and second components for the transformed DESIS spectra. Left column:
818
+ Result for the Southern Tablelands with (a) Principal Component Analysis (PCA), (b) Canonical
819
+ Correlation Analysis (CCA), and (c) Partial Least Squares analysis (PLS); Right column: Result
820
+ for the Snowy Mountains region with (d) PCA, (e) CCA, and (f) PLS.
821
+ [Figure 6 panels: component loadings versus wavelength (400–1000 nm) for the first and second
+ components in each region and method.]
+ 
+ The grid search result for the optimal KRR hyperparameters σ, l, and δ is shown in Fig. 7.
918
+ The best combination of kernel parameters was σ = 10^3, l = 10^3, and δ = 10. This combination
919
+ of kernel parameter values was used as default values for KRR in our experiments.
920
+ Figure 7: Selection of kernel parameters σ, l, and δ based on grid search for Kernel Ridge Regression
921
+ (KRR). The best performing combinations of kernel parameters are circled in black.
922
+ 3.3. Plant Species Richness Prediction
923
+ The assessment of different dimensionality reduction and regression methods (Table 3) showed
924
+ that, for the Southern Tablelands region, the best result was achieved with a combination of
925
+ PLS for dimensionality reduction and GPR for regression, with r being 0.76 and RMSE being
926
+ 5.89. Results with PLS as the dimensionality reduction method performed better than those with
1015
+ PCA and CCA. The PLS-based models achieved 0.75 ∼ 0.76 for r and 5.89 ∼ 5.92 for RMSE,
1016
+ respectively, better than the PCA-based models with 0.70 ∼ 0.71 for r and 5.99 ∼ 6.02 for RMSE,
1017
+ and the CCA-based models with 0.69 ∼ 0.73 for r and 5.98 ∼ 6.02 for RMSE.
1018
+ Table 3: The coefficient of correlation (r) and Root-Mean-Square Error (RMSE) between model
1019
+ predicted and ground truth plant species richness for the Southern Tablelands region.
1020
+ Dimensionality Reduction | Regression |  r   | RMSE
+ PCA                      | KRR        | 0.70 | 6.01
+ PCA                      | GPR        | 0.71 | 5.99
+ PCA                      | RFR        | 0.70 | 6.02
+ CCA                      | KRR        | 0.69 | 6.02
+ CCA                      | GPR        | 0.72 | 5.98
+ CCA                      | RFR        | 0.73 | 6.00
+ PLS                      | KRR        | 0.75 | 5.92
+ PLS                      | GPR        | 0.76 | 5.89
+ PLS                      | RFR        | 0.75 | 5.91
1061
+ The assessment results for the Snowy Mountains region are shown in Table 4.
1062
+ The Snowy
1063
+ Mountains region is located at high altitudes with less human interference and a generally lower
1064
+ species richness than the Southern Tablelands (Fig. 2). Compared with the results for the Southern
1065
+ Tablelands in Table 3, it can be seen that generally the r values were lower and RMSE were higher
1066
+ for the Snowy Mountains region (Table 4). The best r and RMSE were 0.68 and 5.95, respectively,
1067
+ achieved with a combination of PLS for dimensionality reduction and RFR for regression.
1068
+ The correlation diagrams in Fig. 8 show the relationship between the ground-truth species
1069
1071
+ Table 4: The coefficient of correlation (r) and Root-Mean-Square Error (RMSE) between model
1072
+ predicted and ground truth plant species richness for the Snowy Mountains region.
1073
+ Dimensionality Reduction | Regression |  r   | RMSE
+ PCA                      | KRR        | 0.51 | 6.17
+ PCA                      | GPR        | 0.54 | 6.03
+ PCA                      | RFR        | 0.56 | 6.01
+ CCA                      | KRR        | 0.52 | 6.14
+ CCA                      | GPR        | 0.53 | 6.08
+ CCA                      | RFR        | 0.54 | 5.99
+ PLS                      | KRR        | 0.64 | 6.08
+ PLS                      | GPR        | 0.66 | 5.97
+ PLS                      | RFR        | 0.68 | 5.95
1114
+ richness and the predicted values from the DESIS spectra, with data samples from the validation
1115
+ set. These results were produced with the best performing models in Tables 3 and 4.
1116
+ 3.4. Generalised Modelling Results
1117
+ Figs 8a and 8b show results with modelling being conducted for the Southern Tablelands and
1118
+ Snowy Mountains regions separately. When we modelled the pooled data from both regions, we
1119
+ found that accuracy decreased (Fig. 9). The prediction result was r = 0.61 and RMSE = 10.1,
1120
+ which was lower than modelling with data from only one region (Figs 8a and 8b). This indicates
1121
+ that location-specific modelling performs better than using one model to describe multiple regions.
1122
+ The difficulty of modelling relationships between hyperspectral data and plant species richness
1123
+ for multiple regions, compared with modelling for each region separately, is worth further investigation.
1124
1126
+ Figure 8: Correlation diagrams between the ground-truth species richness and the predicted values
1127
+ from the DESIS spectra for the (a) Southern Tablelands and (b) Snowy Mountains regions. The
1128
+ r and RMSE stand for the coefficient of correlation and root-mean-square error.
1129
+ Different regions may differ in the assemblages of plant species, and their compositional and struc-
1130
+ tural properties, resulting in extra variations in hyperspectral data in addition to those induced by
1131
+ the richness of species. These additional variations may add complexity in exploring useful infor-
1132
+ mation in hyperspectral data to predict plant species richness. The location-specific relationship
1133
+ between hyperspectral data and plant species richness calls for location-dependent modelling, or for
+ encoding location information into the input spectra, in future studies when mapping plant species
+ richness at continental or global scales is attempted.
+ 
+ [Figure 8 panels: model predicted versus ground truth species richness with regression line, 1:1 line,
+ and confidence band; (a) r = 0.76, RMSE = 5.89; (b) r = 0.66, RMSE = 5.97.]
1177
+ Figure 9: Correlation diagrams between the ground-truth species richness and values predicted
1178
+ from DESIS spectra with data aggregated from both the Southern Tablelands and Snowy Mountains
+ regions. The r and RMSE stand for the coefficient of correlation and root-mean-square error.
1180
+ 3.5. The Relative Importance of Spectral Bands
1181
+ The relative importance of DESIS bands in predicting plant species richness is shown in Fig.
1182
+ 10, with the aim to analyse which parts of the spectrum had the most explanatory power. Subplots
1183
+ 10a, b, and c display the results with PCA, CCA, and PLS being used as the feature extraction
1184
+ procedure, respectively. From these subplots, it was observed that bands in the red-edge spectral
1185
+ region of approx. 700 ∼ 720 nm showed the highest importance. This may suggest that the slope
1186
+ of the red-edge is an important spectral feature for plant species richness prediction, considering
1187
+ that the low, intermediate, and high species richness plots showed varied red-edge slopes in Fig.
1188
+ 4. This observation is supported by literature reporting that the red-edge is a critical spectral
1190
+ region for vegetation mapping as it is closely related to biological variables such as leaf area index
1211
+ (Delegido et al., 2013), plant vigour (Boochs et al., 1990), and biochemical contents (Mutanga and
1212
+ Skidmore, 2007). After the red-edge, the visible range of approx. 400 ∼ 700 nm also showed
+ high importance. The importance of visible bands in plant species richness prediction might be
+ justified by the fact that this portion of the spectrum, especially the red and blue bands, covers
+ the major leaf pigment absorption range. It is sensitive mainly to chlorophyll a and b contents, according
1216
+ to the sensitivity analysis result reported in Zhao et al. (2014b,a). Though less important than
1217
+ the red-edge and visible regions, the near-infrared region with wavelengths longer than 720 nm
1218
+ also showed some explanatory power. This indicated that the near-infrared region also provided
1219
+ contributory information in predicting plant species richness, as this portion of the spectrum, often
1220
+ characterised by high reflectance for vegetation, is sensitive to leaf thickness (Zhao et al., 2014a)
1221
+ and the amount, arrangement, and inclination of leaves in the canopy (Knipling, 1970).
1222
+ 3.6. Comparison with Multispectral Data
1223
+ Figs 11a and 11b show correlation diagrams between the ground-truth species richness and
1224
+ values predicted from the real Sentinel-2 multispectral data for the Southern Tablelands and Snowy
1225
+ Mountains regions, respectively. It is seen that a prediction result of r = 0.66 and RMSE = 6.27
1226
+ was achieved for the Southern Tablelands region (Fig. 11a), and r = 0.57 and RMSE = 6.31 for
1227
+ the Snowy Mountains region (Fig. 11b). The prediction results with the Sentinel-2-like synthetic
1228
+ data set are shown in Fig. 12. For the Southern Tablelands, the r and RMSE values are 0.65 and
1229
+ 6.19, while for the Snowy Mountains, the r and RMSE values are 0.57 and 6.39.
1230
+ These multispectral results were slightly worse than the hyperspectral results shown in Fig. 8.
1231
+ This could be explained by the fact that hyperspectral data contain richer information in the spec-
1232
+ tral domain. However, when taking data availability and stability into consideration, spaceborne
1233
+ multispectral imagery could serve as a reliable alternative for plant biodiversity mapping. Also,
1234
+ data such as Sentinel-2 have much more consistent and regular sampling over time, and harnessing the
1235
1237
+ Figure 10: Relative importance analysis for the DESIS bands in predicting plant species richness
1238
+ with spectral features being extracted by (a) Principal Component Analysis (PCA), (b) Canonical
1239
+ Correlation Analysis (CCA), and (c) Partial Least Squares analysis (PLS).
1240
+ [Figure 10 panels: relative importance (0.00–0.08) versus wavelength (400–1000 nm) for each
+ feature extraction method.]
+ 
+ Figure 11: Correlation diagrams between the ground-truth species richness and values predicted
1289
+ from the real Sentinel-2 multispectral data set for the (a) Southern Tablelands and (b) Snowy
1290
+ Mountains regions. The r and RMSE stand for the coefficient of correlation and root-mean-square
1291
+ error.
1292
+ added information in temporal changes in multispectral signal could provide further explanatory
1293
+ power for biodiversity analyses.
1294
+ [Figure 11 panels: model predicted versus ground truth species richness; (a) r = 0.66, RMSE = 6.27;
+ (b) r = 0.57, RMSE = 6.31.]
+ 
+ Figure 12: Correlation diagrams between the ground-truth species richness and values predicted
1337
+ from the Sentinel-2-like synthetic multispectral data set for the (a) Southern Tablelands and (b)
1338
+ Snowy Mountains regions. The r and RMSE stand for the coefficient of correlation and root-mean-
1339
+ square error.
1340
+ [Figure 12 panels: model predicted versus ground truth species richness; (a) r = 0.65, RMSE = 6.19;
+ (b) r = 0.57, RMSE = 6.39.]
+ 
+ 4. Discussion
1382
+ 4.1. Comparison to Previous Studies
1383
+ Predicting plant biodiversity from remotely sensed measurements helps with making evidence-
1384
+ informed environmental policies and conducting effective conservation activities. Hyperspectral
1385
+ imagery delivered by the recently launched DESIS instrument opens the potential for plant bio-
1386
+ diversity monitoring at a finer spatial scale and with a higher accuracy. In this study, we take
1387
+ the Southern Tablelands and Snowy Mountains regions in southeast Australia as experimental
+ sites to test the relationship between DESIS data and plant species richness. A two-step approach
1389
+ is proposed, where feature extraction techniques are first used to reduce the dimensionality of hy-
1390
+ perspectral data, followed by regression models with kernel functions to account for the linearity,
1391
+ non-linearity, and noise in data.
1392
+ Obtaining informative features from hyperspectral data is important to the success of sub-
1393
+ sequent inference of plant species richness. In previous studies, features have been primarily
1394
+ selected as a subset of the original bands, or as spectral indices computed from a subset of the
1395
+ original bands (e.g., Malhi et al. (2020), Peng et al. (2018), and Wang et al. (2016)). The bands
1396
+ or indices are often selected based on our a priori knowledge of the spectral properties, such as
1397
+ absorption features of biochemical contents. Though selected features usually offer good explain-
1398
+ ability, this approach discards the information in unselected bands. Considering the large number
1399
+ of bands in hyperspectral data, many of them would be discarded as the high collinearity of hy-
1400
+ perspectral data often requires a considerable reduction of dimensionality. In contrast, the feature
1401
+ extraction approach, as adopted in this work, makes use of the information in all original bands
1402
+ by transforming them into a new feature space of lower and non-collinear dimensionality.
1403
+ Linear models have been primarily employed in previous studies to relate features to species
1404
+ richness. For example, multiple linear regression has been adopted by Wang et al. (2016) and
1405
+ Malhi et al. (2020), and stepwise linear regression by Peng et al. (2018). However, the relationship
1406
1408
+ between features and richness might not necessarily follow a simple linear pattern. In our study,
+ a novel kernel function is proposed (Eq. 3), with the dot-product and radial-basis function kernels
+ accounting for the linearity and non-linearity of the data, respectively, and the white kernel
+ explaining the noise in the data. The ability of our model to explore both linear and non-linear
+ patterns distinguishes our work from the aforementioned studies.
1413
+ 4.2. Mechanism of Plant Species Richness Prediction
1414
+ Though detailed field surveys have been deemed as the most accurate and reliable way for
1415
+ assessing plant biodiversity, remote sensing data can serve as a proxy for large-scale and cost-
1416
+ effective biodiversity mapping.
1417
+ The mechanism justifying the use of remote sensing has been
1418
+ widely discussed in the literature. It is worth noting that the exact mechanism depends on which
1419
+ method is used and the sensor specifications (e.g., pixel size and spectral range). The richness of
1420
+ species can be estimated either directly from raw spectra, or via the extracted plant functional
1421
+ traits or types (Wang and Gamon, 2019). Methods based on the spectral variation hypothesis are
+ also intriguing, whereby the spectral variation across spatially adjacent pixels is employed as the
1423
+ proxy (Palmer et al., 2002; Fassnacht et al., 2022). Here we focus on discussing the underlying
1424
+ mechanism that underpins our study where the DESIS spectra with a pixel size of 30 m and a
1425
+ spectral range of 400–1000 nm are linked to on-ground species richness via feature extraction and
1426
+ statistical regression.
1427
+ It has been reported in literature that plant communities of high diversity tend to have an
1428
+ enhanced primary productivity (Wang et al., 2016; Grace et al., 2016) and a higher above ground
1429
+ biomass (Malhi et al., 2020; Tilman et al., 1997). Though the reason to explain the richness–
1430
+ productivity/biomass relationship is a subject of debate, a common theory is that the comple-
1431
+ mentary roles played by different species lead to lower nutrient losses and more sustainable soils
1432
+ (Tilman et al., 1996). Species complementarity allows plants to capture resources in ways that are
1433
+ complementary in space or/and time, leading to increased biomass production (Cardinale et al.,
1434
+ 33
1435
+
1436
+ 2007). For example, a high number of species allows plants to reside in various partitions of niches,
1437
+ resulting in a denser occupation of space and a higher efficiency of water, nutrition, and sunlight
1438
+ usage.
1439
+ Based on this relationship, many studies have successfully estimated plant species richness from
1440
+ hyperspectral measurements (e.g., Wang et al. (2016), Malhi et al. (2020), and Peng et al. (2018)),
1441
+ given that remotely sensed hyperspectral data is a good proxy of vegetative biomass and primary
1442
+ productivity. A positive and dynamic productivity–diversity relationship is observed in a prairie
1443
+ grassland experiment at Cedar Creek, Minnesota, USA, with NDVI being employed as a proxy of
1444
+ vegetation productivity to estimate species richness (Wang et al., 2016). The correlation between
1445
+ spectral indices and plant species diversity is also reported in Peng et al. (2018) for a semi-arid
1446
+ sandland ecosystem in Inner Mongolia, China.
1447
+ It is worth noting that previous studies are primarily focused on ground-measured (e.g., (Peng
1448
+ et al., 2018) and Wang et al. (2016)) or airborne (e.g., Asner (2008)) hyperspectral data, with a
1449
+ limited spatial range. The recently launched DESIS and PRISMA (Pignatti et al., 2013; Verrelst
1450
+ et al., 2021), and the upcoming EnMAP (Environmental Mapping and Analysis Program) (Guanter
1451
+ et al., 2015) missions, enable us to test the potential of spaceborne hyperspectral measurements
1452
+ in plant species richness mapping. Our study shows a promising correlation between the two, and
1453
+ finds that the correlation is location-dependent.
1454
+ 4.3. Limitations
1455
+ Over the past decades, in-situ samples of plant species richness have been collected via various
1456
+ survey campaigns. In total, more than 188 thousand samples had been gathered in Australia
+ as of 2018 (Gellie et al., 2018). However, most of these samples cannot be matched with a DESIS
+ observation that is temporally close enough, as DESIS did not begin operation until 2018. In this
+ study, in order to avoid large temporal discrepancies, we have limited our on-ground samples to
+ those acquired less than three years apart from their associated
1462
1464
+ DESIS spectra. The limited spatial coverage of DESIS images, and bushfires during the 2019–2020
1465
+ summer, have further reduced the number of available samples. As a result, analyses in this study
1466
+ are conducted on a relatively small number of samples. Nevertheless, the ever increasing amount
1467
+ of both satellite images and on-ground samples will enable more comprehensive assessment of the
1468
+ potential of spaceborne hyperspectral remote sensing for plant biodiversity mapping. Moreover,
1469
+ though the data set used in this work does not have information on which strata the observed species
1470
+ come from, it is worth splitting out richness for different strata in future field surveys. With strata
1471
+ information on record, we might be able to model tree richness and understory richness separately,
1472
+ and then combine them once predicted to get total richness. The results reported in this study
1473
+ may serve as a basis for future studies.
1474
+ It is important to note that the DESIS pixel size (30×30 m) does not match exactly with the plot
1475
+ area of ground species richness samples (400 m^2 = 20×20 m). Though richness measurements could
+ be scaled to a larger or smaller plot area using an assumed power relationship (S = cA^z) between
+ species richness (S) and plot area (A) (Rosenzweig, 1995), the location-specific power parameter
+ (z) is often hard to determine accurately without adequate knowledge of the experiment site. It
+ is suggested that, in future field campaigns, it is worthwhile to gather information on
1480
+ the richness-area relationship, in order to facilitate accurate up- and down-scaling of plot areas.
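+ For illustration, such a rescaling is a one-line computation once z is known; the value z = 0.25
+ below is purely a placeholder, not one estimated for these sites.
+ 
+ def scale_richness(S, A_from, A_to, z=0.25):
+     # S = c * A**z implies S_to = S_from * (A_to / A_from)**z
+     return S * (A_to / A_from) ** z
+ 
+ # e.g. scale_richness(30, 400, 900) rescales a 20x20 m plot count to a 30x30 m DESIS pixel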
1481
+ Potential inaccuracies in geo-registration of DESIS images and in geo-positioning measurements
1482
+ during field surveys may also result in geo-mismatch between pixels and ground plots. Better geo-
1483
+ registration and geo-positioning accuracies would help reduce uncertainties in predicting species
1484
+ richness in future works.
1485
+ The DESIS sensor covers the visible and near-infrared (VNIR) portions of the spectrum. As
1486
+ shown in this study, this spectral range is informative in plant biodiversity mapping. However, it is
1487
+ also worthwhile to leverage the potential of the short-wave-infrared (SWIR) bands. The upcoming
1488
+ EnMAP imaging spectroscopy mission (Guanter et al., 2015) is scheduled to be launched in 2022.
1489
1491
+ The EnMAP dual-spectrometer instrument, covering both VNIR and SWIR from 420 nm to 2450
1492
+ nm (as shown in Fig. 3), will provide an opportunity to integrate information from SWIR for plant
1493
+ biodiversity mapping. In addition to hyperspectral optical imagery, the combination of data from
1494
+ other sensor types, such as synthetic-aperture radar (SAR) or LiDAR data, could also be explored
1495
+ to improve our ability in remote mapping of plant biodiversity.
1496
+ 5. Conclusion
1497
+ Spaceborne hyperspectral remote sensing is a promising and cost-effective data source to en-
1498
+ able plant biodiversity mapping. Thanks to its advanced spectral and spatial specifications, the
1499
+ recently launched hyperspectral instrument DESIS (the DLR Earth Sensing Imaging Spectrom-
1500
+ eter) opens up an opportunity to monitor plant biodiversity at a finer spatial scale and with a
1501
+ higher accuracy. In this study, we assessed the ability of DESIS hyperspectral data in predicting
1502
+ plant species richness in the Southern Tablelands and Snowy Mountains regions in southeast New
1503
+ South Wales, Australia. The spectral features were firstly extracted, and then correlated to plant
1504
+ species richness via statistical regression. We evaluated the performance of several combinations
1505
+ of feature extraction procedures (PCA, CCA, and PLS) and regression models (KRR, GPR, and
1506
+ RFR). The main findings of this study are summarised as follows:
1507
+ (1) Plant species richness values were predicted from DESIS data in the two study regions.
1508
+ Prediction accuracies fell within a comparable range for different combinations of feature extraction
1509
+ techniques and regression models (Tables 3 and 4). The best prediction results were r = 0.76 and
1510
+ RMSE = 5.89 for the Southern Tablelands region, and r = 0.68 and RMSE = 5.95 for the Snowy
1511
+ Mountains region.
1512
+ (2) The correlation between DESIS hyperspectral data and plant species richness was region-
1513
+ specific. Modelling the correlation separately for each region produced better results than building
1514
+ a single model for all regions.
1515
1517
+ (3) The relative importance analysis conducted among DESIS bands showed that the red-edge,
1518
+ red, and blue spectral regions are more important in predicting plant species richness than the
1519
+ green bands and the near-infrared bands beyond red-edge (noting that the SWIR region is not
1520
+ sampled by DESIS).
1521
+ (4) The DESIS hyperspectral data performed better than multispectral data in predicting
1522
+ plant species richness, indicating that the provision of richer information in the spectral domain is
1523
+ important for diversity mapping.
1524
+ Results shown in this study provided a quantitative reference on the potential for spaceborne
1525
+ hyperspectral data to be used in the mapping of on-ground plant species richness. Future studies
1526
+ should focus on extending the current approach to larger areas, investigating the potential of
1527
+ upcoming hyperspectral missions that extend into the SWIR region, and exploring the combination
1528
+ of data from other sensor types.
1529
+ 6. Acknowledgement
1530
+ The authors are grateful to the anonymous reviewers for their important and insightful com-
1531
+ ments for improving this manuscript.
1532
+ References
1533
+ Alonso, K., Bachmann, M., Burch, K., Carmona, E., Cerra, D., De los Reyes, R., Dietrich, D.,
1534
+ Heiden, U., Hölderlin, A., Ickes, J., et al., 2019. Data products, quality and validation of the
1535
+ DLR Earth sensing imaging spectrometer (DESIS). Sensors 19, 4471.
1536
+ Asner, G.P., 2008. Hyperspectral remote sensing of canopy chemistry, physiology, and biodiversity
1537
+ in tropical rainforests. Hyperspectral remote sensing of tropical and sub-tropical forests , 261–
1538
+ 296.
1539
+ Berk, A., 2016. MODTRAN 5.4.0 User's Manual. Spectral Sciences Inc., Burlington, MA.
1540
1542
+ Boochs, F., Kupfer, G., Dockter, K., Kühbauch, W., 1990. Shape of the red edge as vitality
+ indicator for plants. International Journal of Remote Sensing 11, 1741–1753.
1545
+ Bush, A., Sollmann, R., Wilting, A., Bohmann, K., Cole, B., Balzter, H., Martius, C., Zlinszky,
1546
+ A., Calvignac-Spencer, S., Cobbold, C.A., et al., 2017. Connecting Earth observation to high-
1547
+ throughput biodiversity data. Nature Ecology & Evolution 1, 1–9.
1548
+ Cardinale, B.J., Wright, J.P., Cadotte, M.W., Carroll, I.T., Hector, A., Srivastava, D.S., Loreau,
1549
+ M., Weis, J.J., 2007. Impacts of plant diversity on biomass production increase through time
1550
+ because of species complementarity.
1551
+ Proceedings of the National Academy of Sciences 104,
1552
+ 18123–18128.
1553
+ Carlson, K.M., Asner, G.P., Hughes, R.F., Ostertag, R., Martin, R.E., 2007. Hyperspectral remote
1554
+ sensing of canopy biodiversity in Hawaiian lowland rainforests. Ecosystems 10, 536–549.
1555
+ Ceballos, G., Ehrlich, P.R., Barnosky, A.D., García, A., Pringle, R.M., Palmer, T.M., 2015. Ac-
1556
+ celerated modern human–induced species losses: Entering the sixth mass extinction. Science
1557
+ Advances 1, e1400253.
1558
+ De Palma, A., Hoskins, A., Gonzalez, R.E., Börger, L., Newbold, T., Sanchez-Ortiz, K., Ferrier, S.,
1559
+ Purvis, A., 2021. Annual changes in the Biodiversity Intactness Index in tropical and subtropical
1560
+ forest biomes, 2001–2012. Scientific Reports 11, 1–13.
1561
+ Delegido, J., Verrelst, J., Meza, C., Rivera, J., Alonso, L., Moreno, J., 2013. A red-edge spectral
1562
+ index for remote sensing estimation of green LAI over agroecosystems. European Journal of
1563
+ Agronomy 46, 42–52.
1564
+ Eckardt, A., Horack, J., Lehmann, F., Krutz, D., Drescher, J., Whorton, M., Soutullo, M., 2015.
1565
+ DESIS (DLR Earth sensing imaging spectrometer for the ISS-MUSES platform), in: 2015 IEEE
1566
+ International Geoscience and Remote Sensing Symposium (IGARSS), IEEE. pp. 1457–1459.
1567
1569
+ Fallding, M., 2002. A planning framework for natural ecosystems of the ACT and NSW Southern
1570
+ Tablelands. Natural Heritage Trust, NSW National Parks and Wildlife Service.
1571
+ Fassnacht, F.E., Müllerová, J., Conti, L., Malavasi, M., Schmidtlein, S., 2022. About the link
+ between biodiversity and spectral variation. Applied Vegetation Science, e12643.
1573
+ Feilhauer, H., Asner, G.P., Martin, R.E., 2015. Multi-method ensemble selection of spectral bands
1574
+ related to leaf biochemistry. Remote Sensing of Environment 164, 57–65.
1575
+ Frankel, O.H., Brown, A.H., Burdon, J.J., 1995. The Conservation of Plant Biodiversity. Cam-
1576
+ bridge University Press.
1577
+ Féret, J.B., Asner, G.P., 2014. Mapping tropical forest canopy diversity using high-fidelity imaging
+ spectroscopy. Ecological Applications 24, 1289–1296. URL:
+ https://esajournals.onlinelibrary.wiley.com/doi/abs/10.1890/13-1824.1, doi:10.1890/13-1824.1.
1602
+ Gellie, N.J., Hunter, J.T., Benson, J.S., Kirkpatrick, J.B., Cheal, D.C., McCreery, K., Brockle-
1603
+ hurst, P., 2018. Overview of plot-based vegetation classification approaches within Australia.
1604
+ Phytocoenologia 48, 251–272.
1605
+ Ghiyamat, A., Shafri, H.Z., 2010. A review on hyperspectral remote sensing for homogeneous
1606
+ and heterogeneous forest biodiversity assessment. International Journal of Remote Sensing 31,
1607
+ 1837–1856.
1608
+ Government, N., 2019. BioNet Systematic Flora Survey.
+ https://www.environment.nsw.gov.au/research/VISplot.htm. [Online; accessed 11-February-2022].
1610
+ Grace, J.B., Anderson, T.M., Seabloom, E.W., et al., 2016. Integrative modelling reveals mecha-
1611
+ nisms linking productivity and plant species richness. Nature 529, 390–393.
1612
1614
+ Green, A., Craig, M., 1985. Analysis of aircraft spectrometer data with logarithmic residuals, in:
1615
+ JPL Proc. of the Airborne Imaging Spectrometer Data Anal. Workshop, pp. 111–119.
1616
+ Guanter, L., Kaufmann, H., Segl, K., Foerster, S., Rogass, C., Chabrillat, S., Kuester, T., Hollstein,
1617
+ A., Rossner, G., Chlebek, C., et al., 2015. The EnMAP spaceborne imaging spectroscopy mission
1618
+ for Earth observation. Remote Sensing 7, 8830–8857.
1619
+ Guo, Y., Jia, X., Paull, D., 2018. Effective sequential classifier training for svm-based multitempo-
1620
+ ral remote sensing image classification. IEEE Transactions on Image Processing 27, 3036–3048.
1621
+ Guo, Y., Mokany, K., Ong, C., Moghadam, P., Ferrier, S., Levick, S., 2022. Quantitative assessment
1622
+ of DESIS hyperspectral data for plant biodiversity estimation in Australia, in: 2022 IEEE
1623
+ International Geoscience and Remote Sensing Symposium, IEEE. pp. 1744–1747.
1624
+ Hacker, P.W., Coops, N.C., Townsend, P.A., Wang, Z., 2020. Retrieving foliar traits of quercus
1625
+ garryana var. garryana across a modified landscape using leaf spectroscopy and LiDAR. Remote
1626
+ Sensing 12, 26.
1627
+ Jia, X., Richards, J.A., 1999. Segmented principal components transformation for efficient hyper-
1628
+ spectral remote-sensing image display and classification. IEEE Transactions on Geoscience and
1629
+ Remote Sensing 37, 538–542.
1630
+ Kattge, J., Bönisch, G., Díaz, S., Lavorel, S., Prentice, I.C., Leadley, P., Tautenhahn, S., Werner,
1631
+ G.D., Aakala, T., Abedi, M., et al., 2020. TRY plant trait database–enhanced coverage and
1632
+ open access. Global Change Biology 26, 119–188.
1633
+ Knipling, E.B., 1970. Physical and physiological basis for the reflectance of visible and near-infrared
1634
+ radiation from vegetation. Remote Sensing of Environment 1, 155–159.
1635
+ König, C., Weigelt, P., Schrader, J., Taylor, A., Kattge, J., Kreft, H., 2019. Biodiversity data
1636
+ integration—the significance of data resolution and domain. PLoS Biology 17, e3000183.
1637
1639
+ Körner, C., 1995. Alpine plant diversity: a global survey and functional interpretations, in: Arctic
1640
+ and alpine biodiversity: Patterns, causes and ecosystem consequences. Springer, pp. 45–62.
1641
+ Krutz, D., Müller, R., Knodt, U., Günther, B., Walter, I., Sebastian, I., Säuberlich, T., Reulke,
1642
+ R., Carmona, E., Eckardt, A., et al., 2019. The instrument design of the DLR Earth sensing
1643
+ imaging spectrometer (DESIS). Sensors 19, 1622.
1644
+ Leclère, D., Obersteiner, M., Barrett, M., Butchart, S.H., Chaudhary, A., De Palma, A., DeClerck,
+ F.A., Di Marco, M., Doelman, J.C., Dürauer, M., et al., 2020. Bending the curve of terrestrial
1646
+ biodiversity needs an integrated strategy. Nature 585, 551–556.
1647
+ Mafanya, M., Tsele, P., Zengeya, T., Ramoelo, A., 2022. An assessment of image classifiers for
1648
+ generating machine-learning training samples for mapping the invasive Campuloclinium macro-
1649
+ cephalum (Less.) DC (pompom weed) using DESIS hyperspectral imagery. ISPRS Journal of
1650
+ Photogrammetry and Remote Sensing 185, 188–200.
1651
+ Malhi, R.K.M., Anand, A., Mudaliar, A.N., Pandey, P.C., Srivastava, P.K., Sandhya Kiran, G.,
1652
+ 2020. Synergetic use of in situ and hyperspectral data for mapping species diversity and above
1653
+ ground biomass in Shoolpaneshwar Wildlife sanctuary, Gujarat. Tropical Ecology 61, 106–115.
1654
+ Mokany, K., Ferrier, S., Harwood, T.D., Ware, C., Di Marco, M., Grantham, H.S., Venter, O.,
1655
+ Hoskins, A.J., Watson, J.E., 2020.
1656
+ Reconciling global priorities for conserving biodiversity
1657
+ habitat. Proceedings of the National Academy of Sciences 117, 9906–9911.
1658
+ Mutanga, O., Skidmore, A.K., 2007. Red edge shift and biochemical content in grass canopies.
1659
+ ISPRS Journal of Photogrammetry and Remote Sensing 62, 34–42.
1660
+ Myers, B.J., Weiskopf, S.R., Shiklomanov, A.N., Ferrier, S., Weng, E., Casey, K.A., Harfoot, M.,
1661
+ Jackson, S.T., Leidner, A.K., Lenton, T.M., et al., 2021.
1662
+ A new approach to evaluate and
1663
+ reduce uncertainty of model-based biodiversity projections for conservation policy formulation.
1664
+ BioScience 71, 1261–1273.
1665
1667
+ Ong, C., Cudahy, T., 2014. Mapping contaminated soils: using remotely-sensed hyperspectral data
1668
+ to predict pH. European journal of soil science 65, 897–906.
1669
+ Palmer, M.W., Earls, P.G., Hoagland, B.W., White, P.S., Wohlgemuth, T., 2002. Quantitative
1670
+ tools for perfecting species lists. Environmetrics 13, 121–137.
1671
+ Peng, Y., Fan, M., Song, J., Cui, T., Li, R., 2018. Assessment of plant species diversity based on
1672
+ hyperspectral indices at a fine scale. Scientific Reports 8, 1–11.
1673
+ Pettorelli, N., Wegmann, M., Skidmore, A., Mücher, S., Dawson, T.P., Fernandez, M., Lucas, R.,
1674
+ Schaepman, M.E., Wang, T., O’Connor, B., et al., 2016. Framing the concept of satellite remote
1675
+ sensing essential biodiversity variables: challenges and future directions. Remote Sensing in
1676
+ Ecology and Conservation 2, 122–131.
1677
+ Pickering, C., Green, K., 2009. Vascular plant distribution in relation to topography, soils and
1678
+ micro-climate at five GLORIA sites in the Snowy Mountains, Australia. Australian Journal of
1679
+ Botany 57, 189–199.
1680
+ Pickering, C., Hill, W., Green, K., 2008. Vascular plant diversity and climate change in the alpine
1681
+ zone of the Snowy Mountains, Australia. Biodiversity and Conservation 17, 1627–1644.
1682
+ Pignatti, S., Palombo, A., Pascucci, S., Romano, F., Santini, F., Simoniello, T., Umberto, A.,
1683
+ Vincenzo, C., Acito, N., Diani, M., et al., 2013. The PRISMA hyperspectral mission: Science
1684
+ activities and opportunities for agriculture and land monitoring, in: 2013 IEEE International
1685
+ Geoscience and Remote Sensing Symposium-IGARSS, IEEE. pp. 4558–4561.
1686
+ De los Reyes, R., Richter, R., Langheinrich, M., Pflug, B., Schwind, P., 2018.
1687
+ Validation of
1688
+ a new atmospheric correction software using AERONET reference data PACO: Python-based
1689
+ Atmospheric COrrection, in: Workshop on Land Product Validation and Evolution (LPVE2018),
1690
+ pp. 1–1.
1691
1693
+ Richards, J.A., Jia, X., 2006. Remote Sensing Digital Image Analysis [4th Edition]. Springer.
1694
+ Rosenzweig, M.L., 1995. Species diversity in space and time. Cambridge University Press.
1695
+ Skidmore, A.K., Coops, N.C., Neinavaz, E., Ali, A., Schaepman, M.E., Paganini, M., Kissling,
1696
+ W.D., Vihervaara, P., Darvishzadeh, R., Feilhauer, H., et al., 2021. Priority list of biodiversity
1697
+ metrics to observe from space. Nature Ecology & Evolution 5, 896–906.
1698
+ Stevenson, S.L., Watermeyer, K., Caggiano, G., Fulton, E.A., Ferrier, S., Nicholson, E., 2021.
1699
+ Matching biodiversity indicators to policy needs. Conservation Biology 35, 522–532.
1700
+ Tilman, D., Knops, J., Wedin, D., Reich, P., Ritchie, M., Siemann, E., 1997. The influence of
1701
+ functional diversity and composition on ecosystem processes. Science 277, 1300–1302.
1702
+ Tilman, D., Wedin, D., Knops, J., 1996. Productivity and sustainability influenced by biodiversity
1703
+ in grassland ecosystems. Nature 379, 718–720.
1704
+ Tollefson, J., 2019. Humans are driving one million species to extinction. Nature 569, 171–172.
1705
+ Verrelst, J., Rivera-Caicedo, J.P., Reyes-Muñoz, P., Morata, M., Amin, E., Tagliabue, G., Pani-
1706
+ gada, C., Hank, T., Berger, K., 2021. Mapping landscape canopy nitrogen content from space
1707
+ using PRISMA data. ISPRS Journal of Photogrammetry and Remote Sensing 178, 382–395.
1708
+ Wang, R., Gamon, J.A., 2019. Remote sensing of terrestrial plant biodiversity. Remote Sensing of
1709
+ Environment 231, 111218.
1710
+ Wang, R., Gamon, J.A., Montgomery, R.A., et al., 2016. Seasonal variation in the NDVI–species
1711
+ richness relationship in a prairie grassland experiment (Cedar Creek). Remote Sensing 8, 128.
1712
+ Wang, Z., Townsend, P.A., Schweiger, A.K., Couture, J.J., Singh, A., Hobbie, S.E., Cavender-
1713
+ Bares, J., 2019. Mapping foliar functional traits and their uncertainties across three years in a
1714
+ grassland experiment. Remote Sensing of Environment 221, 405–416.
1715
1717
+ Xu, M., Jia, X., Pickering, M., Jia, S., 2019. Thin cloud removal from optical remote sensing images
1718
+ using the noise-adjusted principal components transform. ISPRS Journal of Photogrammetry
1719
+ and Remote Sensing 149, 215–225.
1720
+ Zhao, F., Guo, Y., Huang, Y., Reddy, K.N., Lee, M.A., Fletcher, R.S., Thomson, S.J., 2014a.
1721
+ Early detection of crop injury from herbicide glyphosate by leaf biochemical parameter inversion.
1722
+ International Journal of Applied Earth Observation and Geoinformation 31, 78–85.
1723
+ Zhao, F., Huang, Y., Guo, Y., Reddy, K.N., Lee, M.A., Fletcher, R.S., Thomson, S.J., 2014b. Early
1724
+ detection of crop injury from glyphosate on soybean and cotton using plant leaf hyperspectral
1725
+ data. Remote Sensing 6, 1538–1563.
1726
3dAzT4oBgHgl3EQf9P73/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
4NFKT4oBgHgl3EQfRi0P/content/tmp_files/2301.11771v1.pdf.txt ADDED
@@ -0,0 +1,977 @@
+ Phase separation of passive particles in active liquids
+ Pragya Kushwaha1, Vivek Semwal2, Sayan Maity1, Shradha Mishra2, Vijayakumar Chikkadi1
+ 1 Indian Institute of Science Education and Research Pune, India 411008
+ 2 Indian Institute of Technology (BHU) Varanasi, India 221005
+ (Dated: January 30, 2023)
+ The transport properties of colloidal particles in active liquids have been studied extensively, and this work has led to a deeper understanding of the interactions between passive and active particles. However, the phase behavior of colloidal particles in active media has received little attention. Here, we present a combined experimental and numerical investigation of passive colloids dispersed in suspensions of active particles. Our study reveals dynamic clustering of colloids in active media due to an interplay of active noise and an attractive effective potential between the colloids. The size ratio of the colloidal particles to the bacteria sets the strength of the interaction. As the relative size of the colloids increases, the effective potential becomes stronger and the average size of the clusters grows. The simulations reveal a macroscopic phase separation of passive colloids at sufficiently large size ratios. We also discuss the role of density fluctuations and hydrodynamic interactions in the emergence of the effective interactions.
+ Brownian colloids self-assemble into a wide variety of phases depending on their shapes and interactions [1–3]. Their equilibrium phase behavior is governed by the principles of equilibrium statistical mechanics [4, 5]. However, understanding the collective behavior of colloids far from equilibrium remains a challenge [6, 7]. In recent years, active matter has emerged as a new paradigm for understanding nonequilibrium systems [8–11]. Active systems are known to display many interesting phenomena, such as flocking [12, 13], motility-induced phase separation [14–16], active turbulence [17], and superfluidity [18], that are absent in equilibrium systems. Therefore, active matter offers novel approaches to colloidal assembly in systems far from equilibrium. In this letter, we investigate the phase behavior of colloidal particles dispersed in active liquids.
+ Wu and Libchaber [19] performed seminal experiments on the active transport of colloidal particles in suspensions of bacteria. They discovered anomalous diffusion and a large effective diffusion constant compared to diffusion at equilibrium, which inspired a slew of theoretical investigations and detailed experiments [20–27]. The subsequent efforts have elucidated how enhanced diffusion arises from an interplay of entrainment of colloids by bacteria, far-field hydrodynamic interactions, direct collisions, and the relative size of bacteria and colloids [23–25]. Further, the effective interaction between a pair of passive particles in active media has been the focus of several investigations. It has been predicted to be attractive, repulsive, and long-ranged, depending on the geometry of the passive particles, the activity of the active species, and their density [28–36]. This understanding has opened new routes to colloidal assembly mediated by active fluids [7, 39]. The phase behavior of active-passive mixtures is a topic of recent interest [11, 37–43, 45, 47], where experimental investigations are scarce [6, 7]. On the one hand, theory and simulations at high Peclet numbers have shown that homogeneous mixtures of active and passive particles are unstable; the underlying physics is similar to motility-induced phase separation (MIPS) [40, 42]. On the other hand, in the diffusive limit, theory and simulations of nonequilibrium binary mixtures with different diffusivities and temperatures reveal phase separation [41, 45, 46] due to a spinodal-like instability. Surprisingly, little is known about mixtures at moderate Peclet numbers. This is the range into which most active matter experiments involving living matter or synthetic systems, such as diffusio-phoretic colloids, fall. A recent study of colloids in active suspensions of bacteria reports dynamical clustering and an absence of phase separation at moderate Peclet numbers [6]. The conclusions were based on the phase diagram obtained from variations of the Peclet number and the rotation rate of the active particles. In contrast, earlier numerical studies have shown a macroscopic phase separation [43]. Therefore, it is not clear whether active-passive mixtures show macroscopic phase separation at moderate Peclet numbers.
+ This letter presents a combined experimental and numerical study of the phase behavior of colloidal particles in active liquids. The experiments were performed using colloids in bacteria suspensions, and simulations of active-passive mixtures were realized using Brownian dynamics [48–50]. Earlier simulations of active-passive mixtures, by one of the authors of this letter, had shown a significant influence of the size ratio of passive to active particles on their phase diagram [43]. Motivated by this study, our experiments were performed over a range of densities and sizes of passive colloidal particles in active suspensions of bacteria. The colloids display dynamic clustering due to an interplay of activity and an attractive effective potential. However, the average size of the clusters increases with the size of the colloidal particles, suggesting an enhanced interaction between the particles. Using simulations, we confirm an attractive effective potential between passive particles in an active medium. The strength of the interaction is shown to grow with an increasing size ratio. When the size ratio is sufficiently large, the interactions are strong enough to drive the phase separation of passive colloids. The origin of the effective potential in simulations appears to be related to long-ranged density fluctuations of active particles. In contrast, the correlations of density fluctuations of bacteria decay rapidly in experiments. These results indicate a hydrodynamic origin of the effective interactions between the colloids in our experiments, shedding new light on the phase behavior of passive particles in active media.
+ The active suspensions were prepared using E. coli cells (U5/41 type strain). The cells were cultured using well-established protocols in the literature [18, 44]. They are suspended in a motility media to get the desired concentrations. Details of the method are given in the supplementary section. The density of bacteria in our experiments was well below the density threshold for the onset of collective motion. The average speed and average size of the bacteria cells were estimated to be v = 33.84 ± 9.98 µm/s [supplementary Fig. S1(a)] and l = 2.68 ± 0.86 µm [supplementary Fig. S1(b)], respectively. Their rotational diffusion time scale was estimated to be τr = 1.67 s [supplementary Fig. S1(c)]. The Peclet number, defined as Pe = τr v / l, turns out to be Pe ∼ 21 for our system. The phase behavior of colloidal particles in suspensions of bacteria was investigated by varying the size and density of the beads at a constant density of bacteria. The diameters of the particles used in the experiments were σ = 7 µm, 10 µm and 15 µm, and their density was varied over φ ∼ 0.1 − 0.4, where φ = Nπσ²/(4A) is the area fraction, N is the number of colloidal particles in the field of view, and A is the area of the field of view. The size ratio S = σ/l is defined as the ratio of the diameter of the colloids to the length of the bacteria.
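As a quick sanity check of the quoted numbers (an editorial illustration, not part of the paper), the Peclet-number and area-fraction definitions above can be evaluated directly; the particle count `N` and field-of-view area `A` below are hypothetical placeholders.

```python
import numpy as np

# Measured averages quoted in the text (um/s, um, s).
v, l, tau_r = 33.84, 2.68, 1.67
print(f"Pe = {tau_r * v / l:.1f}")          # Pe = tau_r * v / l, ~21 as quoted

# Area fraction phi = N * pi * sigma^2 / (4 A); N and A are made-up values.
sigma, N, A = 10.0, 500, 400.0 * 300.0      # um, count, um^2
print(f"phi = {N * np.pi * sigma**2 / (4 * A):.2f}")
```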
+ The simulations were performed using a binary mixture of active Brownian particles (ABPs), with Na small active particles of radius aa and Np big passive particles of radius ap (ap > aa) moving on a two-dimensional frictional substrate. The active particles are associated with a self-propulsion speed v and an orientation unit vector v̂i. The equations of motion and other simulation details are given in the supplementary section. We simulate the system in a square box of size lbox × lbox with periodic boundary conditions. The system is defined by the area fractions φa = Na π aa²/lbox² and φp = Np π ap²/lbox² of the active and passive particles respectively, the activity v of the active particles, and the size ratio S = ap/aa, defined as the ratio of the radius of a passive particle to the radius of an active particle. We start with a random homogeneous distribution of active and passive particles in the box and with random directions for the velocities of the active particles. Eqs. (S1–S3) are updated for all particles, and one simulation step is counted after a single update of all the particles. The simulations do not include the hydrodynamic interactions that are present in the experiments. The effect of hydrodynamic interactions can be included using coarse-grained studies similar to [51].
+ FIG. 1. The structure of passive particles in active suspensions. Main panel: The pair correlation function g(r) for φ ∼ 0.10 and S ∼ 2.5, 3.5, and 5.5. The g(r) curves are shifted along the y-axis for clarity. Insets: Bright-field images of particles at φ ∼ 0.10 and size ratios S ∼ 2.5 (left) and S ∼ 5.5 (right), respectively. The scale bar in the images is 50 µm.
+ The colloids used in our experiments are bigger than 5 µm, so they are non-Brownian particles. However, they diffuse in suspensions of bacteria due to active fluctuations, with a characteristic super-diffusive motion on short time scales and a diffusive motion on long time scales. To investigate their collective behavior in active suspensions, we first analyze their pair correlation function g(r), which is shown in the main panel of Fig. 1 at an area fraction of φ ∼ 0.1 and size ratios S ∼ 2.5 − 5.5. The normalized g(r) for different size ratios is shifted along the y-axis for clarity. What is prominent is the presence of a sharp peak at r = σ, while additional peaks develop at r = 1.7σ and r = 2σ with increasing size ratio. The peak at 2σ indicates a second shell of neighbors, and the one at 1.7σ is a signature of hexagonal ordering in the cluster. These observations are evident in the bright-field images presented in the insets of Fig. 1. The larger size ratios lead to larger clusters with enhanced order. These images are reminiscent of clustering in systems of purely active particles [15]. However, the clusters of passive particles in our experiments break and form much more rapidly. A real-time video of dynamic cluster formation is presented in the supplementary section, Video SV1, for φ ∼ 0.10 and S ∼ 2.5. Recent simulations have reported similar dynamic clustering, as well as traveling interfaces of active-passive particles that are not observed in our current study [40, 42]. One of the main differences between our experiments and these simulations is the large Peclet numbers used in the simulations. Further, as reported by earlier investigations, the clustering of passive particles is a manifestation of an attractive effective potential between the passive particles due to active fluctuations [28].
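The pair-correlation analysis above can be reproduced with a short estimator. The following is a minimal sketch (our own illustration, not the authors' analysis code), assuming particle centers `pos` as an N × 2 array in a rectangular field of view, and ignoring edge corrections.

```python
import numpy as np

def pair_correlation(pos, area, dr, r_max):
    """Radial distribution function g(r) for 2D point data."""
    n = len(pos)
    rho = n / area                                   # mean number density
    d = np.linalg.norm(pos[:, None, :] - pos[None, :, :], axis=-1)
    d = d[np.triu_indices(n, k=1)]                   # unique pair distances
    counts, edges = np.histogram(d, bins=np.arange(dr, r_max + dr, dr))
    r = 0.5 * (edges[1:] + edges[:-1])
    ideal = rho * 2.0 * np.pi * r * dr * n / 2.0     # expected pair counts
    return r, counts / ideal                         # -> 1 for an ideal gas
```

Peaks at r = σ, 1.7σ, and 2σ in the output would reproduce the contact-shell and hexagonal-ordering signatures discussed above.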
+ FIG. 2. Cluster statistics of passive particles. (a) Cluster size distribution in the main panel, shown for different size ratios S ∼ 2.5, 3.5, and 5.5 at a density of φ ∼ 0.10. The symbols distinguish different size ratios. The inset shows the CSD plot for the same size ratios at φ ∼ 0.3. (b) The average cluster size ⟨n⟩ for varying S. The curves with different symbols correspond to different particle densities, ranging from φ ∼ 0.1 − 0.4.
+ We next turn our attention to the cluster size distribution (CSD), p(n), which is a count of clusters of n particles [43, 52]. The clusters in our experiments were determined by setting a distance criterion of rc ≤ 1.1σ to identify pairs of particles as neighbors. This criterion was set based on the position of the first peak of g(r) in Fig. 1, and to account for the small polydispersity (< 5%) in the size distribution of the colloidal particles. The results of our analysis are presented in Figs. 2a & 2b. The main panel in Fig. 2a shows the CSD for varying size ratios of S ∼ 2.5, 3.5, and 5.5 at a density of φ ∼ 0.1. For small size ratios S < 5, p(n) has an exponential form exp(−n/n0), as observed in the equilibrium case [50]. The clustering is weak at these size ratios; however, for S > 5 the p(n) displays a power-law decay with an exponential cut-off at large n, i.e., it is best described by p(n)/p(1) ∼ (1/nα) exp(−n/n0). The fits of this form to our data are shown in the figure using dashed lines. These results indicate that the characteristic size of the clusters grows with increasing size ratio. The growth of the clusters is dramatic at larger area fractions; the inset of Fig. 2a shows the cluster distribution at φ ∼ 0.3.
+ We further elucidate the clustering of colloids by computing the average cluster size using the expression ⟨n⟩ = Σn n p(n), which is presented in Fig. 2b, where the curves with different symbols correspond to different area fractions ranging from φ ∼ 0.1 − 0.4. These measurements were made in the steady state, where the mean cluster size fluctuates around a mean value. This data is provided in Fig. S2(a-c) for various size ratios and area fractions, for over 5000 frames or 500 s. What is clear from Fig. 2b is that increasing the size ratio, i.e., the relative size of the colloids, leads to larger cluster sizes. This suggests that the effective potential between the colloids becomes stronger with increasing size ratio. One can intuitively understand the underlying physics by considering the interaction between an isolated colloidal particle and a swimmer. When the size of a particle is small, a bacterium entrains the particle to larger distances before changing its direction of motion. However, when the particle is large, the entrainment distance is small, and the scattering angle of the swimmer is large [25]. This indicates that the bacteria can suppress cluster formation when the colloidal particles are smaller. What is not clear from our experiments is whether larger size ratios lead to a macroscopic phase separation in our system. To understand this aspect, we turn to numerical simulations that allow a detailed exploration of parameter space.
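The cluster bookkeeping described above maps onto a standard connected-components computation. The sketch below is our illustration (array shapes assumed): it links particles within rc = 1.1σ and returns the cluster sizes from which p(n) and ⟨n⟩ follow.

```python
import numpy as np
from scipy.spatial import cKDTree
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components

def cluster_sizes(pos, sigma, rc_factor=1.1):
    """Sizes of clusters of particles linked within rc = rc_factor * sigma."""
    n = len(pos)
    pairs = cKDTree(pos).query_pairs(rc_factor * sigma, output_type="ndarray")
    adj = coo_matrix((np.ones(len(pairs)), (pairs[:, 0], pairs[:, 1])), shape=(n, n))
    _, labels = connected_components(adj, directed=False)
    return np.bincount(labels)

def mean_cluster_size(sizes):
    """<n> = sum_n n p(n), with p(n) the fraction of clusters of size n."""
    p = np.bincount(sizes) / len(sizes)
    return (np.arange(len(p)) * p).sum()
```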
+ The first quantity we calculated in the simulations is the effective potential between two passive particles in a medium of ABPs with torque. In order to calculate the effective potential between two passive particles, we choose Np = 2 at positions r1 and r2, respectively, in a system of ABPs with Na = 1800. We keep r1 fixed and slowly vary r2 in small steps of ∆x = 0.5aa, starting from zero surface-to-surface distance between the two passive particles. A cartoon of the system simulated for the force calculation at a fixed r is shown in Fig. S3 (SM). In the figure, ABPs are shown in red and passive particles in blue for S = 8; for resolution, only the part of the system near the two passive particles is shown. The active particles' positions and orientations are updated according to Eqs. (S1 and S2). For each configuration at a given distance between the two passive particles, the system is allowed to reach the steady state. Further, we use the steady-state configuration to calculate the force FS(r) between the two passive particles at a surface-to-surface separation r, such that FS(r) = F12(r) + Σ_{i=1}^{Na} F1i(r). Here F12(r) is the force due to the 2nd passive particle on the 1st, and Σ_{i=1}^{Na} F1i(r) represents the sum of all the forces due to active particles on the 1st passive particle for a given configuration of the two passive particles at separation r. The potential is then calculated by integrating the force over the distance, U(r) = ∫_{−∞}^{r} FS(r′) dr′ [53–55]; here we set the lower limit at one-fourth of the box length. The results are averaged over 30 independent realizations.
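Numerically, the last step is a one-dimensional cumulative integration of the sampled mean force. A minimal sketch (ours; the reference point is chosen at the largest sampled separation so that U → 0 there) is:

```python
import numpy as np
from scipy.integrate import cumulative_trapezoid

def effective_potential(r, F):
    """U(r) from the sampled steady-state force F_S(r); r must be ascending.
    The additive constant is fixed so that U vanishes at the largest r."""
    U = cumulative_trapezoid(F, r, initial=0.0)
    return U - U[-1]
```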
+ FIG. 3. Top left figure: The effective potential between a pair of colloidal particles. The main panel shows the plot of the effective potential U(r) vs. distance r for Pe = 25 and size ratios S = 3, 5, 8, and 10. The inset shows the effective potential with the scaled distance r/ap. Top right figure: The main panel shows the normalized correlations of density fluctuations C(r) due to a passive particle. The inset shows the length scale extracted from C(r) as a function of the size ratio; the length scale is expressed in terms of the active-particle size. Bottom panel: Snapshots of the system obtained from the microscopic simulation of the two types of particles, for different size ratios S = 3, 5 and 8 (left, central and right columns) at Pe = 25. Red particles are ABPs and blue particles are passive particles, for a fixed packing fraction φ = 0.60 in a system of size lbox = 140aa.
+ We calculated the effective potentials U(r) for Pe = 25 (which is comparable to the experimental system) and four size ratios S = 3, 5, 8 and 10. The comparable size ratios in the experimental system are S ∼ 2.5 to 5.5. We first plot the effective potential U(r). In the main panel of Fig. 3(a) we show U(r) vs. r for S = 3, 5, 8 and 10. The distance is normalized by the radius of the active particles, which is kept fixed at 0.1. The negative side of the potential indicates attraction and the positive side repulsion. For all the parameters, the potential approaches zero at large distances and is negative at intermediate distances. The depth of the potential becomes deeper with increasing S. The inset shows the effective potential with the distance r scaled by the size of the passive particles. Surprisingly, the minima of the potentials for the size ratios S = 5, 8 and 10 fall at r/ap = 1, which implies that the length scale characterizing the range of the interaction potential is set by the size of the passive particles. We investigate further the origin of the long-range interactions by considering a single passive particle in the center of our system, as shown in Fig. S4. It is evident that the passive particle disturbs the density field of the active particles, leading to clustering around the passive particle. The main panel of Fig. 3(b) shows the normalized density correlation C(r) = (⟨ρ(0)ρ(r)⟩ − ⟨ρ(r)⟩²)/(⟨ρ(r)²⟩ − ⟨ρ(r)⟩²) of the active particles, calculated from the surface of the big passive particle for four different size ratios S = 3, 5, 8 and 10. The inset of Fig. 3(b) shows the typical size of the cluster L(S) around a single passive particle in the center of the box for different size ratios S. The L(S) is measured in terms of the size of an ABP. Clearly, L(S) increases with increasing S. The number fluctuations of active particles around an isolated passive particle yield similar conclusions. Fig. S5 (SM) shows the number fluctuations for three different sizes of the passive particle, i.e., for three different size ratios S = 3, 5 and 8. The details of the calculations are given in the supplementary material. For all the cases the graph is a power law, ∆N ≃ Nα, where α ≃ 0.7 for moderate N for all S, with deviations setting in at large N. The deviation appears at relatively larger N on increasing the size ratio. Hence, increasing the size of the passive particle increases the extent of the density fluctuations of the ABPs. These results establish that the density fluctuations play a central role in the emergence of the long-range effective attractive interactions between passive particles in our simulations.
+ To elucidate the effect of this effective potential, full microscopic simulations of mixtures of active and passive particles were performed using Eqs. (S1–S3). We simulated the system for Pe = 25 and size ratios S = 3, 5 and 8, which are close to the experimental values. In the bottom panel of Fig. 3, steady-state snapshots of passive (blue, bigger) and active (red, smaller) particles are shown for the different size ratios S = 3, 5 and 8, respectively. Clusters with moderate to strong ordering are found on increasing S. For small S = 3, clusters are present but without strong local hexagonal ordering, whereas as we increase S the ordering and clustering are enhanced. We also calculated the percentage of passive particles participating in the largest cluster for different size ratios, and it increases from 35% to 67% as we increase the size ratio from 3 to 8 (data not shown). Hence, for large size ratios the passive particles show macroscopic phase separation.
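The normalized correlation defined above can be estimated from snapshots of the radial density profile around the passive particle. The sketch below is our illustration (binning outward from the particle surface is assumed) and implements the quoted formula directly.

```python
import numpy as np

def density_correlation(profiles):
    """C(r) = (<rho(0)rho(r)> - <rho(r)>^2) / (<rho(r)^2> - <rho(r)>^2).
    profiles: (n_snapshots, n_bins) densities rho(r), bin 0 at the surface."""
    rho0 = profiles[:, :1]                        # rho(0): contact-bin density
    mean = profiles.mean(axis=0)
    cross = (rho0 * profiles).mean(axis=0)        # <rho(0) rho(r)>
    var = (profiles**2).mean(axis=0) - mean**2
    return (cross - mean**2) / var
```

A decay length L(S) can then be read off, for example, from where C(r) falls to 1/e.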
+ A similar examination of the correlations of the density fluctuations of bacteria in the experiments reveals that they are suppressed, which is evident from C(r) in Fig. S6. The clustering of the colloidal particles therefore appears to arise from their hydrodynamic interactions. An earlier numerical study of active-passive matter with pusher-type swimmers at dilute concentrations had shown hydrodynamic interactions to stabilize colloidal clusters [59]. In addition, a recent theoretical model of active gels shows a long-ranged attractive effective potential between colloids due to hydrodynamic effects [58]. Considering these studies, hydrodynamics is likely to promote the formation of colloidal clusters.
+ Our investigations conclude that the interplay of an effective potential and active noise determines the phase behavior of colloidal particles in active liquids. The strength of the effective potential is set by the size ratio of passive particles to active ones; larger size ratios lead to stronger interactions. The simulations reveal a long-ranged effective potential extending over several active-particle diameters. It appears to emerge from the long-ranged density fluctuations of the active particles in the system. When the size ratio is small, the passive particles display dynamic clusters that form and break rapidly. However, the effective potential is strong enough to lead to phase separation of the passive particles at sufficiently large size ratios. These are novel features of active-passive mixtures that are absent in the equilibrium analog of colloid-polymer mixtures, where the effective potential is short-ranged. The density fluctuations of bacteria are suppressed in the experiments. Further investigation is needed to understand the role of hydrodynamic interactions in the effective potential of colloids in our experiments with active suspensions.
+ We thank Chaitanya Athale, Apratim Chatterji, Thomas Pucadyil, Sunish Radhakrishnan, Rajesh Singh, and Ganesh Subramanian for helpful discussions and support. We thank Madan Rao for drawing our attention to [58], and Kumar Gourav for assistance in the initial stages of the experiments. V.C. acknowledges financial support from IISER Pune and DST/SERB under the project grant CRG/2021/007824. P.K. is supported by CSIR-UGC fellowship 1353. V.S. and S.M. thank the I.I.T. (BHU) Varanasi computational facility. V.S. thanks DST INSPIRE (INDIA) for a research fellowship. S.M. thanks DST, SERB (INDIA), Project No. ECR/2017/000659, for partial financial support.
+ [1] S. Sacanna, M. Korpics, K. Rodriguez et al., Shaping colloids for self-assembly, Nat. Commun. 4, 1688 (2013).
+ [2] T. Hueckel, G. M. Hocky, and S. Sacanna, Total synthesis of colloidal matter, Nat. Rev. Mater. 6, 1053–1069 (2021).
+ [3] S. Glotzer and M. Solomon, Anisotropy of building blocks and their assembly into complex structures, Nature Mater. 6, 557–562 (2007).
+ [4] P. N. Pusey, in Liquids, Freezing and Glass Transition, edited by J. P. Hansen, D. Levesque, and J. Zinn-Justin (North-Holland, 1991), pp. 765–942.
+ [5] W. C. K. Poon, "Colloidal suspensions," in Oxford Handbook of Soft Condensed Matter, edited by E. Terentjev and D. A. Weitz (Oxford University Press, Oxford, 2015), pp. 1–49.
+ [6] S. Gokhale, J. Li, A. Solon, J. Gore, and N. Fakhri, Dynamic clustering of passive colloids in dense suspensions of motile bacteria, Phys. Rev. E 105 (5), 054605 (2022).
+ [7] F. Kummel, P. Shabestari, C. Lozano, G. Volpe, and C. Bechinger, Formation, compression and surface melting of colloidal clusters by active particles, Soft Matter 11, 6187 (2015).
+ [8] P. Romanczuk, M. Bar, W. Ebeling, and B. Lindner, Active Brownian particles, Eur. Phys. J. Special Topics 202, 1–162 (2012).
+ [9] S. Ramaswamy, Active matter, Journal of Statistical Mechanics: Theory and Experiment 2017, 054002 (2017).
+ [10] M. C. Marchetti, J.-F. Joanny, S. Ramaswamy, T. B. Liverpool, J. Prost, M. Rao, and R. A. Simha, Hydrodynamics of soft active matter, Rev. Mod. Phys. 85, 1143 (2013).
+ [11] C. Bechinger, R. D. Leonardo, H. Lowen, C. Reichhardt, G. Volpe, and G. Volpe, Active particles in complex and crowded environments, Rev. Mod. Phys. 88, 045006 (2016).
+ [12] T. Vicsek, A. Czirók, E. Ben-Jacob, I. Cohen, and O. Shochet, Novel type of phase transition in a system of self-driven particles, Phys. Rev. Lett. 75, 1226 (1995).
+ [13] A. Bricard, J.-B. Caussin, N. Desreumaux, O. Dauchot, and D. Bartolo, Emergence of macroscopic directed motion in populations of motile colloids, Nature 503, 95 (2013).
+ [14] M. E. Cates and J. Tailleur, Motility-Induced Phase Separation, Annu. Rev. Condens. Matter Phys. 6, 219 (2015).
+ [15] I. Buttinoni, J. Bialké, F. Kümmel, H. Löwen, C. Bechinger, and T. Speck, Dynamical clustering and phase separation in suspensions of self-propelled colloidal particles, Phys. Rev. Lett. 110, 238301 (2013).
+ [16] J. Palacci, S. Sacanna, A. P. Steinberg, D. J. Pine, and P. M. Chaikin, Living Crystals of Light-Activated Colloidal Surfers, Science 339, 936 (2013).
+ [17] H. Wensink et al., Meso-scale turbulence in living fluids, Proc. Natl. Acad. Sci. USA 109, 14308 (2012).
+ [18] H. M. Lopez, J. Gachelin, C. Douarche, H. Auradou, and E. Clement, Turning bacteria suspensions into superfluids, Phys. Rev. Lett. 115, 028301 (2015).
+ [19] X.-L. Wu and A. Libchaber, Particle Diffusion in a Quasi-Two-Dimensional Bacterial Bath, Phys. Rev. Lett. 84, 3017 (2000).
+ [20] K. C. Leptos, J. S. Guasto, J. P. Gollub, A. I. Pesci, and R. E. Goldstein, Dynamics of enhanced tracer diffusion in suspensions of swimming eukaryotic microorganisms, Phys. Rev. Lett. 103, 198103 (2009).
+ [21] C. Valeriani, M. Li, J. Novosel, J. Arlt, and D. Marenduzzo, Colloids in a bacterial bath: simulations and experiments, Soft Matter 7, 5228 (2011).
+ [22] J.-L. Thiffeault, Distribution of particle displacements due to swimming microorganisms, Phys. Rev. E 92, 023023 (2015).
+ [23] R. Jeanneret, D. O. Pushkin, V. Kantsler, and M. Polin, Entrainment dominates the interaction of microalgae with micron-sized objects, Nat. Commun. 7, 12518 (2016).
+ [24] A. J. T. M. Mathijssen, R. Jeanneret, and M. Polin, Universal entrainment mechanism controls contact times with motile cells, Phys. Rev. Fluids 3, 033103 (2018).
+ [25] H. Shum and J. M. Yeomans, Entrainment and scattering in microswimmer-colloid interactions, Phys. Rev. Fluids 2, 113101 (2017).
+ [26] L. Ortlieb, S. Rafai, P. Peyla, C. Wagner, and T. John, Statistics of colloidal suspensions stirred by microswimmers, Phys. Rev. Lett. 122, 148101 (2019).
+ [27] A. Lagarde, N. Dages, T. Nemoto, V. Demery, D. Bartolo, and T. Gibaud, Colloidal transport in bacteria suspensions: from bacteria collision to anomalous and enhanced diffusion, Soft Matter 16, 7503 (2020).
+ [28] L. Angelani, C. Maggi, M. L. Bernardini, A. Rizzo, and R. Di Leonardo, Effective interactions between colloidal particles suspended in a bath of swimming cells, Phys. Rev. Lett. 107, 138302 (2011).
+ [29] D. Ray, C. Reichhardt, and C. J. Olson Reichhardt, Casimir effect in active matter systems, Phys. Rev. E 90, 013019 (2014).
+ [30] R. Ni, M. A. Cohen Stuart, and P. G. Bolhuis, Tunable long range forces mediated by self-propelled colloidal hard spheres, Phys. Rev. Lett. 114, 018302 (2015).
+ [31] M. Z. Yamchi and A. Naji, Effective interactions between inclusions in an active bath, J. Chem. Phys. 147, 194901 (2017).
+ [32] F. Feng, T. Lei, and N. Zhao, Tunable depletion force in active and crowded environments, Phys. Rev. E 103, 022604 (2021).
+ [33] F. Smallenburg and H. Löwen, Swim pressure on walls with curves and corners, Phys. Rev. E 92, 032304 (2015).
+ [34] J. Harder, S. A. Mallory, C. Tung, C. Valeriani, and A. Cacciuto, The role of particle shape in active depletion, J. Chem. Phys. 141, 194901 (2014).
+ [35] P. Liu, S. Ye, F. Ye, K. Chen, and M. Yang, Constraint dependence of active depletion forces on passive particles, Phys. Rev. Lett. 124, 158001 (2020).
+ [36] Y. Baek, A. P. Solon, X. Xu, N. Nikola, and Y. Kafri, Generic long-range interactions between passive bodies in an active liquid, Phys. Rev. Lett. 120, 058002 (2018).
+ [37] S. R. McCandlish, A. Bhaskaran, and M. Hagan, Spontaneous segregation of self-propelled particles with different motilities, Soft Matter 8, 2527 (2012).
+ [38] S. C. Takatori and J. F. Brady, A theory for the phase behavior of mixtures of active particles, Soft Matter 11, 7920 (2015).
+ [39] A. K. Omar, Y. Wu, Z. G. Wang, and J. F. Brady, Swimming to stability: Structural and dynamical control via active doping, ACS Nano 13, 560 (2019).
+ [40] J. Stenhammar, R. Wittkowski, D. Marenduzzo, and M. E. Cates, Activity-induced phase separation and self-assembly in mixtures of active and passive particles, Phys. Rev. Lett. 114, 018301 (2015).
+ [41] S. N. Weber, C. A. Weber, and E. Frey, Binary mixtures of particles with different diffusivities demix, Phys. Rev. Lett. 116, 058301 (2016).
+ [42] A. Wysocki, R. G. Winkler, and G. Gompper, Propagating interfaces in mixtures of active and passive Brownian particles, New J. Phys. 18, 123030 (2016).
+ [43] P. Dolai, A. Simha, and S. Mishra, Phase separation in binary mixtures of active and passive particles, Soft Matter 14, 6137 (2018).
+ [44] J. Adler and B. Templeton, The effect of environmental conditions on the motility of Escherichia coli, J. Gen. Microbiol. 46, 175–184 (1967).
+ [45] E. Ilker and J.-F. Joanny, Phase separation and nucleation in mixtures of particles with different temperatures, Phys. Rev. Res. 2, 023200 (2020).
+ [46] A. Y. Grosberg and J.-F. Joanny, Nonequilibrium statistical mechanics of mixtures of particles in contact with different thermostats, Phys. Rev. E 92, 032118 (2015).
+ [47] F. Hauke, H. Lowen, and B. Liebchen, Clustering-induced velocity-reversals of active colloids mixed with passive particles, J. Chem. Phys. 152, 014903 (2020).
+ [48] R. L. Schilling and L. Partzsch, in Brownian Motion: An Introduction to Stochastic Processes, contributed by B. Böttcher (De Gruyter Textbook, 2012).
+ [49] Z. Schuss, Brownian Dynamics at Boundaries and Interfaces: In Physics, Chemistry, and Biology (Springer, New York, 2015).
+ [50] Y. Fily and M. C. Marchetti, Athermal Phase Separation of Self-Propelled Particles with No Alignment, Phys. Rev. Lett. 108, 235702 (2012).
+ [51] A. Tiribocchi, R. Wittkowski, D. Marenduzzo, and M. E. Cates, Active Model H: Scalar Active Matter in a Momentum-Conserving Fluid, Phys. Rev. Lett. 115, 188302 (2015).
+ [52] F. Peruani and M. Bär, A kinetic model and scaling properties of non-equilibrium clustering of self-propelled particles, New J. Phys. 15, 065009 (2013).
+ [53] J. P. Singh, S. Pattanayak, S. Mishra, and J. Chakrabarti, Effective single component description of steady state structures of passive particles in an active bath, The Journal of Chemical Physics 156 (2022).
+ [54] J. Dzubiella, J. Chakrabarti, and H. Lowen, Tuning colloidal interactions in subcritical solvents by solvophobicity: Explicit versus implicit modeling, J. Chem. Phys. 131, 044513 (2009).
+ [55] J. Chakrabarti, S. Chakrabarti, and H. Lowen, Short ranged attraction and long ranged repulsion between two solute particles in a subcritical liquid solvent, J. Phys. Condens. Matter 18, 81–87 (2006).
+ [56] S. Asakura and F. Oosawa, On Interaction between Two Bodies Immersed in a Solution of Macromolecules, J. Chem. Phys. 22, 1255 (1954).
+ [57] H. N. W. Lekkerkerker, W. C. K. Poon, P. N. Pusey, A. Stroobants, and P. B. Warren, Phase Behaviour of Colloid + Polymer Mixtures, Europhys. Lett. 20, 559 (1992).
+ [58] A. S. Vishen, J. Prost, and M. Rao, Breakdown of effective temperature, power law interactions, and self-propulsion in a momentum-conserving active fluid, Phys. Rev. E 100 (6), 062602 (2019).
+ [59] R. C. Krafnick and A. E. Garcia, Impact of hydrodynamics on effective interactions in suspensions of active and passive matter, Phys. Rev. E 91, 022308 (2015).
+ Supplementary information
+ Phase separation of passive particles in active liquids
+ Pragya Kushwaha1, Vivek Semwal2, Sayan Maity1, Shradha Mishra2, Vijayakumar Chikkadi1
+ 1 Indian Institute of Science Education and Research Pune, India 411008
+ 2 Indian Institute of Technology (BHU) Varanasi, India 221005
+ I. METHODS
+ A. Experiments
+ The active suspensions were prepared using E. coli cells (U5/41 type strain). The bacteria were grown overnight at 37◦C on an LB agar plate containing 1% tryptone, 1% NaCl, 0.5% yeast extract and 1.5% agar. A single colony of E. coli was added to 10 ml of LB broth and kept at 37◦C until OD600 (optical density at 600 nm wavelength) reached a value of 1.3. The bacterial cells were then harvested and washed three times with motility media (10 mM potassium phosphate (pH 7.0), 0.1 mM EDTA, 0.002% Tween-20 and 50 mM L-Serine) by centrifugation at 3000 rpm for 5 minutes at room temperature to remove traces of the LB broth. The pellet was later resuspended in motility media to get the desired concentrations. The observation chamber was created using a circular cavity of size 1 cm and 100 µm depth, which was glued to a PEG-coated coverslip using double-sided tape. A sample with OD600 = 1 was estimated to contain 6 × 10⁹ cells/ml (b0). The density of bacteria in our experiments was fixed at 10b0, which was well below the density threshold for the onset of collective motion.
+ B. Simulations
+ The simulations were performed using a binary mixture with Na small active particles of radius aa and Np big passive particles of radius ap (ap > aa) moving on a two-dimensional frictional substrate. The active particles are associated with a self-propulsion speed v and an orientation unit vector v̂i = (cos θi, sin θi), where θi is the angle between the velocity vector and a reference direction. The motion of the active Brownian particles (ABPs) is governed by the following Langevin equations:
+ dri/dt = v v̂i − µ1 Σ_{j≠i} Fij + √(2DT) ηTi,   (S1)
+ dθi/dt = Γ Σ_{j≠i} sin(θi − θij) + √(2Dr) ηri.   (S2)
+ Here µ1 is the mobility and Fij is the force acting on particle i due to particle j. The noise terms are defined by ⟨ηr,Ti(t) ηr,Tj(t′)⟩ = 2Dr,T δij δ(t − t′), where DT and Dr are the translational and rotational diffusion constants of the active particles, Γ is the magnitude of the alignment, and θij = arg(ri − rj). The persistence length lp = v/Dr of the active particles is constant in our simulations; it is set at lp = 20aa. The other constants in our simulations are DT = 0.005 and Γ = 1.
+ The equation of motion for the passive particles is
+ dri/dt = µ2 Σ_{j≠i} Fij,   (S3)
+ where µ2 is the mobility of the passive particles. There is no translational noise in Eq. (S3), so the dynamics of the passive particles is only due to the interaction forces. We choose the mobility of both species to be the same, i.e., µ1 = µ2. Particles interact through short-ranged soft repulsive forces Fij = Fij r̂ij, where Fij = k(ai + aj − rij) when rij ≤ (ai + aj) and Fij = 0 otherwise; rij = |ri − rj| and k is a constant. The elastic time scale in our system is defined by (µk)−1 = (150)−1.
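For concreteness, a minimal Euler-Maruyama integration of Eqs. (S1-S3) might look as follows. This is our sketch, not the authors' code: the neighbor and sign conventions of the torque and force terms follow our reading of the (garbled) extracted equations, the O(N²) loop favors clarity over speed, and periodic boundaries are omitted.

```python
import numpy as np

rng = np.random.default_rng(0)

def step(r, theta, a, active, dt=1e-3, v=1.0, mu=1.0, k=150.0,
         DT=0.005, Dr=0.05, Gamma=1.0):
    """One update of positions r (N,2), headings theta (N,), radii a (N,);
    `active` is a boolean mask. Dr should be chosen so that lp = v/Dr = 20 aa."""
    n = len(r)
    F = np.zeros_like(r)
    T = np.zeros(n)
    for i in range(n):
        for j in range(n):
            if i == j:
                continue
            d = r[i] - r[j]
            dist = np.hypot(d[0], d[1])
            overlap = a[i] + a[j] - dist
            if overlap > 0.0:
                # Soft repulsion; we take F_ij along r_i - r_j so that
                # overlapping particles push apart.
                F[i] += k * overlap * d / dist
                if active[i] and active[j]:
                    # Alignment torque of Eq. (S2), theta_ij = arg(r_i - r_j).
                    T[i] += Gamma * np.sin(theta[i] - np.arctan2(d[1], d[0]))
    vhat = np.column_stack([np.cos(theta), np.sin(theta)])
    r_new = r + dt * mu * F                      # force term, Eqs. (S1)/(S3)
    r_new[active] += dt * v * vhat[active] + \
        np.sqrt(2 * DT * dt) * rng.standard_normal((int(active.sum()), 2))
    theta_new = theta + dt * T + np.sqrt(2 * Dr * dt) * rng.standard_normal(n)
    return r_new, theta_new
```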
+ II. SUPPLEMENTARY FIGURES
+ A. Peclet number estimation
+ FIG. S1. (a) Histogram of bacteria velocities. The average velocity is ⟨v⟩ = 33.84 ± 9.98 µm/s. (b) Histogram of the size of the bacteria. The size of a bacterium is its length along the longer axis. The average length of the cells is ⟨l⟩ = 2.68 ± 0.86 µm. (c) The rotational diffusion time of the bacteria was estimated from their normalized velocity auto-correlation function. The dashed line is an exponential fit to the data, which gives a rotational diffusion time of τr ∼ 1.67 s.
+ III. SUPPLEMENTARY FIGURES
+ A. Steady state in experiments
+ FIG. S2. The average cluster size in our system as a function of frame number, for a range of area fractions φ ∼ 0.1 − 0.4 and at S ∼ 2.5 (a), S ∼ 3.5 (b) and S ∼ 5.5 (c). The steady state of the system is evident from these figures. The total duration of the measurement was 500 s.
+ B. Calculation of effective potential
+ FIG. S3. Snapshot of the part of the system used to calculate the potential. The two bigger particles are passive particles, with the left one marked as particle 1 and the right one marked as particle 2, at positions r1 and r2, respectively. The red circles are ABPs. The line shows the surface-to-surface distance r between the two passive particles.
+ C. Density of active particles in the vicinity of an isolated passive particle in simulations
+ FIG. S4. Density fluctuations of active particles due to a single passive particle in the center of the system. Instantaneous snapshots of the system for four size ratios S = 3, 5, 8 and 10, from left to right. Many such configurations are used to calculate the density correlations C(r). Red particles are ABPs and the blue particle at the center is the bigger passive particle.
+ D. Number fluctuations of active particles in the vicinity of an isolated passive particle in simulations
+ FIG. S5. Number fluctuation ∆N vs. N for different size ratios. The slopes of the dotted and dashed lines are 0.5 and 0.7, respectively.
+ For calculating the number fluctuations, we start with an annular disc whose inner radius is the radius of the passive particle and whose outer radius is varied. The mean and variance of the number of ABPs are calculated for different outer radii of the disc. The same is repeated for three different sizes of the passive particle, i.e., for three different size ratios S = 3, 5 and 8. In Fig. S5 we show the plot of ∆N vs. N for the three sizes S = 3, 5 and 8. For all the cases the graph is a power law, ∆N ≃ Nα, where α ≃ 0.7 for moderate N for all S, with deviations at large N. The deviation appears at relatively larger N on increasing the size ratio. Hence, increasing the size of the passive particle increases the extent of the density fluctuations of the ABPs, and this might be one of the factors that introduces a long-ranged attraction among the passive particles at larger size ratios.
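A minimal version of this annular-disc counting (our sketch; the snapshot format is assumed) is:

```python
import numpy as np

def number_fluctuations(snapshots, center, a_p, outer_radii):
    """Mean N and fluctuation Delta N of ABPs in annuli a_p < r < R.
    snapshots: iterable of (N,2) position arrays from the steady state."""
    means, fluct = [], []
    for R in outer_radii:
        counts = []
        for pos in snapshots:
            d = np.linalg.norm(pos - center, axis=1)
            counts.append(np.count_nonzero((d > a_p) & (d < R)))
        counts = np.asarray(counts, dtype=float)
        means.append(counts.mean())
        fluct.append(counts.std())      # Delta N; fit the log-log slope for alpha
    return np.asarray(means), np.asarray(fluct)
```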
+ E. Correlations of density fluctuations of bacteria in the vicinity of an isolated colloidal particle in experiments
+ FIG. S6. The correlations of the density fluctuations, C(r), as defined in the main text, shown for two different size ratios S ∼ 2.5 and 5.5. The x-axis is scaled by the size of the bacteria, l. It is clear from these results that the density fluctuations are suppressed in experiments.
4NFKT4oBgHgl3EQfRi0P/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
69E0T4oBgHgl3EQfwAF2/content/tmp_files/2301.02626v1.pdf.txt ADDED
@@ -0,0 +1,1615 @@
+ A hierarchical equations of motion (HEOM) analog for systems with delay: illustrated on inter-cavity photon propagation
+ Robert Fuchs1 and Marten Richter1,*
+ 1 Institut für Theoretische Physik, Nichtlineare Optik und Quantenelektronik, Technische Universität Berlin, Hardenbergstr. 36, EW 7-1, 10623 Berlin, Germany
+ (Dated: January 9, 2023)
+ Over the last two decades, the hierarchical equations of motion (HEOM) of Tanimura and Kubo have become the equation-of-motion-based tool for numerically exact calculations of system-bath problems. The HEOM has by now been generalized to many cases of dissipation and transfer processes through an external bath. In spatially extended photonic systems, the propagation of photons through the bath leads to retardation/delays in the coupling of quantum emitters. Here, the idea behind the HEOM derivation is generalized to the case of photon retardation and applied to the simple example of two dielectric slabs. The derived equations provide a simple, reliable framework for describing retardation and may provide an alternative to path integral treatments.
+ After the hierarchical equations of motion (HEOM) were initially invented by Tanimura and Kubo [1, 2] to solve the open quantum system problem with a Debye spectral density numerically exactly, the HEOM did not immediately take off, since the limited numerical capabilities of the time did not allow for a versatile implementation. However, the idea of using the time-constant derivative of the Debye spectral density's time correlation function stuck. More recently, various implementations [2–7] of the HEOM followed once sufficient computing power became available. Soon after its invention, generalizations to arbitrary spectral densities, based on decompositions into sums of Debye-form spectral densities, were also developed. For most system-bath approaches it provides a well-established path to a numerically exact solution.
+ A different type of system-bath problem is the propagation of quantum states, e.g., through a bath of photons or phonons [8–17]. A typical problem is describing quantum interconnects for quantum computing and cryptography applications. Recently, various applications of such systems with a delay caused by the propagation through the bath were investigated [8–17], including the development of different methods. However, the number of propagating photons that can be treated is still limited, as it was for open quantum system approaches until HEOM implementations, along with other methods such as tensor networks [14, 15, 18–33], became widespread. In this paper, an analysis of the HEOM derivation in the context of delay is carried out, and HEOM analog equations for systems with delay are derived. We demonstrate that the approach leads to a systematic set of equations ordered by the number of photons propagating through the bath. In the future, combinations with, e.g., tensor networks or automatic derivation may lead to an additional route to solve problems involving delays. The paper starts with a derivation of the HEOM analog for open quantum systems with delay and illustrates its potential with a simple photon propagation example.
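To make the structure that is being generalized concrete, here is a minimal sketch of the classic, delay-free HEOM for a two-level system coupled to a Drude-Debye bath with a single exponential correlation C(t) = (cR + i cI) e^(-gamma t) (high-temperature limit, no Matsubara terms, hbar = 1). All parameter values, the coefficient convention, and the truncation depth are our illustrative assumptions, not taken from this paper, and the signs follow the standard Ishizaki-Tanimura form, which should be checked against the literature.

```python
import numpy as np

sz = np.diag([1.0, -1.0]); sx = np.array([[0.0, 1.0], [1.0, 0.0]])
H = 0.5 * sz + 0.2 * sx            # system Hamiltonian (arbitrary test values)
V = sz                             # system side of the coupling
lam, gamma, beta = 0.1, 0.5, 1.0   # reorganization energy, bath cutoff, 1/kT
c = 2.0 * lam / beta - 1j * lam * gamma   # C(t) = c * exp(-gamma t)
N = 10                             # hierarchy truncation depth

def heom_rhs(rho):                 # rho: (N+1, 2, 2) stack; rho[0] is physical
    out = np.zeros_like(rho)
    for n in range(N + 1):
        out[n] = -1j * (H @ rho[n] - rho[n] @ H) - n * gamma * rho[n]
        if n < N:                  # coupling up the ladder
            out[n] += -1j * (V @ rho[n + 1] - rho[n + 1] @ V)
        if n > 0:                  # coupling down carries C(t)'s c and c*
            out[n] += -1j * n * (c * V @ rho[n - 1] - np.conj(c) * rho[n - 1] @ V)
    return out

rho = np.zeros((N + 1, 2, 2), dtype=complex)
rho[0] = np.array([[1.0, 0.0], [0.0, 0.0]])   # start in the upper state
dt = 0.01
for _ in range(2000):              # plain Euler keeps the sketch short;
    rho = rho + dt * heom_rhs(rho) # an RK4 stepper would be more robust
print(np.real(rho[0, 0, 0]))       # population of state |0>
```

The point relevant here is the ladder structure: each auxiliary rho_n couples only to rho_(n±1), which is exactly what breaks down once the bath correlation carries a delay.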
+ HEOM derivation: An HEOM analog with delay is derived for an open quantum system with H = Hs + Hb + Hsb. Here, Hs is the Hamiltonian of the system, which consists of quantum emitters in different spatially separated cavities. Hb is the bath Hamiltonian containing the propagating photon modes. Finally, Hsb is the system-bath coupling Hamiltonian. In open quantum systems, only the observables of the system are of interest, which can be calculated from the relevant density matrix ρs(t) = trB(ρ(t)). Its calculation is the main objective of HEOM, where we transfer the steps by Tanimura and Kubo [1] to systems with delay. We assume a factorized initial state ρ(t0) = ρs(t0) ⊗ ρB, where ρB is a harmonic bath state. The system dynamics obey:
+ ρs(t) = trB( T← U(t, t0) exp[ −(i/ℏ) ∫_{t0}^{t} dτ U(t0, τ) Hsb,−(τ) U(τ, t0) ] ρs(t0) ⊗ ρB ),   (1)
+ where ALρ = Aρ, ARρ = ρA, and A− = AL − AR define the Liouville space operators acting on a Liouville operator ρ for any Hilbert space operator A [34], and U(t, t0) = T← exp[ −(i/ℏ) ∫_{t0}^{t} dτ (Hs,−(τ) + Hb,−(τ)) ] with the time ordering operator T←. Hs,−(τ) may also contain Lindblad operators describing external processes acting on the joint system-bath state. Following the HEOM derivation [1] and the path integral derivation from [17], we convert Eq. (1) to path integral form:
+ ρs(t) = trB( T← Π_{i=0}^{M} U_{i,i−1} exp[ −(i/ℏ) ∫_{t0+ε(i−1)}^{t0+εi} dτ U†_{i−1}(τ) Hsb,−(τ) U_{i−1}(τ) ] ρs(t0) ⊗ ρB )   (2)
+ with ε = (t − t0)/M and M → ∞ (in the following equations the limit is always assumed). Furthermore, U_{i,j} = U(t0 + εi, t0 + εj) and U_i(τ) = U(τ, t0 + εi). For small ε, the approximation U_{i,i−1} exp[ −(i/ℏ) ∫_{t0+ε(i−1)}^{t0+εi} dτ U†_{i−1}(τ) Hsb,−(τ) U_{i−1}(τ) ] ≈ U_{i,i−1} − (i/ℏ) ε U_{i,i−1/2} Hsb,−(t0 + ε(i − 1/2)) U_{i−1/2,i−1} =: U_{i,i−1} + ε U(1)sb(i) holds, yielding:
+ ρs(t) = trB( T← Π_{i=0}^{M} ( U_{i,i−1} + ε U(1)sb(i) ) ρs(t0) ⊗ ρB ).
+ We assume linear system-bath coupling, Hsb = Σ_{ijµ} Cijµ Aij Bµ, with system operators Aij and linear bath operators Bµ. For a system Liouville operator A and a bath Liouville operator B, the relation (AB)− = A+B− + A−B+ holds (with A+ = (AL + AR)/2), so U(1)sb(i) can be written as a sum over products of system and bath operators, U(1)sb(i) = Σ_l A(1)_l(i) B(1)_l(i), and we define A(0)_l = U^s_{i,i−1} δ_{l,0} and B(0)_l = U^b_{i,i−1} δ_{l,0} with the system and bath parts of U_{i,i−1}. With these relations, we write ρs in terms of a system part S and an influence functional (in a similar form as in [17]),
+ ρs(t) = Σ_{k1...kM=0}^{1} Σ_{l1...lM} ( Π_{i=1}^{M} ε^{ki} ) S(k1l1, ..., kMlM) × I(k1l1, ..., kMlM).   (3)
+ The system part is still an operator, S(k1l1, ..., kMlM) = T← Π_{i=1}^{M} A(ki)_{li}(i) ρs(t0), while the influence functional I(k1l1, ..., kMlM) = trB( T← Π_{i=1}^{M} B(ki)_{li}(i) ρB ) is just a number. Since ρB is assumed to be a harmonic bath equilibrium state, Wick's theorem allows us to factorize the influence functional I into expectation values of two bath operators B(1)_l(·). Furthermore, for small ε, the system propagator is roughly U^s_{i,i−1} ≈ Id_s − (i/ℏ) ε Hs,−(t0 + ε(i − 1/2)).
+ Figure 1. (a) Model of two open QNM cavities with dissipation rates γµ and effective inter-cavity coupling strength Vµη. (b) 1D model with two slabs of width L = 21 µm with constant permittivity ϵR = π² serving as QNM cavities, sitting against a background ϵB = 1. (c) Scheme of the HEOM depicting a process including inter-cavity transfer and dissipation.
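Before continuing the derivation, the role of the delay in the Fig. 1 geometry can be previewed with a deliberately crude classical caricature (ours, not the paper's method): two coupled mode amplitudes with dissipation, whose mutual coupling acts with the photon propagation delay tau, implemented via a history buffer. All numerical values are illustrative.

```python
import numpy as np

g1 = g2 = 0.05       # cavity dissipation rates (illustrative units)
Vc = 0.2             # inter-cavity coupling strength
tau, dt = 5.0, 0.01  # propagation delay and time step
steps, lag = 20000, int(round(tau / dt))

a1 = np.zeros(steps + 1, dtype=complex)
a2 = np.zeros_like(a1)
a1[0] = 1.0          # photon amplitude initially in cavity 1

for n in range(steps):
    a1_del = a1[n - lag] if n >= lag else 0.0   # nothing has arrived yet
    a2_del = a2[n - lag] if n >= lag else 0.0
    a1[n + 1] = a1[n] + dt * (-g1 * a1[n] - 1j * Vc * a2_del)
    a2[n + 1] = a2[n] + dt * (-g2 * a2[n] - 1j * Vc * a1_del)

print(abs(a2[lag]) ** 2, abs(a2[2 * lag]) ** 2)  # cavity 2 is dark until t ~ tau
```

Cavity 2 only responds after t ≈ τ; it is precisely this kind of retarded influence that the two-time auxiliary operators below keep track of.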
+ Using the approximations of the time propagators and using Wick's theorem, we obtain
+ ρs(t + ε) ≈ ρs(t) − ε (i/ℏ) Hs,−(t0 + ε(M + 1/2)) ρs(t) + Σ_{lM+1} T← ε A(1)_{lM+1} Σ_{k1...kM} Σ_{l1...lM} ( Π_{i=1}^{M} ε^{ki} ) A(ki)_{li}(i) ρs(t0) × Σ_{m=1}^{M} trB( B(1)_{lM+1}(M + 1) U^B_{M,m+1} B(1)_{lm}(m) ρB ) δ_{km,1} I(k1l1, ..., km−1lm−1, 00, km+1lm+1, ..., kMlM),
+ including only the terms at most linear in ε. Collecting the terms linear in ε yields the derivative of ρs [1]:
+ ∂t ρs(t) = −(i/ℏ) Hs,−(t) ρs(t) + Σ_{l l̃} A(1)_l(t) ∫_{t0}^{t} dt1 ⟨B(1)_l(t) B(1)_{l̃}(t1)⟩_B ρ(1)_{s l̃}(t, t1),   (4)
+ where ⟨A⟩_B = trB(A ρB), the bath correlation function is in the interaction picture, and
+ ρ(1)_{sl}(t, t̃) = δ_{km,1} δ_{lm,l} Σ_{k1...kM} Σ_{l1...lM} ⟨ T← Π_{i=1, i≠m}^{M} B(ki)_{li}(i) ⟩_B T← A(1)_l(m) ( Π_{i=1, i≠m}^{M} ε^{ki} A(ki)_{li}(i) ) ρs(t0),
+ with t̃ = mε + t0.
+ Here, the derivation deviates from the original recipe of Kubo and Tanimura, since the assumption of a spectral density in Debye form (simple exponential e^{−γt} in time) is not compatible with systems including delay. Generalizations of the HEOM usually rely on a decomposition of the spectral density into a sum of exponential functions to recover the Debye form. However, an expansion of the correlation function for the delay case using e^{−γ|t−t_delay|} does not yield the advantages of Kubo's and Tanimura's approach, since the original relies on a time-constant derivative of the Debye spectral-density time correlation function. Instead, a delayed correlation of the above form introduces a sign change at t = t_delay, so that a dependence of ρ^(n) on earlier integration times is unavoidable in the case with delay. Thus the integration over t_1 is not included in the definition of ρ^(1), in contrast to the original HEOM [1]. Keeping the general form of the bath correlation function is more flexible than using a special form, which would simplify the equations of motion in the following. ρ^(1)_{s l}(·, t_1) describes bath disturbances to the system density matrix, which are initially caused by an interaction with A^(1)_{l̃} at time t_1 (similar to the auxiliary dimensions in extended TCL [35]). Of course, the additional time argument prevents direct numerical implementations for increasing n. But specific bath correlation functions together with analytic calculation or tensor network methods [14, 15, 29–33, 36–40] will allow solutions nevertheless.
+ Using the same technique as for ∂_t ρ_s(t) yields:
+ ∂_t ρ^(1)_{s l_1}(t, t_1) = −(i/ℏ) H_{s,−}(t) ρ^(1)_{s l_1}(t, t_1) + Σ_{l_2 l̃_2} A^(1)_{l_2}(t) ∫_{t_0}^{t} dt_2 ⟨B^(1)_{l_2}(t) B^(1)_{l̃_2}(t_2)⟩_B ρ^(2)_{s l_1 l̃_2}(t, t_2, t_1) + δ(t − t_1) A^(1)_{l_1}(t_1) ρ_s(t_1 − 0^+),   (5)
+ where we use the interaction picture for the bath correlation function. Instead of an initial condition ρ^(1)_{s l_1}(t_1, t_1) = A_{l_1}(t_1) ρ_s(t_1 − 0^+), the δ term at the time of the initial condition is included; i.e., ρ^(1)_{s l_1}(·, t_1) is equal to zero (in the delta case) or not defined (in the initial-condition case) before time t_1. Note that t_1, t_2 of ρ^(2)(t, t_2, t_1) are not time ordered, since different delay/retardation times can occur in open quantum systems.
+ The form of ρ^(2) points to a general definition of ρ^(n), starting with ρ^(0)(t) = ρ_s(t):
+ ρ^(n)_{s l̃_1...l̃_n}(t, t̃_n, ..., t̃_1) = Σ_{k_1...k_M} Σ_{l_1...l_M} T_← [ Π_{j=1}^{n} A^(1)_{l̃_j}(m_j) δ_{l̃_j, l_{m_j}} δ_{k_{m_j},1} ] [ Π_{j=1, j≠m_i ∀i}^{M} ε^{k_j} A^{(k_j)}_{l_j}(j) ] ρ_s(t_0) ⟨T_← Π_{j=1, j≠m_i ∀i}^{M} B^{(k_j)}_{l_j}(j)⟩_B,   (6)
+ with t̃_i = m_i ε + t_0. Analogous to ρ^(1), this yields:
+ ∂_t ρ^(n)_{s l_1...l_n}(t, t_1, ..., t_n) = −(i/ℏ) H_{s,−}(t) ρ^(n)_{s l_1...l_n}(t, t_1, ..., t_n) + Σ_{l_{n+1} l̃_{n+1}} A^(1)_{l_{n+1}}(t) ∫_{t_0}^{t} dt_{n+1} ⟨B^(1)_{l_{n+1}}(t) B^(1)_{l̃_{n+1}}(t_{n+1})⟩_B ρ^(n+1)_{s l_1...l_n l̃_{n+1}}(t, t_1, ..., t_{n+1}) + Σ_{p=1}^{n} δ(t − t_p) A^(1)_{l_p}(t_p) × ρ^(n−1)_{s l_1...l_{p−1} l_{p+1}...l_n}(t_p − 0^+, t_1, ..., t_{p−1}, t_{p+1}, ..., t_n).   (7)
+ The last term is again a replacement for an initial condition: ρ^(n)_{s l_1...l_n}(t_p, t_1, ..., t_n) = A_{l_p}(t_p) ρ^(n−1)_{s l_1...l_{p−1} l_{p+1}...l_n}(t_p − 0^+, t_1, ..., t_{p−1}, t_{p+1}, ..., t_n) with t_p = max_i(t_i), and it is clear that ρ^(n)_{s l_1...l_n}(t, t_1, ..., t_n) = 0 for t < t_p. So for the last term only the p with the largest time t_p contributes. Furthermore, ρ^(n)_s is invariant under permutations of t_1, ..., t_n together with their corresponding l_1, ..., l_n.
+ Figure 2. (a) Single-photon occupations of the two 1D dielectric slabs from Fig. 1. The dotted lines show the full reference wave function solution. (b) Absolute values of the two-photon coherences. For both cases, the QNM frequencies of the slabs are identical, ω̃_1 = (0.06 − 0.0124i) eV, with coupling strength V_BA = V_AB = 0.0062 eV.
+ The physics behind Eq. (7) is very accessible: n corresponds to the maximum number of photons traveling in the bath at a given time t, so an exact truncation of the equations based on the traveling photons is possible.
+ Note that the photons on the left and right side states of the density matrix are counted cumulatively, so the transfer of a single-photon density requires two traveling photons (one on the left and one on the right side of the density matrix), as opposed to one traveling photon for a single-photon coherence. For other open-quantum-system equation-of-motion techniques such as the Nakajima-Zwanzig [41] or time-convolutionless (TCL) equations [41], the generators K in the equations of motion contain the system-bath coupling in any order. A calculation of higher-order contributions from K is generally cumbersome, involving higher products of system-bath correlation functions as well as a truncation at a given photon number. For the HEOM analog, only one system-bath correlation function appears in the second term of Eq. (7), cleanly separating the hierarchy by photon number. The first term of Eq. (7) describes the system dynamics. The second term represents the absorption of a bath photon, which entered the bath at time t_{n+1}. The last term describes photon emission into the bath.
+ Two-photon propagation: As a benchmark for the new approach, we consider two spatially separated quasinormal mode (QNM) cavities, coupled to a common photonic bath (Fig. 1(a)). The QNMs f̃_µ are an open-system analog to normal modes, which solve the Helmholtz equation under an outgoing radiation condition [42–48]. QNMs have complex eigenfrequencies ω̃_µ = ω_µ − iγ_µ with photon decay rate γ_µ > 0. Here, two dielectric slabs serve as QNM cavities as in Fig. 1(b). We assume an effective 1D problem with homogeneous continuation in the y, z directions. The model allows the analytical calculation of the modes (assuming a constant real permittivity ϵ_R) and coupling elements (cf. [49]). We include only the lowest-energy QNM, assuming that all other modes are off-resonant. Since the slabs are identical, both have the same frequency ω̃_A = ω̃_B = ω̃_1. However, we keep the indices for generality. The slabs are separated by the distance R, which is large enough for a separate quantization of the modes without inter-cavity coupling.
+ As a first step, we consider a population with initially one excitation (one photon on each side of the density matrix) in slab A and a vacuum bath. Therefore, the hierarchy truncates at n = 2, i.e. ρ^(n) = 0 for n > 2, and:
+ ρ^(2)_{s,l_1,l_2}(t, t_1, t_2) = Θ(t_1 − t_2) U^s(t, t_1) A^(1)_{l_1}(t_1) ρ^(1)_{s,l_2}(t_1 − 0^+, t_2) + Θ(t_2 − t_1) U^s(t, t_2) A^(1)_{l_2}(t_2) ρ^(1)_{s,l_1}(t_2 − 0^+, t_1),   (8)
+ using the initial conditions for ρ^(2). The truncation is exact since the maximal number of propagating photons at any time is set by the initial conditions.
+ Inserting Eq. (8) into Eq. (5), we obtain:
+ ∂_t ρ^(1)_{s l_1}(t, t_1) = −(i/ℏ) H_{s,−}(t) ρ^(1)_{s l_1}(t, t_1) + Σ_{l_2 l̃_2} A^(1)_{l̃_2}(t) ∫_{t_0}^{t_1} dt_2 ⟨B^(1)_{l̃_2}(t) B^(1)_{l_2}(t_2)⟩_B × U^s(t, t_1) A^(1)_{l_1}(t_1) ρ^(1)_{s l_2}(t_1 − 0^+, t_2) + Σ_{l_2 l̃_2} A^(1)_{l̃_2}(t) ∫_{t_1}^{t} dt_2 ⟨B^(1)_{l̃_2}(t) B^(1)_{l_2}(t_2)⟩_B × U^s(t, t_2) A^(1)_{l_2}(t_2) ρ^(1)_{s l_1}(t_2 − 0^+, t_1) + δ(t − t_1) A^(1)_{l_1}(t_1) ρ_s(t_1 − 0^+).   (9)
+ Eqs. (9) and (4) form a closed set of equations of motion for the system density matrix, which is exactly solvable (cf. [49]) for at most two traveling photons. Fig. 1(c) illustrates the connections between the equations, with one photon traveling through the bath from time t_1 = t − τ until t, requiring the calculation of ρ^(1). Intermittently, a second photon is emitted into the bath at t_2.
+ The dynamics of a specific system are determined by the system-bath correlation function tr_B( B^(1)_{l̃_2}(t) B^(1)_{l_2}(t_1) ρ_B ), which describes the emission of a photon into the bath at time t_1 and its reabsorption at time t. The correlation function results from the system-bath coupling and reads (cf. [49]):
+ C_µη(t − t′) ≈ 2 V_µη ℏ² ( Θ(t − t′) δ(t − t′ − τ) + Θ(t′ − t) δ(t − t′ + τ) ),   (10)
+ where µ, η are cavity indices (A or B). The coupling strength is given by V_µη = (1 + δ_µη) γ_1/2 with the cavity decay rate γ_1. Due to the topology, the inter-cavity coupling is exactly half the dissipation rate: for the 1D case, a photon emitted away from the other cavity will not return, while a photon emitted towards the other cavity can be transferred into that cavity. In higher dimensions, the inter-cavity coupling will be much smaller than the dissipation rate. The delay time τ in Eq. (10) depends implicitly on the involved cavities, with τ = R/c for µ ≠ η and τ = 0 otherwise. For one initial excitation, three system states |A⟩ = |10⟩, |B⟩ = |01⟩, |0⟩ = |00⟩ contribute, with the excitation in slab A or B, or both slabs in the ground state, respectively. Inserting Eq. (10) into Eq. (4) yields the equations of motion. As an example, the occupation in slab A, ⟨A|ρ_s(t)|A⟩, evolves as (cf. [49]):
+ ∂_t⟨A|ρ_s(t)|A⟩ = −2γ_A ⟨A|ρ_s(t)|A⟩ − 2V*_{BA} e^{−iω_B τ} ⟨0|ρ^(1)L_{s,0B}(t, t − τ)|A⟩ + c.c.   (11)
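+ The delayed argument in Eq. (11) follows directly from inserting the δ-shaped correlation function (10) into the memory integral of Eq. (4); schematically (one added reasoning step, not spelled out in the original text):
+ \int_{t_0}^{t} dt_1\, \delta(t - t_1 - \tau)\, \rho^{(1)}_{s\tilde{l}}(t, t_1) \;=\; \Theta(t - t_0 - \tau)\, \rho^{(1)}_{s\tilde{l}}(t, t - \tau).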
+ For the auxiliary density matrix ρ^(1), starting from Eq. (9) results in (cf. [49]):
+ ∂_t⟨0|ρ^(1)L_{s,0B}(t, t_1)|A⟩ = δ(t − t_1)⟨B|ρ_s(t_1)|A⟩ − γ_A ⟨0|ρ^(1)L_{s,0B}(t, t_1)|A⟩ − 2V_{BA} e^{iω_B τ} ⟨B|ρ^(1)R_{s,B0}(t_1, t − τ)|0⟩ − 2V_{BA} e^{iω_A τ} ⟨0|ρ^(1)L_{s,0B}(t − τ, t_1)|A⟩.   (12)
+ The remaining equations for the occupation in B, the coherences, and the matrix elements of ρ^(1) are calculated analogously (cf. [49]). For time-non-local interactions, the system density matrix in Eq. (11) couples only to the first auxiliary density matrix ρ^(1). Time-local processes such as the cavity photon dissipation are included in the zeroth step of the hierarchy.
+ Fig. 2(a) shows the time dynamics of the single-photon occupations in slabs A and B. The model system allows a calculation using the wave function (cf. [49]) as a benchmark. The HEOM (solid lines) and exact wave function (dotted lines) results agree perfectly. Over time, the single excitation in slab A dissipates into the bath. However, some photon density is transferred to the QNM of slab B with delay τ ≈ 44 ps. For the used parameters, the occupation in B is even larger than the occupation in A after some time. Eventually, the system arrives at a trapped state [14, 50–55] due to constructive interference from the inter-cavity transfer.
+ Note that the HEOM allows in principle the inclusion of Lindblad terms (e.g. for pumping), which the wave function does not. Also, an extension to two-photon processes is feasible for the HEOM. Fig. 2(b) shows the two-photon coherences (two photons on one side of the density matrix, none on the other) for the two slabs from Fig. 1(b), which includes at most two traveling photons, resulting in a calculation analogous to Fig. 2(a). The amplitudes of the intra-cavity coherences ⟨20|ρ_s|00⟩ and ⟨02|ρ_s|00⟩ resemble the dynamics of the densities in Fig. 2(a), since in principle the same independent processes are involved. The inter-cavity coherence ⟨11|ρ_s|00⟩ requires the transfer of just one photon and thus shows a rapid increase after t = τ. In the final equilibrium state, the probability (coherence squared) of an inter-cavity contribution matches the sum of the two intra-cavity probabilities.
+ A feasible calculation of the exact solution as shown here is limited to a small number of photons by the exponential scaling of the numerical complexity with the number of excitations. For systems requiring a higher number of traveling photons, a calculation of the higher steps in the hierarchy via matrix product states or other tensor networks [14, 15, 29–33, 36–40] may be possible, as well as analytic calculations in special setups. Furthermore, the HEOM allows a perturbative truncation of the hierarchy for systems with a small system-bath coupling. Thus, at least an approximate solution may be possible for higher excitation numbers.
+ In conclusion, we analyzed the derivation of hierarchical equations of motion and transferred the idea to open quantum systems with delay. The resulting equations allow a natural, easy truncation on the number of excitations in the bath, which is otherwise cumbersome for Nakajima-Zwanzig or time-convolutionless equations. The first implementation for single- and multi-photon transfer between two cavities demonstrated the feasibility of the approach. We expect that in the future more demanding implementations, including tensor network approaches, may allow the simulation of several photons traveling through complex quantum networks.
+ ∗ marten.richter@tu-berlin.de
+ [1] Y. Tanimura and R. Kubo, Time evolution of a quantum system in contact with a nearly Gaussian-Markoffian noise bath, Journal of the Physical Society of Japan 58, 101 (1989).
+ [2] Y. Tanimura, Numerically "exact" approach to open quantum dynamics: The hierarchical equations of motion (HEOM), The Journal of Chemical Physics 153, 020901 (2020).
+ [3] L. Ye, X. Wang, D. Hou, R.-X. Xu, X. Zheng, and Y. Yan, HEOM-QUICK: a program for accurate, efficient, and universal characterization of strongly correlated quantum impurity systems, WIREs Computational Molecular Science 6, 608 (2016).
+ [4] N. Lambert, T. Raheja, S. Ahmed, A. Pitchford, and F. Nori, BoFiN-HEOM: A bosonic and fermionic numerical hierarchical-equations-of-motion library with applications in light-harvesting, quantum control, and single-molecule electronics, arXiv preprint arXiv:2010.10806 (2020).
+ [5] T. Kramer, M. Noack, A. Reinefeld, M. Rodríguez, and Y. Zelinskyy, Efficient calculation of open quantum system dynamics and time-resolved spectroscopy with distributed memory HEOM (DM-HEOM), Journal of Computational Chemistry 39, 1779 (2018).
+ [6] J. Seibt and O. Kühn, Strong exciton-vibrational coupling in molecular assemblies. Dynamics using the polaron transformation in HEOM space, The Journal of Physical Chemistry A 125, 7052 (2021).
+ [7] T. Kramer, M. Noack, J. R. Reimers, A. Reinefeld, M. Rodríguez, and S. Yin, Energy flow in the photosystem I supercomplex: Comparison of approximative theories with DM-HEOM, Chemical Physics 515, 262 (2018).
+ [8] R. F. Oulton, V. J. Sorger, D. Genov, D. Pile, and X. Zhang, A hybrid plasmonic waveguide for subwavelength confinement and long-range propagation, Nature Photonics 2, 496 (2008).
+ [9] M. I. Stockman, Nanofocusing of optical energy in tapered plasmonic waveguides, Physical Review Letters 93, 137404 (2004).
+ [10] A. Orieux, M. A. Versteegh, K. D. Jöns, and S. Ducci, Semiconductor devices for entangled photon pair generation: a review, Reports on Progress in Physics 80, 076001 (2017).
+ [11] M. Weiß and H. J. Krenner, Interfacing quantum emitters with propagating surface acoustic waves, Journal of Physics D: Applied Physics 51, 373001 (2018).
+ [12] H. Jayakumar, A. Predojević, T. Kauten, T. Huber, G. S. Solomon, and G. Weihs, Time-bin entangled photons from a quantum dot, Nature Communications 5, 1 (2014).
+ [13] A. Carmele and S. Reitzenstein, Non-Markovian features in semiconductor quantum optics: quantifying the role of phonons in experiment and theory, Nanophotonics 8, 655 (2019).
+ [14] H. Pichler and P. Zoller, Photonic circuits with time delays and quantum feedback, Phys. Rev. Lett. 116, 093601 (2016).
+ [15] O. Kaestle, R. Finsterhoelzl, A. Knorr, and A. Carmele, Continuous and time-discrete non-Markovian system-reservoir interactions: Dissipative coherent quantum feedback in Liouville space, Phys. Rev. Research 3, 023168 (2021).
+ [16] S. Arranz Regidor, G. Crowder, H. Carmichael, and S. Hughes, Modeling quantum light-matter interactions in waveguide QED with retardation, nonlinear interactions, and a time-delayed feedback: Matrix product states versus a space-discretized waveguide model, Phys. Rev. Research 3, 023030 (2021).
+ [17] M. Richter and S. Hughes, Enhanced TEMPO algorithm for quantum path integrals with off-diagonal system-bath coupling: Applications to photonic quantum networks, Phys. Rev. Lett. 128, 167403 (2022).
+ [18] A. Caldeira and A. Leggett, Path integral approach to quantum Brownian motion, Physica A 121, 587 (1983).
+ [19] Y. Tanimura and S. Mukamel, Real-time path-integral approach to quantum coherence and dephasing in nonadiabatic transitions and nonlinear optical response, Phys. Rev. E 47, 118 (1993).
+ [20] N. Makri and D. E. Makarov, Tensor propagator for iterative quantum time evolution of reduced density matrices. I. Theory, J. Chem. Phys. 102, 4600 (1995).
+ [21] N. Makri and D. E. Makarov, Tensor propagator for iterative quantum time evolution of reduced density matrices. II. Numerical methodology, J. Chem. Phys. 102, 4611 (1995).
+ [22] A. Vagov, M. D. Croitoru, M. Glässl, V. M. Axt, and T. Kuhn, Real-time path integrals for quantum dots: Quantum dissipative dynamics with superohmic environment coupling, Phys. Rev. B 83, 094303 (2011).
+ [23] A. Strathearn, B. W. Lovett, and P. Kirton, Efficient real-time path integrals for non-Markovian spin-boson models, New Journal of Physics 19, 093009 (2017).
+ [24] A. Strathearn, P. Kirton, D. Kilda, J. Keeling, and B. W. Lovett, Efficient non-Markovian quantum dynamics using time-evolving matrix product operators, Nature Communications 9, 3322 (2018).
+ [25] D. Gribben, D. M. Rouse, J. Iles-Smith, A. Strathearn, H. Maguire, P. Kirton, A. Nazir, E. M. Gauger, and B. W. Lovett, Exact dynamics of nonadditive environments in non-Markovian open quantum systems, PRX Quantum 3, 010321 (2022).
+ [26] M. Cygorek, M. Cosacchi, A. Vagov, V. M. Axt, B. W. Lovett, J. Keeling, and E. M. Gauger, Simulation of open quantum systems by automated compression of arbitrary environments, Nature Physics, 1 (2022).
+ [27] J. Prior, I. de Vega, A. W. Chin, S. F. Huelga, and M. B. Plenio, Quantum dynamics in photonic crystals, Phys. Rev. A 87, 013428 (2013).
+ [28] F. Caycedo-Soler, A. Mattioni, J. Lim, T. Renger, S. Huelga, and M. Plenio, Exact simulation of pigment-protein complexes unveils vibronic renormalization of electronic parameters in ultrafast spectroscopy, Nature Communications 13, 1 (2022).
+ [29] S. R. Clark, J. Prior, M. J. Hartmann, D. Jaksch, and M. B. Plenio, Exact matrix product solutions in the Heisenberg picture of an open quantum spin chain, New Journal of Physics 12, 025005 (2010).
+ [30] A. H. Werner, D. Jaschke, P. Silvi, M. Kliesch, T. Calarco, J. Eisert, and S. Montangero, Positive tensor network approach for simulating open quantum many-body systems, Phys. Rev. Lett. 116, 237201 (2016).
+ [31] R. Rosenbach, J. Cerrillo, S. F. Huelga, J. Cao, and M. B. Plenio, Efficient simulation of non-Markovian system-environment interaction, New Journal of Physics 18, 023035 (2016).
+ [32] F. A. Schröder, D. H. Turban, A. J. Musser, N. D. Hine, and A. W. Chin, Tensor network simulation of multi-environmental open quantum dynamics via machine learning and entanglement renormalisation, Nature Communications 10, 1 (2019).
+ [33] A. D. Somoza, O. Marty, J. Lim, S. F. Huelga, and M. B. Plenio, Dissipation-assisted matrix product factorization, Phys. Rev. Lett. 123, 100502 (2019).
+ [34] V. Chernyak and S. Mukamel, Collective coordinates for nuclear spectral densities in energy transfer and femtosecond spectroscopy of molecular aggregates, J. Chem. Phys. 105, 4565 (1996).
+ [35] M. Richter and A. Knorr, A time convolution less density matrix approach to the nonlinear optical response of a coupled system-bath complex, Annals of Physics 325, 711 (2010).
+ [36] R. Orús, A practical introduction to tensor networks: Matrix product states and projected entangled pair states, Annals of Physics 349, 117 (2014).
+ [37] U. Schollwöck, The density-matrix renormalization group in the age of matrix product states, Annals of Physics 326, 96 (2011).
+ [38] J. Cirac, D. Pérez-García, N. Schuch, and F. Verstraete, Matrix product density operators: Renormalization fixed points and boundary theories, Annals of Physics 378, 100 (2017).
+ [39] F. Verstraete and J. I. Cirac, Matrix product states represent ground states faithfully, Physical Review B 73, 094423 (2006).
+ [40] G. Vidal, Classical simulation of infinite-size quantum lattice systems in one spatial dimension, Physical Review Letters 98, 070201 (2007).
+ [41] H.-P. Breuer and F. Petruccione, The Theory of Open Quantum Systems (Oxford University Press, 2002).
+ [42] G. García-Calderón and R. Peierls, Resonant states and their uses, Nuclear Physics A 265, 443 (1976).
+ [43] K. Lee, P. Leung, and K. Pang, Dyadic formulation of morphology-dependent resonances. I. Completeness relation, JOSA B 16, 1409 (1999).
+ [44] E. A. Muljarov, W. Langbein, and R. Zimmermann, Brillouin-Wigner perturbation theory in open electromagnetic systems, EPL (Europhysics Letters) 92, 50010 (2011).
+ [45] P. T. Kristensen, C. Van Vlack, and S. Hughes, Generalized effective mode volume for leaky optical cavities, Optics Letters 37, 1649 (2012).
+ [46] C. Sauvan, J.-P. Hugonin, I. S. Maksymov, and P. Lalanne, Theory of the spontaneous optical emission of nanosize photonic and plasmon resonators, Physical Review Letters 110, 237401 (2013).
+ [47] S. Franke, S. Hughes, M. K. Dezfouli, P. T. Kristensen, K. Busch, A. Knorr, and M. Richter, Quantization of quasinormal modes for open cavities and plasmonic cavity quantum electrodynamics, Physical Review Letters 122, 213901 (2019).
+ [48] P. T. Kristensen, K. Herrmann, F. Intravaia, and K. Busch, Modeling electromagnetic resonators using quasinormal modes, Advances in Optics and Photonics 12, 612 (2020).
+ [49] See Supplemental Material at [URL will be inserted by publisher] for (i) analytic coupling elements, (ii) equations of motion, and (iii) the approach using the wave function.
+ [50] M. Bello, G. Platero, J. I. Cirac, and A. González-Tudela, Unconventional quantum optics in topological waveguide QED, Science Advances 5, eaaw0297 (2019).
+ [51] S. Hughes and G. S. Agarwal, Anisotropy-induced quantum interference and population trapping between orthogonal quantum dot exciton states in semiconductor cavity systems, Physical Review Letters 118, 063601 (2017).
+ [52] A. L. Grimsmo, Time-delayed quantum feedback control, Physical Review Letters 115, 060402 (2015).
+ [53] N. Német, A. Carmele, S. Parkins, and A. Knorr, Comparison between continuous- and discrete-mode coherent feedback for the Jaynes-Cummings model, Physical Review A 100, 023805 (2019).
+ [54] K. Barkemeyer, R. Finsterhölzl, A. Knorr, and A. Carmele, Revisiting quantum feedback control: disentangling the feedback-induced phase from the corresponding amplitude, Advanced Quantum Technologies 3, 1900078 (2020).
+ [55] R. Finsterhölzl, M. Katzer, and A. Carmele, Nonequilibrium non-Markovian steady states in open quantum many-body systems: Persistent oscillations in Heisenberg quantum spin chains, Physical Review B 102, 174309 (2020).
+ [56] P. Lalanne, W. Yan, K. Vynck, C. Sauvan, and J.-P. Hugonin, Light interaction with photonic and plasmonic resonances, Laser & Photonics Reviews 12, 1700113 (2018).
+ [57] R.-C. Ge, P. T. Kristensen, J. F. Young, and S. Hughes, Quasinormal mode approach to modelling light-emission and propagation in nanoplasmonics, New Journal of Physics 16, 113048 (2014).
+ [58] S. Franke, M. Richter, J. Ren, A. Knorr, and S. Hughes, Quantized quasinormal-mode description of nonlinear cavity-QED effects from coupled resonators with a Fano-like resonance, Physical Review Research 2, 033456 (2020).
+ [59] T. Gruner and D.-G. Welsch, Green-function approach to the radiation-field quantization for homogeneous and inhomogeneous Kramers-Kronig dielectrics, Phys. Rev. A 53, 1818 (1996).
+ [60] S. Mukamel, Principles of Nonlinear Optical Spectroscopy (Oxford University Press, 1999).
+ Analytic coupling elements
+ We use analytic expressions of the mode frequencies, decay constants, and coupling elements for the numeric evaluation. For linearly polarized waves and assuming a homogeneous continuation in the y, z-direction, the problem reduces to the 1D model from Fig. 1(b). The QNM within each slab is given by [48, 56]
+ f̃_µ(x)|_{|x|<L/2} = e^{i n_R k_µ x} + e^{−i n_R k_µ x + iµπ},   (S1)
+ where n_R = √ϵ_R is the refractive index of the slab and k_µ = ω̃_µ/c is the QNM wavenumber. The QNM frequency ω̃_µ is [48, 56]
+ ω̃_µ L/c = ( 2πµ + i ln[ (n_R − n_B)²/(n_R + n_B)² ] ) / (2 n_R).   (S2)
+ Thus, the frequency of the first QNM f̃_1(x) is ω̃_1 = ω_1 − iγ_1 = (1 − 0.21i) c/L. The second QNM f̃_2(x) has a resonance frequency that is twice as large. Hence, as a first approximation, we take only the first QNM in our calculations.
+ our calculations.
1030
+ Outside
1031
+ of
1032
+ the
1033
+ cavity
1034
+ (|x|
1035
+ >
1036
+ L/2),
1037
+ we
1038
+ re-
1039
+ place
1040
+ the
1041
+ QNMs
1042
+ with
1043
+ regularized
1044
+ modes
1045
+ [57]
1046
+ ˜Fµ(x, ω)
1047
+ =
1048
+ � L/2
1049
+ −L/2 dx′GB(x, x′, ω)∆ϵ(x′) ˜fµ(x′)
1050
+ =
1051
+ (x/|x|)Mµ(ω)eiω|x|/c, where ∆ϵ(x) = ϵR − ϵB, |x| < L/2,
1052
+ and 0 otherwise, and
1053
+ Mµ(ω) = i
1054
+ 2L(π2 − 1)
1055
+
1056
+ si
1057
+ �(ω + π˜ωµ)L
1058
+ 2c
1059
+
1060
+ −si
1061
+ �(ω − π˜ωµ)L
1062
+ 2c
1063
+ ��
1064
+ (S3)
1065
+ is an analytical factor that vanishes for ω
1066
+
1067
+ ∞.
1068
+ si(x)
1069
+ =
1070
+ sin(x)/x is the unnormalized sinc-function.
1071
+ GB(x, x′, ω) = ie−iω|x−x′|/c/2 is the vacuum Green’s
1072
+ function for the case of linearly polarized waves, solving
1073
+ the Helmholtz equation
1074
+
1075
+ ∂2
1076
+ x + ω2
1077
+ c2
1078
+
1079
+ GB(x, x′, ω) = ω2
1080
+ c2 δ(x − x′).
1081
+ (S4)
1082
+ We locate the slab A at x = 0 and slab B at x = R (cf.
1083
+ Fig. 1(b)), so that ˜f1(x) = ˜fA(x) and ˜fB(x) = ˜fA(x−R).
1084
+ We quantize the QNMs following the procedure laid out
1085
+ in [47], with minor adjustments due to the 1D nature of
1086
+ the problem, e.g. taking the 1D analog of the electric
1087
+ field quantization and QNM Green function instead of
1088
+ the 3D expressions that were used in [47]. Since the QNM
1089
+ quantization relies on a complex dissipative permittivity,
1090
+ we add a constant imaginary part to the permittivities
1091
+ of the slabs and background medium: ϵα = ϵR/B + iακ
1092
+ (cf. [58]) so that the original values are retained in the
1093
+ limit α → 0. Taking the 1D analog of the quantization
1094
+ in dissipative media from [59], we find the electric field
1095
+ operator to be
1096
+ Eα(x) =
1097
+ � ∞
1098
+ 0
1099
+
1100
+
1101
+ dx′ i
1102
+ ωϵ0
1103
+ Gα(x, x′, ω)ˆjα(x′, ω) + H.a.,
1104
+ (S5)
1105
+ where G(x, x′, ω) is the Greens function of the dissipa-
1106
+ tive medium and ˆjα(x, ω) = ω
1107
+
1108
+ (ℏϵ0/π)ϵα
1109
+ I (x, ω)ˆb(x, ω)
1110
+ is the noise-current density operator, with ϵI denot-
1111
+ ing the imaginary part of the permittivity and ˆb(x, ω)
1112
+ a Bosonic photon annihilation operator.
1113
+ We use the Green's function expansion in terms of QNMs [43, 56, 57], G(x, x′, ω) = Σ_{µ=A,B} A_µ(ω) f̃_µ(x) f̃_µ(x′), where A_µ(ω) = ω/(2(ω̃_µ − ω)), and the QNM functions f̃_µ are replaced with the regularized modes F̃_µ outside their respective cavity volumes. Inserting the QNM Green's function into Eq. (S5), we find QNM operators analogous to [47]:
+ ã_A = √(2/(π ω_A)) ∫_0^∞ dω A_A(ω) [ ∫_{−L/2}^{L/2} dx √(ϵ^α_I(x, ω)) f̃^α_A(x) b̂(x, ω) + lim_{λ→∞} ∫_{L/2}^{λ} dx √(ϵ^α_I(x, ω)) F̃^α_A(x, ω) b̂(x, ω) + lim_{λ→∞} ∫_{−λ}^{−L/2} dx √(ϵ^α_I(x, ω)) F̃^α_A(x, ω) b̂(x, ω) ],   (S6)
+ which depend implicitly on α → 0. In the first integral, the limit α → 0 can be carried out immediately, so that this contribution vanishes, because lim_{α→0} ϵ^α_I = 0. In the other two integrals, the order of the limits cannot be exchanged, as pointed out in [58], so the limit λ → ∞ has to be taken first. The operators for the QNMs of cavity B are defined analogously, just spatially shifted by R. The QNM operators defined in Eq. (S6) are non-bosonic, with [ã_A, ã_A†] = S_AA, and
+ S_AA = (2/(π ω_A)) ∫_0^∞ dω A_A(ω) A*_A(ω) × [ lim_{λ→∞} ∫_{L/2}^{λ} dx ϵ^α_I(x, ω) F̃^α_A(x, ω) F̃^{*,α}_A(x, ω) + lim_{λ→∞} ∫_{−λ}^{−L/2} dx ϵ^α_I(x, ω) F̃^α_A(x, ω) F̃^{*,α}_A(x, ω) ].   (S7)
+ Analogous to [58], we employ the Helmholtz equation of the background Green's function (Eq. (S4)) to reduce the integral over x to the value of the modes at the limits of the integration volume. Taking the limit λ → ∞ first and then α → 0, we find
+ S_AA = (2c/γ_1) |M_1(ω̃_1)|²,   (S8)
+ where we used ω̃_A = ω̃_1.
+ where we used ˜ωA = ˜ω1.
1212
+ The overlap integral [˜aA, ˜a†
1213
+ B] = SAB is calculated ac-
1214
+ cordingly. We make use of the fact that the two slabs are
1215
+ identical except for their spatial separation and hence
1216
+ ˜ωA = ˜ωB = ˜ω1, to obtain
1217
+ SAB = 2c
1218
+ γ1
1219
+ |M1(˜ω1)|2 Re
1220
+ � ˜ω1
1221
+ 2ω1
1222
+ e−iω1R/c
1223
+
1224
+ e−γ1R/c.
1225
+ (S9)
1226
+ Since
1227
+ ��Re
1228
+
1229
+ ˜ω1e−iω1R/c/(2ω1)
1230
+ ���
1231
+ <
1232
+ 1, it follows that
1233
+ |SAB/SAA| < e−γ1R/c, due to the retarded interaction
1234
+ between the slabs.
1235
+ The QNMs penetrate through the
1236
+ boundary of the slab so that there is a non-zero overlap
1237
+ even without time delay. However, the mode is concen-
1238
+ trated at the cavity so that the overlap is small if the
1239
+ slabs are well enough separated. Below, the correlation
1240
+ functions are discussed for the case with finite time delay.
1241
+ The QNMs’ wavelength is λ1 = 2L, so a separation of a
1242
+ few dozen wavelengths, as used in the main text, leads
1243
+ to negligible contributions of the overlap.
1244
+ Thus, the QNM operators are symmetrized independently within their respective cavities, similar to the single-cavity case in [47]:
+ â_µ = ∫ dx ∫_0^∞ dω L_µ(x, ω) b̂(x, ω),   (S10)
+ with
+ L_µ(x, ω) = S_µµ^{−1/2} √( γ_µ ϵ_I(x, ω)/(π c ω_µ) ) ( A_µ(ω)/|M_1(ω̃_µ)| ) f̃_µ(x),   (S11)
+ and the mode function f̃_µ is replaced by the regularized mode F̃_µ outside the slab volume. The imaginary part of the permittivity and the bounds of the spatial integral include implicit limits, as discussed above.
+ We now define continuum operators ĉ(x, ω) = b̂(x, ω) − Σ_{µ=A,B} L*_µ(x, ω) â_µ [58], which commute with the symmetrized bosonic QNM operators and serve as the bath. While they are generally non-bosonic, as a first approximation we neglect the non-bosonic contributions. This allows us to decompose the full Hamiltonian H = ℏ ∫ dx ∫_0^∞ dω ω b̂†(x, ω) b̂(x, ω) into system and bath parts [58]:
+ H_S = ℏ Σ_{µ=A,B} ω_µ â_µ† â_µ,
+ H_B = ℏ ∫ dx ∫_0^∞ dω ω ĉ†(x, ω) ĉ(x, ω),
+ H_SB = ℏ Σ_{µ=A,B} ∫ dx ∫_0^∞ dω g_µ(x, ω) ĉ(x, ω) â_µ† + H.a.   (S12)
+ The coupling elements g_µ(x, ω) = −S_µµ^{−1/2} √( ϵ_I(x, ω)/(2π ω_µ) ) ω f̃_µ(x) are derived from the projectors L_µ(x, ω), with the pole at ω = ω̃_µ removed during the derivation, as shown in [58].
+ To derive the coupling strength of the interaction between the slabs mediated via the bath, we calculate the correlation function [34, 60] that characterizes the system-bath interaction in the HEOM formalism:
+ C_µη(t − t′) = ℏ² ∫_0^∞ dω ∫_0^∞ dω′ ∫ dx ∫ dx′ e^{−iω(t−t′)} g_µ(x, ω) g*_η(x′, ω′) ⟨ĉ(x, ω) ĉ†(x′, ω′)⟩_B.   (S13)
+ For ρ_B = |0⟩⟨0| (no initial photons), the trace results in a delta function, so only an integral over the coupling elements remains. This is calculated similarly to [58], i.e. by assuming that the coupling is sharply peaked at the QNM frequency, so that
+ ∫ dx g_µ(x, ω) g*_η(x, ω) ≈ S_11^{−1} (2c/γ_1) |M_1(ω̃_1)|² (γ_1/(2π)) ( e^{iωR_µη/c} + e^{−iωR_µη/c} ),   (S14)
+ where |R_µη| is R if µ ≠ η and 0 otherwise. Using S_11 = 2c |M_1(ω̃_1)|²/γ_1, and defining the retardation time τ = R_µη/c as an implicit function of µ and η, the correlation function becomes
+ C_µη(t − t′) = ( γ_1 ℏ²/(2π) ) ∫_0^∞ dω ( e^{iωτ} + e^{−iωτ} ) e^{−iω(t−t′)}.   (S15)
+ As a final approximation, we extend the lower limit to −∞ [58], to obtain the correlation function in Eq. (10).
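+ The last step uses the Fourier representation of the delta function (standard identity, added here for readability):
+ \frac{1}{2\pi}\int_{-\infty}^{\infty} d\omega\; e^{-i\omega\,(t - t' \mp \tau)} \;=\; \delta(t - t' \mp \tau),
+ which turns the two phase factors in Eq. (S15) into the two delayed delta terms of Eq. (10).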
+ Calculation of the equations of motion
+ For the equations of motion of the density-matrix elements, we convert Eq. (4) to a more explicit form by replacing the indices l → (α, ν_1ν_2, x, ω) of the system and bath operators,
+ A_l(t) → Â^α_{ν_1ν_2}(t),
+ B_l(t) → Σ_µ ⟨ν_1|â_µ|ν_2⟩ ∫ dx ∫_0^∞ dω g*_µ(x, ω) ĉ^{†,α}(x, ω) + H.a.,   (S16)
+ with Â_{ν_1ν_2} = |ν_1⟩⟨ν_2|, so that
+ ∂_t ρ_s(t) = −(i/ℏ) H_{s,−}(t) ρ_s(t) + Σ_{α,β=L,R} Σ_{ν_1...ν_4} (−1)^{α+β} Â^α_{ν_1ν_2}(t) × ∫_{t_0}^{t} dt_1 C^β_{ν_1ν_2ν_3ν_4}(t, t_1) ρ^{(1)β}_{s,ν_3ν_4}(t, t_1),   (S17)
+ where |ν_i⟩ is a system state, and α, β determine whether the operator acts on the left or right side of the density matrix. The sign is negative if α ≠ β. The correlation function is defined as C^L_{ν_1ν_2ν_3ν_4}(t, t_1) = Σ_{µη} ⟨ν_1|â_µ†|ν_2⟩ ⟨ν_3|â_η|ν_4⟩ C_µη(t − t_1), with C_µη from Eq. (10), and C^R_{ν_1ν_2ν_3ν_4}(t, t_1) = [ C^L_{ν_1ν_2ν_3ν_4}(t, t_1) ]*.
+ For brevity, we use |A⟩, |B⟩, |0⟩ as defined in the main text above Eq. (11). To derive Eq. (11), we take the expectation value with respect to state |A⟩ of (S17) to obtain
+ ∂_t⟨A|ρ_s(t)|A⟩ = −(i/ℏ) ⟨A|H_{s,−} ρ_s(t)|A⟩ + Σ_{α,β=L,R} Σ_{ν_1...ν_4} (−1)^{α+β} ∫_{t_0}^{t} dt_1 C^β_{ν_1ν_2ν_3ν_4}(t, t_1) × ⟨A| Â^α_{ν_1ν_2}(t) ρ^{(1)β}_{s,ν_3ν_4}(t, t_1) |A⟩.   (S18)
+ Since the |ν_i⟩ are orthogonal, only certain combinations of states and α, β survive. Using the definition of the QNM correlation function (Eq. (10)) and the initial conditions for ρ^(1) results in the first-order equation of motion given in Eq. (11). Similarly, we obtain an equation for the coherence ⟨A|ρ_s(t)|B⟩:
+ ∂_t⟨A|ρ_s(t)|B⟩ = −(γ_A + γ_B) ⟨A|ρ_s(t)|B⟩ − 2V*_{BA} e^{−iω_B τ} ⟨0|ρ^(1)L_{s,0B}(t, t − τ)|B⟩ + c.c.(A ↔ B).   (S19)
+ The equations for the occupation in slab B and the second coherence term are obtained from Eq. (11) and Eq. (S19), respectively, by exchanging A ↔ B.
+ rotating-frame representation of ρ(1) with respect to its
1463
+ time arguments, e.g.,
1464
+ ⟨0|ρ(1),L
1465
+ s,0B (t, t1)|A⟩ → e−iω1(t+t1)⟨0|ρ(1),L
1466
+ s,0B (t, t1)|A⟩, (S20)
1467
+ where we have used ωA = ωB = ω1. A similar derivation
1468
+ as for the matrix elements of ρs yields fast-rotating terms.
1469
+ Within the rotating-frame, ⟨0|ρ(1),L
1470
+ s,0B (t, t1)|A⟩ evolves ac-
1471
+ cording to Eq. (12). In the same manner, we derive:
1472
+ ∂t⟨A|ρ(1)R
1473
+ s,A0(t, t1)|0⟩ = δ(t − t1)⟨A|ρs(t1)|A⟩
1474
+ = −γA⟨A|ρ(1)L
1475
+ s,A0(t, t1)|0⟩
1476
+ − 2V ∗
1477
+ BAe−iωBτ⟨0|ρ(1)R
1478
+ s,0B(t1, t − τ)|A⟩
1479
+ − 2V ∗
1480
+ BAe−iωBτ⟨B|ρ(1)L
1481
+ s,A0(t − τ, t1)|0⟩.
1482
+ (S21)
1483
+ The last six matrix elements of ρ(1) are derived from
1484
+ Eq. (12) and (S21) by complex conjugation or exchang-
1485
+ ing the indices A and B. Note that ρ(1)(t, t1) vanishes for
1486
+ t < t1 or t1 < 0. Furthermore, only ρ(1)(t, t − τ) appears
1487
+ in Eq. (11) and Eq. (S19). Therefore, the last terms in
1488
+ Eq. (12) and Eq. (S21), respectively, do not contribute
1489
+ to the dynamics of ρs.
1490
+ For the two-photon coherences, we obtain (following a similar derivation as for the single-photon occupation):
+ ∂_t⟨20|ρ_s(t)|00⟩ = −2γ_A ⟨20|ρ_s(t)|00⟩ − √8 V*_{BA} e^{−iω_A τ} ⟨10|ρ^(1)L_{s,0B1B}(t, t − τ)|00⟩,   (S22)
+ where we use 0_B and 1_B to indicate that the initial system-bath interaction involves the transition of cavity B from the one-photon state to the ground state. Analogously, ⟨02|ρ_s(t)|00⟩ = (⟨20|ρ_s(t)|00⟩)(A ↔ B) and
+ ∂_t⟨11|ρ_s(t)|00⟩ = −(γ_A + γ_B) ⟨11|ρ_s(t)|00⟩ − √8 V*_{BA} e^{−iω_A τ} ⟨01|ρ^(1)L_{s,1B2B}(t, t − τ)|00⟩ − √8 V*_{AB} e^{−iω_B τ} ⟨10|ρ^(1)L_{s,1A2A}(t, t − τ)|00⟩ − 2V*_{BA} e^{−iω_A τ} ⟨01|ρ^(1)L_{s,0B1B}(t, t − τ)|00⟩ − 2V*_{AB} e^{−iω_B τ} ⟨10|ρ^(1)L_{s,0A1A}(t, t − τ)|00⟩.   (S23)
+ The equations for the matrix elements of ρ^(1) in the rotating frame read (keeping only those terms that contribute to ρ_s):
+ ∂_t⟨10|ρ^(1)L_{s,0B1B}(t, t_1)|00⟩ = δ(t − t_1)⟨11|ρ_s(t_1)|00⟩ − γ_A ⟨10|ρ^(1)L_{s,0B1B}(t, t_1)|00⟩ − √8 V*_{BA} e^{−iω_A τ} ⟨01|ρ^(1)L_{s,1B2B}(t_1, t − τ)|00⟩ − 2V*_{BA} e^{−iω_A τ} ⟨01|ρ^(1)L_{s,0B1B}(t_1, t − τ)|00⟩,
+ ∂_t⟨10|ρ^(1)L_{s,0A1A}(t, t_1)|00⟩ = −γ_A ⟨10|ρ^(1)L_{s,0A1A}(t, t_1)|00⟩ − 2V*_{BA} e^{−iω_A τ} ⟨10|ρ^(1)L_{s,0B1B}(t_1, t − τ)|00⟩,
+ ∂_t⟨10|ρ^(1)L_{s,1A2A}(t, t_1)|00⟩ = δ(t − t_1)⟨20|ρ_s(t_1)|00⟩ − γ_A ⟨10|ρ^(1)L_{s,1A2A}(t, t_1)|00⟩.   (S24)
+ The remaining three matrix elements are again obtained by exchanging A ↔ B.
+ Wave function approach
+ For initially one excitation in slab A from Fig. 1(b), the general wave function has the form
+ |ψ⟩ = N_A |A⟩|0⟩ + N_B |B⟩|0⟩ + ∫ dx ∫_0^∞ dω N_{x,ω} |0⟩|x, ω⟩.   (S25)
+ The first ket refers to the system state as defined above Eq. (11), and the second is the bath state with continuous spatial and frequency indices x, ω. N is the time-dependent amplitude of a particular state, with the initial conditions N_A(0) = 1, N_B(0) = N_{x,ω}(0) = 0. In the interaction picture, the dynamics of the states are governed by the Schrödinger equation with the system-bath interaction Hamiltonian from Eq. (S12). The QNM and bath operators carry the free evolution of the system and bath: â_µ(t) = e^{−iω_µ t} â_µ and ĉ(x, ω, t) = e^{−iωt} ĉ(x, ω). Multiplying the Schrödinger equation for (S25) with ⟨0|⟨A| from the left yields an equation for N_A:
+ iℏ ∂_t N_A(t) = ℏ ∫ dx ∫_0^∞ dω N_{x,ω} g_A(x, ω) e^{−iωt} e^{iω_A t}.   (S26)
+ Similarly, we obtain the equation for N_{x,ω}:
+ iℏ ∂_t N_{x,ω}(t) = ℏ [ N_A g*_A(x, ω) e^{−iω_A t} + N_B g*_B(x, ω) e^{−iω_B t} ] e^{iωt},
+ which we integrate formally and insert back into Eq. (S26) to find:
+ ∂_t N_A(t) = −(1/ℏ²) ∫_{t_0}^{t} dt′ [ C_AA(t − t′) N_A(t′) + e^{iω_A t − iω_B t′} C_AB(t − t′) N_B(t′) ],   (S27)
+ where we have inserted the definition of the QNM correlation function from Eq. (S13). Using Eq. (10) and ω_A = ω_B, we arrive at:
+ ∂_t N_A(t) = −γ_A N_A(t) − 2V*_{BA} e^{iω_B τ} N_B(t − τ) Θ(t − τ).   (S28)
+ An analogous derivation for N_B yields a similar equation with the indices switched (A ↔ B). The density matrix elements are calculated by multiplying the amplitudes with their complex conjugates, e.g., ⟨A|ρ_s|A⟩ = |N_A|².
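+ Eq. (S28) and its A ↔ B partner form a pair of delay differential equations that can be integrated with a simple history buffer. The following C sketch is illustrative only: it uses explicit Euler stepping, parameter values converted from the Fig. 2 caption and the τ ≈ 44 ps quoted in the main text, and an arbitrary step size; it is not the paper's implementation.
+ #include <stdio.h>
+ #include <math.h>
+ #include <complex.h>
+ int main(void) {
+     const double hbar  = 0.6582119;            /* eV*ps */
+     const double gam   = 0.0124 / hbar;        /* decay rate gamma_1 [1/ps]      */
+     const double omega = 0.06   / hbar;        /* QNM frequency omega_1 [1/ps]   */
+     const double V     = 0.0062 / hbar;        /* coupling V_BA = V_AB [1/ps]    */
+     const double tau   = 44.0;                 /* delay R/c [ps]                 */
+     const double dt    = 0.001;                /* Euler step [ps] (assumption)   */
+     const int    steps = 300000;               /* propagate to t = 300 ps        */
+     const int    D     = (int)(tau / dt + 0.5);/* delay measured in steps        */
+     static double complex NA[300001], NB[300001];   /* full amplitude history    */
+     double complex ph = cexp(I * omega * tau);       /* phase factor e^{i w tau} */
+     NA[0] = 1.0; NB[0] = 0.0;                  /* photon initially in slab A     */
+     for (int n = 0; n < steps; ++n) {
+         double complex dA = -gam * NA[n];
+         double complex dB = -gam * NB[n];
+         if (n >= D) {                          /* Theta(t - tau): delayed feed   */
+             dA += -2.0 * V * ph * NB[n - D];
+             dB += -2.0 * V * ph * NA[n - D];
+         }
+         NA[n + 1] = NA[n] + dt * dA;           /* explicit Euler update          */
+         NB[n + 1] = NB[n] + dt * dB;
+         if ((n + 1) % 60000 == 0)              /* occupations |N_A|^2, |N_B|^2   */
+             printf("t = %6.1f ps  |NA|^2 = %.4f  |NB|^2 = %.4f\n", (n + 1) * dt,
+                    creal(NA[n+1] * conj(NA[n+1])), creal(NB[n+1] * conj(NB[n+1])));
+     }
+     return 0;
+ }
+ The same history-buffer idea carries over to the HEOM set (11)-(12), where the delayed arguments ρ^(1)(t, t − τ) play the role of N(t − τ).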
69E0T4oBgHgl3EQfwAF2/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
8tE2T4oBgHgl3EQflgdv/content/tmp_files/2301.03989v1.pdf.txt ADDED
@@ -0,0 +1,1454 @@
+ GPU-based high-precision orbital propagation of large sets of initial conditions through Picard-Chebyshev augmentation
+ Alessandro Masat∗ and Camilla Colombo†
+ Politecnico di Milano, Milano, Italy, 20156
+ Arnaud Boutonnet‡
+ European Space Agency (ESA-ESOC), Darmstadt, Germany, 64293
+ The orbital propagation of large sets of initial conditions under high accuracy requirements is currently a bottleneck in the development of space missions, e.g. for planetary protection compliance analyses. The proposed approach can include any force source in the dynamical model through efficient Picard-Chebyshev (PC) numerical simulations. A two-level augmentation of the integration scheme is proposed, to run an arbitrary number of simulations within the same algorithm call, fully exploiting high-performance and GPU (Graphics Processing Units) computing facilities. The performances obtained with an implementation in the C and NVIDIA® CUDA® programming languages are shown, on a test case taken from the optimization of a Solar Orbiter-like first resonant phase with Venus.
+ Complex trajectory solutions have become a standard choice for interplanetary missions, and often include several
18
+ gravity assist maneuvers to reach remote space regions with limited fuel consumption. Two recent missions, the
19
+ ESA/NASA mission Solar Orbiter [1] and the ESA mission JUICE [2], are meaningful ongoing cases. The former
20
+ first features preparatory flybys of Earth and Venus, then exploits several resonant encounters with Venus to raise the
21
+ spacecraft inclination over the ecliptic and observe the Sun’s polar regions. The latter requires multiple flybys to reach
22
+ Jupiter, and then repeatedly swings by different Jupiter’s moons to fulfill its scientific observational objectives. Both
23
+ missions would not be practically feasible without all the designed flybys, as every saved kilogram of fuel means more
24
+ mass available to board scientific equipment.
25
+ The common framework where high-energy multi-flyby trajectories are designed is the patched conics approximation,
26
+ whose simple but effective model allows a consistent preliminary mission analysis. Feasible gravity assist maneuvers are
27
+ identified and the consequent optimization of the initial trajectory guess can be performed, by refining the preliminary
28
+ orbital parameters and/or adding correction maneuvers to meet the operational requirements and fit the full body
29
+ ∗PhD Candidate, Department of Aerospace Science and Technology, Via G. La Masa 34, 20156, Milano, Italy, alessandro.masat@polimi.it
30
+ †Associate Professor, Department of Aerospace Science and Technology, Via G. La Masa 34, 20156, Milano, Italy, camilla.colombo@polimi.it
31
+ ‡Senior Mission Analyst, HSO-GFA, ESA-ESOC, Robert-Bosch-Str. 5, D-64293 Darmstadt, Germany, arnaud.boutonnet@esa.int
32
+ arXiv:2301.03989v1 [cs.DC] 10 Jan 2023
33
+
34
+ dynamics.
35
+ Nevertheless, the real-life environment features more complex physical phenomena, whose perturbing effects may have significant consequences on the nominal mission. Repeated flybys also imply a higher impact risk of disposal objects with the planets flown by, possibly affecting the mission compliance with planetary protection requirements. COSPAR [3] maintains worldwide planetary protection policies; e.g., ESA demands the use of at least the N-body Newtonian gravitational model for the compliance assessment [4]. This accuracy requirement has so far limited the analysis to Monte Carlo based techniques [5–9] with Cartesian equations of motion, whose simulations inherently carry a heavy computational burden that may limit the mission development time. Recent developments [10] nearly halved the required computational time by implementing the Kustaanheimo-Stiefel [11] formulation of the dynamical motion.
+ This work takes inspiration from the results obtained in [12, 13], whose aim was to optimize a multi-flyby trajectory arc while taking advantage of the natural dynamics of the N-body relativistic environment. Despite the conceptual complexity, the combined b-plane and Picard-Chebyshev (PC) approach led to an overall computationally feasible algorithm, with a Matlab® sequential implementation converging to an optimal trajectory in a few minutes only. At the core of the efficiency lies the fixed-point nature of the PC numerical scheme, which minimizes the need of reading and interpolating ephemerides data. The test case is described in more detail in Section VI.A. The set of trajectories generated by the optimization process is used to validate the concepts proposed in this work, in terms of their parallel propagation with the modified version of the PC method.
+ The modified PC method has been continuously developed in the past few years, both in its formulation and implementation, outlining possible applications for Earth orbits, where it contributed to increase the efficiency of the numerical analyses. Junkins et al. [14] analyzed the performances of the method comparing its efficiency against the Runge-Kutta-Nystrom 12(10) integrator, proposing also a second-order version. Later, Koblick and Shankar [15] extended the analysis to the propagation of accurate orbits, testing different force models with NASA's Java Astrodynamics Toolkit. Woollands et al. [16–18] applied the method as numerical integrator for the solution of the Lambert two-point boundary value problem, assessing also the benefits of adopting the Kustaanheimo-Stiefel formulation of the dynamics and proposing a solution for the multi-revolution trajectory design. Swenson et al. [19] applied the modified PC method to the circular restricted three-body problem, using the differential correction approach. Singh et al. [20] used the method as the numerical integration scheme for their feasibility study on quasi-frozen, near polar and low altitude lunar orbits, including the N-bodies and the spherical harmonics perturbations. The fixed-point nature of the method was exploited by Koblick et al. [21] to design low-thrust trajectories as an optimal control problem, discretizing the control impulses and also including the Earth's oblateness J2 perturbation. Macomber et al. [22] introduced the concepts of cold, warm, and hot starts of the method, addressing possible efficiency improvements by means of better initial conditions, and variable-precision force models taking advantage of the fixed-point nature of the algorithm. Woollands et al. [23] extended the optimal low-thrust design to a high-fidelity model for the non-spherical Earth, considering an arbitrary number of spherical harmonics in the perturbing acceleration. Woollands and Junkins [24] developed the Adaptive PC method, including an integral error feedback that accelerates the convergence of the Picard iterations and an empirical law to determine the segment length and polynomial degree of the method, based on previous stability analyses. Atallah et al. [25] compared the method with other sequential integration techniques on different Earth-based orbital cases.
+ Despite the parallelization possibilities, the method's instability when dealing with long integration spans and the limited number of nodes (about 200) required to reach machine-precision levels are disadvantages for the massively parallel/GPU (Graphics Processing Units) implementation of the PC integration scheme. Differently from the previous developments of the method, this work introduces an augmentation of the dynamical system being integrated, to enable stable massive parallelism for the short-term propagation of large sets of initial conditions. The results obtained in [12, 13], i.e. all the simulated trajectories analyzed to find the optimal one, are re-run as a direct application of the proposed approach, discussing the parallelization options in deep detail. Within a single run, the common time nodes of all the trajectory arcs allow to rework the iterative refinement process of PC. Instead of the six-dimensional Cartesian state of the single trajectory, the propagation is performed for a two-level augmented state, made of a properly sorted collection of the states of multiple trajectories at the same time node (see the layout sketch below). Regardless of the sorting choices of the single states within the augmented system, the fundamental mathematical structure of the PC process remains unaltered, without the need of re-defining the method's steps, coefficients and matrices. The integration of the newest developments of the method, particularly the second-order version [14] with error feedback [24], would therefore be applicable to the augmented version as well. The two different augmentation levels allow to preserve the fine-grain flexibility that simulating each single trajectory alone would have (if feasible), without a too large sacrifice in computational efficiency.
+ feasible, simulating each single trajectory alone would have, without a too large sacrifice in computational efficiency.
89
+ Based on a different approach, GPU-based programs for the propagation of large sets of initial conditions already
90
+ exist: Geda et al. [26] developed for the European Space Agency the CUDAjectory tool, a GPU-based propagator
91
+ that implements the Runge-Kutta-Fehlberg 7/8 scheme. The massive parallelism is exploited by taking the forward
92
+ integration step in parallel for many different samples, parallelizing the evaluation of the dynamics function and the
93
+ simple matrix multiplications involved in the evaluation of the intermediate sub steps. Another interesting feature
94
+ implemented in CUDAjectory is the optimized ephemerides reading and storage technique, following the work of
95
+ Schrammel et al. [27], which increases the data locality and minimizes the required memory transactions. Despite
96
+ also propagating large sets of trajectories, the proposed work differs from the CUDAjectory approach in the adopted
97
+ numerical scheme. The PC method is by itself a parallelizable routine, the augmented version proposed and implemented
98
+ in this work contributes to enhance this aspect, resulting in a highly scalable and performing program.
99
On a final note, the concept of the proposed approach may extend to the more general uncertainty propagation case, applied also to planetary and terrestrial systems, for robust analysis or collision probability computation. The heavy computational burden has promoted the adoption of simplified approaches that aim at achieving acceptable accuracy in the propagated uncertainty for lower computational costs [28]. The heavy burden of accurate Monte Carlo simulations was found to be a major challenge, for instance, even in the design of the touchdown of the Hayabusa 2 mission [29] and in the characterisation of the impact probability with Earth of the asteroid Apophis [30]. It is beyond the scope of this work to detail the existing uncertainty propagation techniques that have been developed to reduce the computational burden. The only similarity with the proposed approach is the accessible reduction of the computational time, here achieved using an inexpensive GPU, whose logic could be transferred to any other uncertainty propagation method that requires the simulation of relatively large sets of initial conditions.
111
II. Picard-Chebyshev integration method

A. Integration concept

Picard iterations [31] are a method that can be used to obtain an approximation of the solution of initial/boundary value problems. Denoting the state of dimension $n$ with $\mathbf{x}$, the independent variable with $t$, the initial/boundary condition with $\mathbf{x}_0$, and the dynamics function with $\mathbf{f}(\mathbf{x}, t)$, the problem is defined as:

$$\frac{d\mathbf{x}}{dt} = \mathbf{f}(\mathbf{x}, t), \qquad \mathbf{x}_0 = \mathbf{x}(t_0) \tag{1}$$
120
Starting from an initial approximation $\mathbf{x}^{(0)}(t)$ of the actual solution $\mathbf{x}(t)$ in the interval $[t_0, t]$ of the initial/boundary value problem presented in Equation (1), the $i$-th Picard iteration improves the previous approximation $\mathbf{x}^{(i-1)}(t)$ of $\mathbf{x}(t)$ with $\mathbf{x}^{(i)}(t)$ as in [31]:

$$\mathbf{x}^{(i)}(t) = \mathbf{x}_0 + \int_{t_0}^{t} \mathbf{f}\left(\mathbf{x}^{(i-1)}(s), s\right) ds \tag{2}$$

The method converges for a good enough initial approximation $\mathbf{x}^{(0)}(t)$ and for $i \to +\infty$ [31].
133
In the analytical Picard iteration context, performing more than one iteration is in general hard. The increasingly complex expressions for $\mathbf{x}^{(i)}(t)$ make it difficult to retrieve closed-form solutions after the first 2-3 steps [32]. At the same time, numerically computing the integral functions by quadrature might not suffice in accuracy, as in general only the first few iterations improve the function approximation. In the attempt to develop parallelizable routines for the integration of the dynamical motion, the PC method was built combining the Picard iterations with the Chebyshev polynomial approximation [33]. A possible derivation of the method, following the work of Fukushima [34], can be summarized in three steps:
1) Select a good enough initial guess $\mathbf{x}^{(0)}(t)$.
2) Approximate $\mathbf{f}(\mathbf{x}, t)$ and $\mathbf{x}^{(0)}(t)$ with their Chebyshev polynomial expansion.
3) Perform a Picard iteration to update the coefficients of the interpolating Chebyshev polynomials.
The Picard iterations halt when the stopping conditions are met, based on the maximum difference between two consecutive iterations dropping below some user-specified tolerance.
147
The method so defined allows performing many more Picard iterations than the analytical case. The expressions involved always remain of the same type, i.e. Chebyshev polynomials. The function approximation becomes an interpolation through nodes that should be close to the true trajectory, instead of a global function whose value after the iterations still depends on the choice of initial guess. Furthermore, a few iterations suffice to drop below a low tolerance if the real solution $\mathbf{x}(t)$ differs from the initial guess $\mathbf{x}^{(0)}(t)$ only because of small perturbations [31]. Starting from the unperturbed Keplerian solution for the generic weakly perturbed two-body problem, a relatively fast convergence of the method is ensured [34]. In the context of orbital simulations, Macomber [35] referred to this type of initial guess as warm-starting the PC iteration method, because the analytic solution of the dominant part of the dynamics is used to reduce the number of iterations required. Differently, the cold start was defined by simply setting all the trajectory samples equal to the initial condition. In general, the closer the initial guess to the true trajectory, the lower the number of iterations will be. Semi-analytic initial guesses or results of propagations from simpler models are also an option, and in the case of three-body-like perturbed trajectories they would be a better choice than the Keplerian approximation. Macomber also introduced the concept of hot start in the case of time spans covering multiple Earth planetary orbits [35], where the first orbit was used to compute the difference between the Keplerian guess and the converged trajectory. The near-periodicity of the spherical harmonics perturbation was then exploited, including this difference in the starting trajectory and achieving a further reduction of the iterations required for convergence.
163
B. Matrix form for vectorized and parallel computation

The method is suitable for parallel or vector implementation (indeed Fukushima also proposed a vectorized version [36]), in particular for the evaluation of the dynamics function and the execution of the matrix multiplications. More recent works on this technique by Bai and Junkins developed the modified PC method [37] and a CUDA implementation for NVIDIA GPUs [32]. For compactness and to better highlight the parallelization possibilities, the method is presented following the matrix formulation by Koblick et al. [38].
169
For $N$ Chebyshev nodes and the integration interval $[t_0, t_{N-1}]$, the independent variable $t$ is sampled up-front for $j = 0, 1, ..., N-1$ as

$$t_j = \omega_2 \tau_j + \omega_1 \tag{3}$$

with

$$\tau_j = -\cos\left(\frac{j\pi}{N-1}\right), \qquad \omega_1 = \frac{t_{N-1} + t_0}{2}, \qquad \omega_2 = \frac{t_{N-1} - t_0}{2} \tag{4}$$
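As a purely illustrative aid (a sketch, not the authors' code), the sampling of Equations (3)-(4) can be written in C as:

    #include <math.h>

    /* Fill t[0..N-1] with the Chebyshev-sampled time nodes of Eqs. (3)-(4). */
    void sample_nodes(int N, double t0, double tN, double *t)
    {
        const double pi = acos(-1.0);
        const double w1 = 0.5 * (tN + t0);    /* omega_1: interval midpoint   */
        const double w2 = 0.5 * (tN - t0);    /* omega_2: interval half-width */
        for (int j = 0; j < N; ++j) {
            double tau = -cos(pi * j / (N - 1));  /* tau_j in [-1, 1] */
            t[j] = w2 * tau + w1;                 /* Eq. (3)          */
        }
    }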
186
Given the $n$-dimensional sampled states $\mathbf{y}^{(i-1)}(t_j) = \mathbf{y}_j^{(i-1)}$, $j = 0, ..., N-1$, collected as a matrix $\mathbf{y}^{(i-1)}$ of dimension $N \times n$ computed at the Picard iteration $i-1$, the whole process can be summarized in three sequential steps to obtain the states at the iteration $i$. The first one collects the evaluations of the dynamics function $\mathbf{f}$ in the $N \times n$ force matrix $\mathbf{F}$ [38]:

$$\mathbf{F}_{j+1}^{(i)} = \omega_2\, \mathbf{f}\left(\mathbf{y}_j^{(i-1)}, t_j\right), \qquad j = 0, ..., N-1 \tag{5}$$

Secondly, identifying with $\mathbf{A}$, $\mathbf{C}$, $\mathbf{S}$ the method's constant matrices, whose definition can be found in [38], the $N \times n$ matrix $\mathbf{B}$ is obtained by rows as [38]

$$\mathbf{B}_1 = \mathbf{S}\mathbf{A}\mathbf{F} + 2\mathbf{y}_0, \qquad \mathbf{B}_j = \mathbf{A}\mathbf{F}, \qquad j = 2, ..., N \tag{6}$$

Third and last, the $N \times n$ matrix of the state guesses $\mathbf{y}^{(i)}$ for the $i$-th Picard iteration is

$$\mathbf{y}^{(i)} = \mathbf{C}\mathbf{B} \tag{7}$$

The iteration process stops when the maximum state difference between two consecutive Picard iterations $\mathbf{y}^{(i)}$ and $\mathbf{y}^{(i-1)}$ drops below a specified relative or absolute tolerance, upon the user's choice.
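For illustration, one iteration of Equations (5)-(7) could be sketched in C with OpenBLAS; eval_f is a hypothetical placeholder for the scaled dynamics of Equation (5), and the first-row correction with $\mathbf{S}$ is only hinted at in a comment:

    #include <cblas.h>

    /* Hypothetical placeholder for w2*f(y, t): here simply f = y. */
    static void eval_f(const double *y, double t, double w2, int n, double *out)
    {
        (void)t;
        for (int k = 0; k < n; ++k) out[k] = w2 * y[k];
    }

    /* One Picard iteration, Eqs. (5)-(7). Row-major N x n matrices y, F, B;
       A and C are the N x N constant matrices of the method [38]. */
    void picard_iteration(int N, int n, const double *t, double w2,
                          const double *A, const double *C,
                          double *y, double *F, double *B)
    {
        for (int j = 0; j < N; ++j)                 /* Eq. (5) */
            eval_f(&y[j * n], t[j], w2, n, &F[j * n]);

        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, /* B = A*F */
                    N, n, N, 1.0, A, N, F, n, 0.0, B, n);
        /* ... Eq. (6): correct B's first row to S*A*F + 2*y0 (omitted) ... */

        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, /* Eq. (7) */
                    N, n, N, 1.0, C, N, B, n, 0.0, y, n);      /* y = C*B */
    }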
212
Despite the proved theoretical convergence, large integration spans may lead to numerical instabilities, due to the accumulation of round-off errors, even with large $N$, as multiple orbital revolutions take place [32, 34, 37]. Fukushima [34] suggests a piece-wise approach as a workaround, which has been implemented in this work and uses the modified PC method to integrate orbit by orbit in sequence∗ until the end of the span.

The core steps of the proposed algorithm follow the presented scheme [32, 37], together with the automatic generation of the Keplerian initial guess spanning one nominal orbital period.
218
III. Two-level system augmentation

The instability problems just described make the PC algorithm inefficient for massive parallelism, since increasing the number of nodes would not result in improved accuracy after a certain point. Nevertheless, an increase in parallel efficiency can be obtained when more trajectories are propagated, in the way the following lines describe.

A. One-level augmentation

Instead of the evolution of the sole trajectory determined by the initial condition $\mathbf{y}_0$, the system being integrated can be re-written so that $M$ different trajectories sampled on the same $N$ time nodes can be processed within a unique iterative process. At the iteration $i$, the matrix $\mathbf{Y}^{(i)}$ collects all the samples of all the trajectories; its $j$-th row is related to the $j$-th time sample of the $m$-th trajectory by:

$$\mathbf{Y}_j^{(i)} = \left[\, \mathbf{y}_{j,1}^{(i)} \;\cdots\; \mathbf{y}_{j,m}^{(i)} \;\cdots\; \mathbf{y}_{j,M}^{(i)} \,\right], \qquad j = 1, ..., N \tag{8}$$

∗The proposed implementation automatically handles either forward or backward integration.
242
and similarly for the dynamics function evaluations collected in the matrix $\mathbf{F}^{(i)}$:

$$\mathbf{F}_j^{(i)} = \left[\, \mathbf{F}_{j,1}^{(i)} \;\cdots\; \mathbf{F}_{j,m}^{(i)} \;\cdots\; \mathbf{F}_{j,M}^{(i)} \,\right], \qquad j = 1, ..., N \tag{9}$$

whose elements are still computed per sample:

$$\mathbf{F}_{j,m}^{(i)} = \omega_2\, \mathbf{f}\left(\mathbf{y}_{j,m}^{(i-1)}, t_{j-1}\right), \qquad j = 1, ..., N \tag{10}$$
262
In principle, building the augmented system only requires defining $\mathbf{Y}^{(i)}$ by stacking the $M$ different trajectory matrices along the columns, and $\mathbf{F}^{(i)}$ undergoes exactly the same modification. The structure of the PC iterations remains unchanged and features the usual steps. First, evaluate the dynamics function for all the $N$ states of all the $M$ trajectories with $\mathbf{F}_{j,m}^{(i)} = \omega_2\, \mathbf{f}(\mathbf{y}_{j,m}^{(i-1)}, t_{j-1})$. Second, perform the matrix operations $\mathbf{B}_1 = \mathbf{S}\mathbf{A}\mathbf{F} + 2\mathbf{Y}_0$ and $\mathbf{B}_j = \mathbf{A}\mathbf{F}$, for $j = 2, ..., N$. Third and last, update the guesses for all the $M$ trajectories with $\mathbf{Y}^{(i)} = \mathbf{C}\mathbf{B}$.
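Computationally, the one-level augmentation amounts to widening the column count of the matrices passed to the very same BLAS calls; a sketch under the same assumptions as the previous snippet:

    #include <cblas.h>

    /* One augmented Picard step: M trajectories stacked along the columns,
       so F, B, Y are N x (n*M) row-major matrices. Only the column count
       changes with respect to the single-trajectory case. */
    void augmented_picard_step(int N, int n, int M,
                               const double *A, const double *C,
                               const double *F, double *B, double *Y)
    {
        const int cols = n * M;
        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, /* B = A*F */
                    N, cols, N, 1.0, A, N, F, cols, 0.0, B, cols);
        /* ... first row corrected with S and Y0 as in Eq. (6) (omitted) ... */
        cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, /* Y = C*B */
                    N, cols, N, 1.0, C, N, B, cols, 0.0, Y, cols);
    }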
270
B. Two-level augmentation

The stack-along-columns rule can be applied again, this time collecting in one single matrix $P$ groups of $M_p$ different trajectories each. The augmented matrix $\mathbf{Y}^{(i)}$ is now built as

$$\mathbf{Y}_j^{(i)} = \left[\, \mathbf{Y}_{j,1}^{(i)} \;\cdots\; \mathbf{Y}_{j,p}^{(i)} \;\cdots\; \mathbf{Y}_{j,P}^{(i)} \,\right], \qquad j = 1, ..., N \tag{11}$$

with

$$\mathbf{Y}_{j,p}^{(i)} = \left[\, \mathbf{y}_{j,p,1}^{(i)} \;\cdots\; \mathbf{y}_{j,p,m}^{(i)} \;\cdots\; \mathbf{y}_{j,p,M_p}^{(i)} \,\right], \qquad j = 1, ..., N \tag{12}$$
297
In principle, infinitely many augmentation levels could be built relying on the same logic, and none of them would require modifications to the core PC algorithm structure. Nevertheless, a re-definition of the iteration error can be helpful for practical purposes, since the augmentation rationale is purely computational.

Two strategies can be addressed. The first uses a traditional error definition, treating the trajectory samples as if they were part of a unique system, whose maximum error is compared against the iteration stopping condition. The second introduces a more flexible per-block error definition, treating the different trajectory blocks as independent, so that the augmentation has a purely computational purpose. Both approaches have advantages and drawbacks. The former allows a simpler implementation and is inevitably more computationally efficient than the latter, because of the reduced overhead compared to maintaining the group split. However, dissimilar trajectories requiring a significantly different number of iterations would keep the computational resources busy for already converged blocks, while the per-block definition allows far more flexibility in this regard.

This work uses the two augmentation levels to build a hybrid approach, treating the outer blocks as independent and the inner ones as a single system in a stricter sense. In this way, groups of similar trajectories can be considered as unique but separate augmented systems, allowing the integration performance to be maximized. The two-level augmentation also provides a framework to deal with single trajectories within the same high-performance computing context, considering them as a group made of only one member. As a practical example, the outer augmentation level could be used to "isolate" a sub-group of trajectories experiencing a planetary flyby, since their dynamics would become significantly different from the rest of the samples.
317
IV. Parallel computing, GPU computing and NVIDIA® CUDA® fundamentals

Parallelizable programs are characterized by parts that can be executed simultaneously. Conceptually, task-parallel and data-parallel routines may exist: the former features completely independent tasks that need not be executed one before the other, while the latter is identified by a common and repeated task to be executed on different data. This work focuses on this second aspect, whose features enable efficient GPU computing when massive parallelization is possible. For instance, taking the product of two matrices is a highly parallelizable task, as any element of the result matrix could be computed in parallel. Similarly, in the orbital propagation of large sets of initial conditions† the dynamics function could be evaluated in parallel for all the states.
325
A. Shared memory parallelism with multiple CPUs: OpenMP®

Prior to discussing the rationale of GPU computing, the most straightforward parallelization concept involves the use of multiple CPUs (Central Processing Units). Shared memory parallelism is the simplest scenario, where all the machine's compute cores have access to the same, common memory locations. More complex supercomputers use instead a distributed memory logic, where groups of CPUs access their own independent memory. Such systems also include a communication network, to distribute and collect the computed data on the different nodes, and require their own programming paradigm that includes the data and message passing routines [39].

OpenMP is a set of pre-processor instructions that enable simple shared memory parallelization of C, C++ and Fortran programs [40], which take action during code compilation. Only minor modifications are required to accelerate the most intensive parts of a program, and OpenMP instructions are simply ignored and treated as comments if the OpenMP compilation flag is disabled. Common parallelization instructions involve for loops, which can include some extra functionality: for instance, perfectly nested loops can be collapsed into a single larger loop, increasing the program efficiency, and specific instructions can aid the memory management. Despite the simple interface, most programs require at least some variables to be declared private to each worker, since OpenMP treats all variables as shared by default. The typical case of variables that should be made private are the loop counters: it is fundamental that each thread works with its own variable, particularly in those cases where the loop counter also identifies the position in a shared array where some data are accessed. More complex programming scenarios often arise and require the programmer to explicitly control concurrent updates of shared variables; it is beyond the scope of this work to tackle them all. The reader can refer to the OpenMP programming guide for more complete and detailed information [40]. This paragraph serves as a simple introduction to the tool that is used to parallelize the C version of the proposed program.

†Assuming they are not interacting with each other and have negligible mass.
348
The parallel dynamics function is implemented as a simple OpenMP for loop parallelization, collapsing all the states of the augmented system into a single loop. The multiple workers access the shared state array, compute the acceleration values and temporarily store them into thread-private variables, and finally copy them back to a shared, global acceleration array. OpenMP exploits the flexibility of the CPU architecture, thus no significant modifications are required to the innermost parts of the dynamics function to obtain an efficiently parallelized program. OpenMP is also used "indirectly" for the matrix multiplications of the PC method: the optimized OpenBLAS [41–43] libraries used to implement this part of the program already include the OpenMP parallelization.
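A minimal sketch of this loop pattern (with a deliberately simplified, hypothetical acceleration model in place of the actual dynamics):

    #include <math.h>
    #include <omp.h>

    /* Hypothetical placeholder dynamics: two-body-like a = -y/|y|^3. */
    static void acceleration(const double *y, double t, double *a)
    {
        (void)t;
        double r = sqrt(y[0]*y[0] + y[1]*y[1] + y[2]*y[2]);
        double r3 = r * r * r;
        for (int i = 0; i < 3; ++i) a[i] = -y[i] / r3;
    }

    /* Evaluate the dynamics for all the states of the augmented system. */
    void eval_dynamics_all(int total_states, const double *state,
                           const double *t, double *accel)
    {
        #pragma omp parallel for
        for (int k = 0; k < total_states; ++k) {
            double a[3];                          /* thread-private temporary */
            acceleration(&state[6 * k], t[k], a);
            accel[3 * k + 0] = a[0];
            accel[3 * k + 1] = a[1];
            accel[3 * k + 2] = a[2];
        }
    }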
355
B. Parallel reduction

The parallelization potential is also exploited in the computation of the PC iteration error, through the parallel reduction mechanism. Reduction tasks are a broad category of compute operations whose aim is to extract a single scalar value from an array of elements. Some examples are the sum of the array elements, finding the maximum or minimum element of an array, and the "and" and "or" logical operators. Despite appearing intrinsically sequential, parallelization possibilities do exist even in the reduction case: in principle, the whole array is split into several chunks among the parallel workers, which cooperate to perform the reduction task on their own chunk of elements. The cooperative process continues until a single scalar reduced value is obtained. Figure 1 shows a graphical example of the parallel reduction logic: on an array of 16 elements, only 5 sequential steps are eventually required with 16 parallel workers. OpenMP implements the reduction clause among its functions; the programmer is only asked to define the final reduced scalar as a shared variable [40]. The compiler then ensures that all the array values are scanned through and avoids simultaneous overwriting of the reduced scalar.
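As an example, the maximum iteration error over all the states could be computed with the reduction clause as follows (a sketch, not the actual program):

    #include <math.h>

    /* Maximum absolute difference between two consecutive Picard iterates. */
    double max_iteration_error(long len, const double *y_new, const double *y_old)
    {
        double err = 0.0;
        #pragma omp parallel for reduction(max:err)
        for (long k = 0; k < len; ++k) {
            double d = fabs(y_new[k] - y_old[k]);
            if (d > err) err = d;
        }
        return err;
    }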
367
C. GPU computing

The features of GPU computing arise from the hardware architecture of graphics cards, which is profoundly different from that of traditional compute units. Figure 2 shows a graphical representation of such differences: in summary, more transistors are devoted to pure data processing on GPUs, instead of flow control and memory management as in the CPU case [44]. Corresponding colors refer to chip elements of the same type, i.e. green for compute cores, yellow for control units, violet for the core-level cached memory, blue for shared cache, and orange for global memory.

Fig. 1 Parallel reduction graphical scheme.

Fig. 2 CPU vs GPU architecture difference graphical scheme. Picture from [44].

The processing units are grouped in blocks (typically 32 processing units per block), each controlled by one controller. All processing units in the same block execute the same instruction, issued only once by the controller. This aspect, together with the hundreds to thousands of processing units normally available in modern graphics cards, makes GPUs prone to implement massive parallelism, although with lower flexibility and higher programming effort compared to CPU applications. Some key concepts are given in the following subsection; a comprehensive view can be found in the CUDA C++ programming guide [44].
408
D. Main programming paradigms and the CUDA® API

This section is intended to provide a brief overview and nomenclature of the CUDA language and API. Italic font is used to introduce CUDA-specific names and concepts. A complete description can be found in the CUDA user manual [44].

The fundamental execution unit is called thread. Threads can be grouped in blocks, and some shared memory (dozens of kilobytes) is available to all threads in a common block. The execution of a single instruction is always performed by groups of 32 threads at the same time, called a warp, regardless of the number of threads in a block. Therefore, blocks with fewer than 32 threads do not exploit the full hardware resources. Complex configurations can be achieved combining multiple CPUs and GPUs to run the same program. The program flow is always controlled by the CPU, which also controls the execution of the GPU. A function that is invoked by the CPU but executed on the GPU is called kernel.

The first difference compared to standard programming regards the memory access: the GPU cannot read the usual compute memory, but data must be loaded on GPU memory prior to running kernels. In a similar manner, data must be retrieved by the CPU after the kernel has completed its execution, before executing other CPU tasks on the same data. Consequently, the flow of a GPU program/sub-program always follows a first initialization on the host, a subsequent data movement from the CPU to the GPU, the kernel execution, and finally the data retrieval from the GPU to the CPU, as represented in Figure 3.
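In host code, the flow of Figure 3 reduces to the following sequence (a sketch with a hypothetical kernel; error checking omitted):

    #include <cuda_runtime.h>

    __global__ void scale_kernel(double *y, int n)   /* hypothetical kernel */
    {
        int k = blockIdx.x * blockDim.x + threadIdx.x;
        if (k < n) y[k] *= 2.0;
    }

    void run_on_gpu(double *h_data, int n)
    {
        double *d_data;
        size_t bytes = n * sizeof(double);
        cudaMalloc(&d_data, bytes);                                /* init      */
        cudaMemcpy(d_data, h_data, bytes, cudaMemcpyHostToDevice); /* CPU->GPU  */
        scale_kernel<<<(n + 31) / 32, 32>>>(d_data, n);            /* kernel    */
        cudaMemcpy(h_data, d_data, bytes, cudaMemcpyDeviceToHost); /* GPU->CPU  */
        cudaFree(d_data);
    }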
425
Fig. 3 GPU program basic flow.

Because all threads execute the same instruction at the same time, kernels must be programmed in a warp-oriented
436
manner. This also includes explicitly managing the data access by the various threads, and complex functions typically require the programmer to optimize memory access and cache utilization by hand: while this is implicitly controlled by the compiler for CPUs, graphics cards store data by default in the so-called global memory, large in size (some gigabytes) and referred to by default when GPU variables are initialized. This eases data movements between CPU and GPU, even in the case of large arrays. However, the access latency is much higher compared to shared memory, which makes global memory inefficient for repeated read/write operations on GPU variables. To overcome this limitation, shared memory can be exploited for compute purposes, keeping read-only values in lower-latency locations, instead of only holding common values for all the threads in a block. A few cached bytes are also available in registers, thread-private locations that store up to 256 single-precision floating point values. Their small size and the compute intensity of the relativistic dynamics function make it difficult to use registers instead of the shared memory for the proposed application. Their use is limited to temporary and handle variables that aid the final acceleration computation.

The way array elements are sorted is also fundamental for optimized data movements across global and shared GPU memory. While CPU-GPU data transfers are specified by the array size, the most efficient intra-GPU memory access happens when threads read/write values at adjacent locations. If this condition is satisfied, data are moved as a single memory transaction for all the threads, resulting in a minimum of cycles spent reading or writing on global memory. This memory access pattern is called coalesced, and represents a fundamental performance driver in complex GPU programs: even if a program has massive parallelization possibilities, non-optimal memory access may result in a memory access latency that is not compensated at all by the parallelized computational tasks.

NVIDIA® developed and maintains the CUDA® programming language, which remarkably simplifies the use of NVIDIA® GPUs in computer programs. It is built as a C++ extension, with a set of keywords and API functions that allow programmers to build their own kernels and control the GPU execution flow. A set of optimized libraries is also available, for instance the basic linear algebra cuBLAS®, used for the PC matrix multiplications of this work [44].
459
E. Concurrency and advanced features

The CPU-GPU duality and cooperation exposes more possibilities than the simple acceleration of the intensive parts of the program. In general, kernel calls are asynchronous and do not block the work of the CPU, which can thus execute other tasks while a GPU kernel is still running. Furthermore, modern GPUs can manage at the same time two memory transfers (one per direction, CPU to GPU and GPU to CPU) while saturating their compute units with one or more concurrent kernel executions [44].

CUDA® allows the queuing of a series of sequentially dependent GPU function calls with the use of streams: for instance, an application may require some data to be transferred to the GPU before the execution of a custom kernel, which must necessarily be completed before calling a cuBLAS® function, at the end of which the processed data should be transferred back to the CPU. All it takes is assigning the sequentially dependent GPU function calls to the same stream. Multiple streams can be created and used at the same time; the obtained behaviour mimics batch job submissions to supercomputing facilities. CUDA® guarantees the correct execution order and synchronization within the same stream, whereas different streams must instead be synchronized with each other by hand. The compiler typically schedules executions and memory transactions so that the GPU use is maximized, superposing different GPU function calls and data transfers from separate streams [44].
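A minimal sketch of this queuing pattern with two streams (hypothetical kernel; pinned host memory is assumed so that copies and kernels of different streams can overlap):

    #include <cuda_runtime.h>

    __global__ void work_kernel(double *y, int n)    /* hypothetical kernel */
    {
        int k = blockIdx.x * blockDim.x + threadIdx.x;
        if (k < n) y[k] += 1.0;
    }

    /* Each stream queues: copy in -> kernel -> copy out. */
    void run_two_streams(double *h0, double *h1, double *d0, double *d1, int n)
    {
        size_t bytes = n * sizeof(double);
        double *h[2] = { h0, h1 }, *d[2] = { d0, d1 };
        cudaStream_t s[2];
        for (int p = 0; p < 2; ++p) cudaStreamCreate(&s[p]);
        for (int p = 0; p < 2; ++p) {
            cudaMemcpyAsync(d[p], h[p], bytes, cudaMemcpyHostToDevice, s[p]);
            work_kernel<<<(n + 31) / 32, 32, 0, s[p]>>>(d[p], n);
            cudaMemcpyAsync(h[p], d[p], bytes, cudaMemcpyDeviceToHost, s[p]);
        }
        for (int p = 0; p < 2; ++p) {
            cudaStreamSynchronize(s[p]);
            cudaStreamDestroy(s[p]);
        }
    }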
476
Despite the lower latency, optimized programs are designed to also control the way threads access shared memory locations. If two adjacent threads are asked to access two non-contiguous array elements, the memory access is performed in two cycles instead of a single one, slightly slowing down the program execution. This issue is called bank conflict, and can be avoided by ensuring that thread-varying elements are stored in the leading dimension of shared memory arrays. Bank conflicts do not happen if all threads access the same single memory location.
481
V. Implementation

Among the implementations outlined in this section, the case of independent PC runs for all the propagated trajectories is considered as the benchmark. This allows a direct assessment of the performance of the augmented PC algorithm against the original integrator on the same test case. All the algorithms were implemented using the C language, with the sole exception of the GPU program, which was coded in CUDA.

A. Independent runs PC workflow

The basic workflow of the independent PC runs is given in the block-scheme of Figure 4. The only parallelization possibilities, for high numbers of trajectories, apply at the highest level, inevitably introducing a considerable overhead for both the inner sequential execution and the still parallelizable inner functions. In fact, all the per-trajectory steps of the PC process would still be parallelizable algorithms per se.

B. Sequential Augmented PC workflow

The implementation of the augmented PC integration follows a one-level augmentation only, to highlight the pipeline benefits in terms of overhead that this implementation introduces. A block-scheme representation of the augmented PC integration workflow is given in Figure 5. The conceptual change, from the PC iteration viewpoint, is only the initial sampling in a single array containing all the state vectors of all the trajectories of the augmented system.

C. OpenMP® parallelized Augmented PC workflow

The parallelization of the augmented system integration, whose block-scheme representation is given in Figure 6, becomes fine-grained. It acts directly on the single state vectors for the dynamics function evaluation and on the elementary products of the matrix operations, the latter already implemented by the OpenBLAS libraries [41]. In addition, reduction operations can be performed through OpenMP® for a cooperative, parallel search of the maximum error.

Fig. 4 Standard PC workflow.

Fig. 5 Augmented PC workflow.
508
D. CUDA® Augmented PC workflow

The block-scheme representation of the CUDA® algorithm is given in Figure 7. The two-level augmentation concept is exploited, assigning one higher level augmented system to each CUDA® stream and using the thread-based parallelism on the lower level augmented systems.

The principal benefit is the cooperation between CPU and GPU for the overall execution, with as many operations as possible executed concurrently. Each lower level augmented system is initially sampled by the host and then moved to the GPU. The CUDA® stream management API makes it possible to overlap the CPU sampling of the next higher level augmented
515
systems with memory transfers and kernel executions of the already launched ones‡. Similarly, the last step of the PC iteration requires retrieving the computed iteration error for each stream from the GPU to the CPU for loop-control purposes, which also benefits from the stream concurrency. A stream synchronization at the end of each while loop iteration is necessary to achieve the overlapping behaviour of all the streams. Running independent loops for each higher level augmented system would result in completely sequential and non-overlapped executions. If a single CUDA® stream is generated, the standard one-level augmented system case is reproduced, albeit with the GPU computing acceleration instead of the OpenMP® implementation previously described.

Fig. 6 Augmented and OpenMP® parallelized PC workflow.

Fig. 7 Augmented CUDA® PC workflow.
565
The warp-centric programming model of CUDA® kernels requires a small modification of the lower level augmented system definition. Contiguous array elements should be of the same component type (i.e. contiguous $x$ coordinates, then contiguous $y$ coordinates, and so on), instead of storing state vector by state vector. This aspect might seem an implementation detail; however, it is fundamental to ensure coalesced global memory access. Too high a latency would result otherwise, which could not be hidden even by intensive parallelized GPU computations. The modification just discussed has no effect on the overall algorithm structure; all it requires is that the dynamics and error kernels be implemented following this array element logic. This aspect is discussed in more detail in the following section, together with the implications it has, especially on the evaluation of the dynamics function.

‡This is true as long as the CPU memory is allocated as page-locked (pinned) memory with specific CUDA® functions [44].
619
1. Dynamics model, array sorting, and CUDA kernel

At the core of the PC integration scheme lies the evaluation of the dynamics function at each Picard iteration. This task can be performed in parallel for all the states of the system being integrated; however, although conceptually simple, its implementation may not be straightforward in the GPU computing case. The more complex the dynamical model becomes, the more intertwined its implementation inevitably gets, possibly requiring access to data distributed over multiple arrays, possibly of notably different sizes. The accuracy requirements of the proposed application demand working under the restricted relativistic N-body problem, following the Einstein-Infeld-Hoffmann equations [45], whose dynamics function has the form:

$$\ddot{\mathbf{r}} = \mathbf{f}\left(\mathbf{r}, \dot{\mathbf{r}}, \mathbf{r}_i, \dot{\mathbf{r}}_i, \ddot{\mathbf{r}}_i\right) \tag{13}$$

with $i$ condensing the dependence on the states of all the major bodies in the ephemeris model, e.g. the solar system planets, and $\mathbf{r}$, $\dot{\mathbf{r}}$, $\ddot{\mathbf{r}}$ denoting position, velocity, and acceleration, respectively. The relations are unfortunately non-linear:
632
as a consequence, each CUDA® thread cannot perform simple operations on one single array element, since both the position and the velocity of each state are required to compute any acceleration component. Moreover, ephemerides data for $\mathbf{r}_i$, $\dot{\mathbf{r}}_i$, $\ddot{\mathbf{r}}_i$ also enter the dynamics function. These aspects suggest implementing the dynamics kernel with each CUDA® thread processing one full state vector, rather than one element. At the same time, coalescing the global memory access remains crucial to obtain a well-performing kernel. The PC method introduces however a partial constraint on the array shapes: the matrix multiplications that build the method need the sampled trajectory states to be stored as rows of an overall matrix, with the different times identifying the rows. For a column-major sorted augmented state matrix, contiguous state elements are interrupted by the ending time nodes, likely leading to non-coalesced memory access for numerous warps. Row-major sorted state arrays feature instead non-coalesced access for all the state elements.
641
To cope with these issues, the lower-level augmented state is reformulated by stacking along the columns the same components of all the state vectors in the augmented system:

$$\mathbf{Y}_j^{(i)} = \left[\, x_{j,1}^{(i)} \cdots x_{j,M}^{(i)} \;\cdots\; p_{j,1}^{(i)} \cdots p_{j,M}^{(i)} \;\cdots\; \dot{z}_{j,1}^{(i)} \cdots \dot{z}_{j,M}^{(i)} \,\right], \qquad j = 1, ..., N, \quad p = y, z, \dot{x}, \dot{y} \tag{14}$$
660
where $(x, y, z)$ are the Cartesian components of $\mathbf{r}$. The advantage is obvious in the column-major sorting case, since all the common components are found at adjacent memory addresses. Since in the presented application the number of states in the augmented system is much larger than the number of sampled trajectory nodes, many contiguous state components also appear in the row-major sorted array case. In addition, the higher-level augmented system definition may remain unaltered, since different CUDA® streams would be processing each lower-level sub-system. The augmented force matrix $\mathbf{F}_j^{(i)}$ can be adapted accordingly, without introducing any modification to the matrix multiplication characterizing the PC iteration. Lastly, bank conflicts (explained in Section IV.E) are automatically avoided [44] with this array sorting approach.
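In terms of indexing, the layout of Equation (14) maps time node $j$, component $c$ and trajectory $m$ to a single offset, so that adjacent threads (adjacent $m$) touch adjacent addresses; a sketch with hypothetical names:

    /* Component-major layout of Eq. (14): row j holds [x_1..x_M, y_1..y_M, ...].
       Adjacent threads read adjacent addresses: the access is coalesced. */
    __device__ __forceinline__
    double load_component(const double *Y, int j, int c, int m, int M)
    {
        return Y[(size_t)j * 6 * M + (size_t)c * M + m];
    }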
672
The kernel design is tied to the array sorting strategy. In particular, a key role is played by the fixed time nodes. The augmented system logic is a consequence of the shared time nodes among the different state vectors, and this feature should also be exploited when designing the thread blocks, to make the most of the available shared memory. In particular, the amount of memory required to store the planetary ephemerides is minimized if all the threads in a block process state vectors corresponding to the same time node. For this reason, the proposed program implements a row-major sorting strategy for state and force matrices. The augmented state matrix is accessed as a two-dimensional block array: one dimension (the rows) follows the different time nodes, whereas the other is used to split the states of a common time node into smaller chunks, each containing 32 states§. In this way, all the states in the same block of the two-dimensional block array require exactly the same ephemerides data, because they are all associated with the same time node. For fixed-time thread blocks, bank conflicts are automatically avoided also when reading ephemerides data from the shared memory, since all the threads are forced to access the same ephemerides item or vector component [44].

The computation of the dynamics function is a compute-bound task: most of the effort lies in performing computations on a limited amount of data, rather than in moving a large amount of information between two memory locations. Furthermore, the values of the state elements need to be accessed repeatedly. For these reasons, and because of the limited capacity of thread-private registers, shared memory is used to also temporarily store the state values, thanks to its lower latency compared to the global GPU memory [44]. The resulting dynamics kernel is summarized in Figure 8.

In addition, a close look at the Einstein-Infeld-Hoffmann equations reveals that the acceleration of each propagated body depends not only on its state and on the gravitational parameters and states of the major bodies in the ephemeris model, but also explicitly on the gravitational potential and the acceleration of such bodies [45]. In the restricted problem of the proposed application these contributions can be computed before the evaluation of the dynamics function itself, saving the computational burden of a task that would otherwise be repeated at each Picard iteration. The values of the gravitational potential and the acceleration of the major bodies of the ephemeris model are computed by the CPU before starting the Picard iterations, then moved to the global GPU memory, and eventually loaded into the shared memory for the time node associated with each block, along with the ephemeris states and gravitational parameters.

§32 is the warp size for most NVIDIA® graphics cards [44]. For augmented systems with a number of states that is not a multiple of the block size, the dynamics kernel is implemented so that the last block of threads processes the remainder of the integer division between the number of states and the block size.

Fig. 8 Dynamics kernel memory management.
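A much-simplified skeleton of this kernel organization (a sketch with hypothetical names and a plain point-mass sum in place of the Einstein-Infeld-Hoffmann terms; one 32-thread block processes 32 states of the time node blockIdx.x, so the launch grid would be (N, ceil(M/32))):

    #define NBODIES 9   /* assumed number of ephemeris bodies */

    __global__ void dynamics_kernel(const double *Y, double *F,
                                    const double *eph, int M)
    {
        __shared__ double s_eph[NBODIES * 3];  /* bodies' positions, node j */
        __shared__ double s_y[3 * 32];         /* the block's 32 positions  */

        int j = blockIdx.x;                       /* time node index        */
        int m = blockIdx.y * 32 + threadIdx.x;    /* state index in node j  */
        if (threadIdx.x < NBODIES * 3)            /* cooperative load       */
            s_eph[threadIdx.x] = eph[j * NBODIES * 3 + threadIdx.x];
        if (m < M)
            for (int c = 0; c < 3; ++c)           /* coalesced reads        */
                s_y[c * 32 + threadIdx.x] = Y[(size_t)j*6*M + (size_t)c*M + m];
        __syncthreads();

        if (m < M) {
            double a[3] = { 0.0, 0.0, 0.0 };
            for (int b = 0; b < NBODIES; ++b) {   /* placeholder dynamics   */
                double dx = s_eph[3*b]   - s_y[0*32 + threadIdx.x];
                double dy = s_eph[3*b+1] - s_y[1*32 + threadIdx.x];
                double dz = s_eph[3*b+2] - s_y[2*32 + threadIdx.x];
                double r2 = dx*dx + dy*dy + dz*dz;
                double inv_r3 = rsqrt(r2) / r2;   /* ~ 1/r^3, GM omitted    */
                a[0] += dx * inv_r3;
                a[1] += dy * inv_r3;
                a[2] += dz * inv_r3;
            }
            for (int c = 0; c < 3; ++c)           /* coalesced writes       */
                F[(size_t)j*3*M + (size_t)c*M + m] = a[c];
        }
    }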
704
2. CPU-GPU cooperated iteration error computation

All the involved CUDA® kernels are run at each Picard iteration. Their execution must be called by the CPU, which also stops the Picard iteration while loop once the error between two consecutive steps falls below the desired tolerance. Since the updated states already reside on the GPU, the GPU massive parallelism can be exploited to accelerate the computation of the iteration error, transferring only a limited amount of data to the CPU to be used for the loop control. Despite dealing with the augmented state system, the error is still defined on a per-state basis, and the maximum of the errors of all the states is used to control the loop.

The error computation process involves two separate kernels and a CPU function. The first kernel computes both the position and the velocity errors and stores the maximum of the two, for all the states in the augmented system. The second kernel computes the maximum error of groups of 4096 states: blocks of 1024 threads are created, computing the first maximums while reading four consecutive chunks of state errors into the shared memory. Then, the 1024 threads cooperate to find the actual maximum error among the remaining 1024 state errors, with reduction-driven parallelism¶. Eventually, the maximum error is copied back to the global memory, into a new array consisting of reduced errors only. Finally, this whole array is copied back to the CPU, which finds the actual maximum with a traditional sequential for loop-based approach. Even with augmented states made of millions of state vectors, this approach makes the CPU search sequentially over at most hundreds to thousands of candidates, with a negligible computational cost compared to the other steps.

¶Like the dynamics kernel, the error kernels are implemented so that the last thread block processes the remainder of the integer division between the number of states and the block size (4096 in the case of the reduction kernel). The reduction steps are controlled accordingly: only those corresponding to a number of threads less than or equal to the number of states in the block are activated.
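The per-block maximum reduction (the second kernel) can be sketched as follows (a simplified version of the described scheme: each 1024-thread block reduces up to 4096 state errors to a single value):

    __global__ void reduce_max_kernel(const double *err, double *out, int n)
    {
        __shared__ double s[1024];
        int tid  = threadIdx.x;
        int base = blockIdx.x * 4096;

        double m = 0.0;                      /* reduce 4 values while reading */
        for (int k = 0; k < 4; ++k) {
            int idx = base + k * 1024 + tid;
            if (idx < n && err[idx] > m) m = err[idx];
        }
        s[tid] = m;
        __syncthreads();

        for (int stride = 512; stride > 0; stride >>= 1) {  /* tree reduction */
            if (tid < stride && s[tid + stride] > s[tid]) s[tid] = s[tid + stride];
            __syncthreads();
        }
        if (tid == 0) out[blockIdx.x] = s[0];  /* one reduced error per block */
    }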
726
VI. Program performance
735
A. Test case

The test case follows what was already computed in [12, 13]. The first resonant phase with Venus of a Solar Orbiter-like mission was reproduced, designing a continuous trajectory even during flyby injections and exits. The approach is based on the b-plane [46] description of flybys, and uses the PC method for efficient fixed-point simulations to surf the relativistic N-body environment‖, minimizing the deep space maneuver effort required to control the flyby entrance. Following the trajectory scheme of Figure 9, the design algorithm follows a backward, dynamic programming-like recursion logic, i.e. designing flyby $j+1$ before flyby $j$, to preserve accuracy and continuity while also fulfilling mission requirements. The reader can find more insight on the design algorithm and technique in [12, 13].

Fig. 9 Orbital scheme of the phase from flyby $j$ to flyby $j+1$.

The Matlab®∗∗ implementation proposed in [12, 13] was re-run on a single core of a local workstation equipped with an Intel® Core™ i7-7700 CPU (3.60 GHz), running Windows® 10 Pro. The algorithm converged running 13509 PC integrations in 506.3 seconds, reaching the residual Δr∗ and impulsive action Δv∗ for the required maneuver presented in Table 1. To better detail the embedded PC integrations, they all feature 200 Chebyshev nodes spanning the arc
749
connecting the flyby exit and the maneuver point, about 0.87 orbital periods long. Figures 10a and 10b show the final trajectory, optimized in the relativistic N-body environment.

Table 1 Optimization results, in terms of position difference residual Δr∗ and correction effort Δv∗ at the maneuvering time corresponding to the apocenter of the post-flyby orbit.

                Δr∗ [m]                  Δv∗ [m/s]
           x       y       z         x       y       z
        −0.52   −0.52   −1.19     −1.28    1.57    0.22
770
‖The enforced trajectory continuity even during flybys requires extreme design precision, because the fast dynamics of close approaches would lead to divergent, and therefore meaningless, trajectories already after the first flyby.
∗∗Following the update to Matlab® R2021b, higher runtimes were obtained for the same final results; this does not affect the presented discussion, since the performance analysis is made only on the C and CUDA versions of the algorithm.
774
(a) Deep space correction maneuver and overview. (b) Zoom over Venus’ flybys V2 and V3.

Fig. 10 Solar Orbiter’s continuous first resonant phase with Venus. Pictures from [12, 13].
786
B. Computational setup

To build a common framework for the pure algorithm performance evaluations, the 13509 initial conditions generated in the optimization process to eventually obtain the results of Table 1 are re-run using the C and CUDA implementations of the PC integration. The execution of the 13509 independent runs with the C implementation of the algorithm is considered as the benchmark case, both completely sequential and parallelized with OpenMP [40]. The matrix operations featured in the PC iterations are performed using the OpenBLAS library [41–43]. All the presented runs of the C algorithm have been executed on a machine running Ubuntu Linux 20.04, equipped with 40 physical / 80 logical cores of the type Intel® Xeon™ CPU E5-4620 V4 running at 2.1 GHz, with a varying number of OpenMP threads and the "-O3" gcc compiler optimization enabled. Because of the physical machine where the GPU was available, the CUDA® code has been run on the same workstation used for the Matlab® optimal solution computation. In this case, a four-core OpenMP® parallelization on an Intel® Core™ i7-7700 CPU (3.60 GHz) is combined for concurrent executions with an NVIDIA® GTX 1050 (1.3 GHz) graphics card††. As a final remark, in all the presented cases the relative error among the different implementations for corresponding initial conditions always falls below the specified PC relative tolerance ($10^{-12}$), allowing runtime-only performance comparisons. These small differences are due to the inevitably slightly different order of the elementary operations performed by the different machines, libraries and implementations.
801
C. Accuracy comparison

Figure 11 shows the evolution of the propagation error, measured as the relative position and velocity error with respect to the independent runs case, for both the C augmented and the CUDA® programs. In both cases the error remains lower than the specified relative tolerance used to halt the Picard-Chebyshev iterations, set to $10^{-12}$.

††This is a 2016 low-end gaming card model, whose design purpose is far from the double-precision computing of this work. Modern gaming/professional cards could run up to 60 times faster, and data center cards up to 400-500 times faster, than this model for the presented application.
848
Fig. 11 Errors of the C augmented and CUDA® programs, as the average of the error of all the states in the augmented systems.
851
Figure 12 shows instead the evolution of the error for the dynamics function only, distinguishing two different compilations of the CUDA® program, with (red dashed) and without (orange) the –use-fast-math flag enabled. Despite the error experienced in Figure 11, the C augmented program shows no error in the computation of the dynamics function. That is, the error is accumulated throughout the Picard-Chebyshev iterations only because of the different structure of the matrix multiplications. Their implementation in the OpenBLAS library [41–43] is optimized on the matrix size. This is a known issue in high-performance computing problems, where the finite floating point representation of numbers makes the results sensitive to changes in the order of the basic math operations on the matrix elements, producing small deviations with respect to a reference non-parallelized solution. No differences were observed between parallelizing and not parallelizing the augmented C programs. On top of this consideration, the CUDA® program is also subject to the errors introduced by the different compilers. Besides being two different environments (Ubuntu Linux with GNU® compilers for the C augmented program, Windows 10 Pro with Microsoft® Visual Studio® and NVIDIA® CUDA® compilers for the CUDA® program), small differences can also be observed by setting different optimization flags in the compilation. The comparison between enabling and disabling the fast math optimization options is run
864
917
+ Picard-Chebsyhev nodesto exclude the possibility of implementation problems for the dynamics kernel. As it can be observed comparing the
918
+ two compilations, the error spikes reaching 10−12 happen in a seemingly unpredictable manner and can be completely
919
+ attributed to the compiler, because happening at different Picard-Chebyshev nodes for the two cases. Otherwise, the
920
+ error level remains more than two orders of magnitude lower. In any case, the accumulated effect of this error source is
921
+ not taking the overall error above the 10−12 iteration tolerance, as Figure 11 already highlighted.
922
Fig. 12 Errors of the C augmented and CUDA® programs for the dynamics function only, as the average of the error of all the states in the augmented systems.
925
D. Performance comparison

Table 2 shows the runtime difference between the sequential C programs. The improved efficiency of the augmented integration can be immediately seen even in this sequential case, where the augmented system approach runs 23.87% faster, because of the minimized overhead obtained by sharing the outer while loop. In the considered application the various trajectories all require 41 or 42 PC iterations, which makes the cost of keeping already-converged trajectories running negligible.
931
The scalability properties of the integration of independent trajectories and the augmented system are studied on the C implementations, i.e. assessing how well the execution of the two programs accelerates with an increasing number of
933
989
+ Picard-Chebsyhev nodesTable 2
990
+ Sequential runtimes for the independent runs and the augmented system executions.
991
+ Case
992
+ Runtime
993
+ [s]
994
+ Independent runs
995
+ 245.02
996
+ Augmented system
997
+ 186.54
998
+ OpenMP® threads. Figure 13 shows that the augmented system features excellent scalability properties, for a runtime
999
+ that keeps decreasing for increasing number of OpenMP® threads. On the contrary, the integration of independent
1000
+ trajectories experiences even higher runtimes after a certain number of threads. This happens because the parallelization
1001
+ itself introduces some overhead to the overall program execution, which cannot be compensated for by newly created
1002
+ parallel threads.
1003
Fig. 13 Augmented system and independent integrations C code runtime comparison with OpenMP® parallelization and varying number of threads.
1006
Figure 14 shows the achieved speedup, defined as the ratio between the sequential runtime and the parallel runtime, varying the number of OpenMP® threads. It provides a measure of how parallelizable the algorithm is, with higher values indicating higher accelerations. The augmented system integration once again shows excellent scalability properties even at high numbers of threads, suggesting the efficiency of the GPU computing transition even without
1010
having assessed the performance of the CUDA® implementation yet.

Fig. 14 Augmented system and independent integrations C code speedup comparison with OpenMP® parallelization and varying number of threads.

If the number of threads is set equal to one,
1033
the algorithm conceptually reduces to a sequential case. However, with compiler optimization and the OpenMP® flag enabled in the compilation, a parallel program introduces data distribution and retrieval tasks across the possibly multiple workers. With OpenMP®, the number of threads is specified by an environment variable right before running the program. Therefore, activating one thread only exposes all the parallelization-induced overhead without providing any computational benefit at all. For this reason the one-thread runtimes for the independent and the augmented systems both turn out higher than the benchmark sequential runtimes, which were compiled with optimization enabled but without the active OpenMP® flag.
1040
Finally, the execution of the CUDA® implementation‡‡ resulted the fastest overall, taking 15.84 seconds. The whole trajectory set was split into 10 streams§§. The stream definition guideline should in practice fit the application the propagator runs in, being the sole degree of flexibility left by the implementation. However, from the GPU viewpoint, larger kernels always imply a better GPU exploitation, thus creating too many outer augmentation levels, i.e. activating a
1048
1066
+ Number of threadslarge number of independent CUDA® streams, with too few trajectories each would result in a performance degradation,
1067
+ eventually obtaining what already observed with the independent integration cases for the single trajectories.
1068
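The stream-based splitting can be sketched as follows (an illustrative sketch, not the paper's code: the kernel, state layout, and chunk logic are assumptions following the footnote above):

```c
#include <cuda_runtime.h>

#define N_STREAMS 10

/* placeholder kernel propagating n augmented states */
__global__ void propagateChunk(double *states, int n);

void launchAllStreams(double *d_states, int nTotal)
{
    cudaStream_t streams[N_STREAMS];
    int chunk = nTotal / (N_STREAMS - 1);    /* states per full stream */
    for (int s = 0; s < N_STREAMS; ++s) {
        cudaStreamCreate(&streams[s]);
        int offset = s * chunk;
        int n = (s < N_STREAMS - 1) ? chunk : nTotal - offset;
        if (n > 0)                           /* last stream holds the remainder */
            propagateChunk<<<(n + 31) / 32, 32, 0, streams[s]>>>(d_states + offset, n);
    }
    for (int s = 0; s < N_STREAMS; ++s) {    /* wait for all sub-systems */
        cudaStreamSynchronize(streams[s]);
        cudaStreamDestroy(streams[s]);
    }
}
```

Launches issued on different streams may overlap with each other and with host work, which is the CPU-GPU concurrency exploited by the 10-stream configuration.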
Table 3 summarizes the runtime results discussed above for the different cases, for selected numbers of threads in the C implementation.

Table 3  Runtimes for the independent runs and the augmented system executions. Average of 10 different runs each.
Case              Threads   GPU        Runtime [s]
C Independent     1         -          245.02
C Augmented       1         -          186.54
C Independent     8         -          113.09
C Augmented       8         -          40.60
C Independent     40        -          113.54
C Augmented       40        -          22.67
C Independent     80        -          178.18
C Augmented       80        -          18.70
CUDA® Augmented   4         GTX 1050   15.84
Figure 15 shows the increasing speedup achieved by the augmented system when compared to the sequential, independent simulation of all the samples. The CUDA® implementation runs more than 15 times faster than the baseline case, suggesting the suitability of the augmented PC algorithm for high-performance and GPU computing facilities.
E. CUDA® Kernel profiling and optimization
The final kernel implementations are the result of a long and detailed profiling and optimization process, performed with the Nsight® Compute tool and an NVIDIA® GeForce® GTX 1050 graphics card. The choice of the 32-unit block size for the dynamics kernel was driven mostly by the need of using the shared memory also for the temporary storage of the double-precision state and acceleration vector elements, which are just too large to be kept in the thread-private registers. The error computation kernels do not feature this bottleneck, thus the full amount of 1024 threads per block can be activated, minimizing the number of memory transactions and maximizing the reduction effects. The further four levels of reduction applied while reading the error from ephemerides data (detailed in Section V.D.2) are the result of multiple trials: adding more reduction layers resulted in a kernel slowdown not compensated by the consequent speedup in the final CPU function call. Analogously, removing some of them resulted in a CPU function slowdown not compensated by the kernel speedup. The chosen number of while-reading reductions may however be affected by the problem size of the selected test case. Larger (or smaller) sets of initial conditions to be propagated may feature a different optimal implementation of the maximum error computation. On the contrary, 32 is already the minimum block size for the dynamics kernel and is not related to the problem size. Other non-relativistic dynamics functions may drive the use of the shared memory in a different way, possibly allowing the use of larger block sizes.

Fig. 15  Speedup comparison among C and CUDA® implementations.
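The shared-memory constraint on the dynamics kernel can be visualized with a skeleton like the following (a sketch under assumed dimensions, not the actual implementation; the right-hand side evaluation is elided):

```c
#define BLOCK 32          /* minimum warp-sized block, as discussed above */
#define STATE_DIM 6       /* assumed position + velocity components */

__global__ void dynamics(const double *x, double *acc, int nNodes)
{
    /* staged in shared memory: too large for thread-private registers */
    __shared__ double s_state[BLOCK][STATE_DIM];
    __shared__ double s_acc[BLOCK][STATE_DIM];

    int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= nNodes) return;

    for (int k = 0; k < STATE_DIM; ++k)      /* stage the state */
        s_state[threadIdx.x][k] = x[node * STATE_DIM + k];

    /* ... evaluate the relativistic N-body right-hand side into s_acc ... */

    for (int k = 0; k < STATE_DIM; ++k)      /* write the acceleration back */
        acc[node * STATE_DIM + k] = s_acc[threadIdx.x][k];
}
```

With these assumed dimensions the two buffers would occupy only 3 KiB per block; the real kernel stages larger temporaries, which is what motivates the 32-thread cap described above.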
The kernel performance is condensed into three numbers that result from the profiling process, i.e. the average multiprocessor occupancy for compute operations (Compute in Tables 4 and 5), the shared memory utilization (Shared Memory in Tables 4 and 5), and the memory throughput¶¶ (Throughput in Tables 4 and 5). Although profiling tools provide more detailed information, the presented indicators already allow describing the kernel performance in sufficient detail. The profiling indicators of the cuBLAS® kernels for matrix-matrix product (dgemm), matrix-vector product (dgemv), and element-wise summation (daxpy)∗∗∗ used for the matrix multiplications embedded in the PC method are also shown, providing a performance comparison against well-known and heavily optimized library functions. In addition, the kernel runtime as measured in the profiling activity is added to the comparison, to highlight the difference between the two tested augmented system sizes.
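For reference, the cuBLAS® calls corresponding to those three kernels look as follows (a sketch with assumed shapes and column-major layout, not the paper's exact call sites):

```c
#include <cublas_v2.h>

/* C = A*B, y = A*x, y = y + x, all in double precision */
void pcLinearAlgebra(cublasHandle_t h, int m, int n, int k,
                     const double *dA, const double *dB, double *dC,
                     const double *dx, double *dy)
{
    const double one = 1.0, zero = 0.0;

    /* dgemm: (m x k) * (k x n) -> (m x n) */
    cublasDgemm(h, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
                &one, dA, m, dB, k, &zero, dC, m);

    /* dgemv: (m x k) * (k) -> (m) */
    cublasDgemv(h, CUBLAS_OP_N, m, k, &one, dA, m, dx, 1, &zero, dy, 1);

    /* daxpy: dy = dy + 1.0 * dx over m elements */
    cublasDaxpy(h, m, &one, dx, 1, dy, 1);
}
```

The library dispatches architecture-specific kernels behind these entry points, which is why the table rows are labeled with the API names rather than with the actual kernel names.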
Table 4 shows the kernel profiling results for an augmented system made of all the 13509 states to be propagated. The compute-bound kernels can be easily recognized as the ones exploiting the GPU's compute capability the most: dynamics (dynamics), dgemm, error computation (errCompute), and maximum error reduction (maxReduce). The matrix-matrix multiplication is more memory bound than the dynamics function, because larger arrays must be loaded at the same time into the shared memory to perform the computation. On the contrary, the dynamics kernel features a more relevant computational bottleneck, because of the much more complex algorithm compared to the simple products and summations of the dgemm case. The remaining kernels, as well as the lower-intensity but still highly parallelizable error computation, all feature a high memory throughput. The GTX 1050 card used has a maximum memory bandwidth of 112.1 GB/s: the closer the throughput to that value, the better the memory transactions are managed, an essential feature for memory-bound problems. If some computations are added in the kernel, some throughput is inevitably lost, as latency sources are introduced between transactions to/from the shared/global memory.

¶¶Non-compute-intensive but still highly parallelizable tasks may have the memory transfer as their final bottleneck; this indicator measures how much of the CPU-GPU communication bandwidth is used.

∗∗∗For the sake of conciseness the kernels are identified with the names of the cuBLAS® API functions, although optimized kernels are called for each specific GPU architecture and problem size.

[Fig. 15 bar chart data: speedups of 1.00x, 1.31x, 6.03x, 8.30x, 10.81x, 13.10x, and 15.57x across the sequential, 1-, 8-, 16-, 40-, and 80-thread, and GPU augmented cases, relative to the sequential independent baseline.]
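The reduction kernels mentioned above follow the classic shared-memory tree pattern; a minimal sketch (assumed data layout, not the profiled implementation) is:

```c
/* one block reduces up to 1024 error entries to a single maximum */
__global__ void maxReduce(const double *err, double *blockMax, int n)
{
    __shared__ double s[1024];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    s[threadIdx.x] = (i < n) ? err[i] : 0.0;   /* errors assumed >= 0 */
    __syncthreads();

    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            s[threadIdx.x] = fmax(s[threadIdx.x], s[threadIdx.x + stride]);
        __syncthreads();
    }
    if (threadIdx.x == 0)
        blockMax[blockIdx.x] = s[0];           /* final max taken on the CPU */
}
```

Each extra "while-reading" reduction layer trades kernel time for a shorter array handed back to the CPU, which is the balance discussed in the profiling above.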
Table 4  13509-sized augmented state kernel profiling results.

Kernel       Compute [%]   Shared Memory [%]   Throughput [GB/s]   Runtime [μs]
dynamics     97.11         24.96                1.91               137470
dgemm        94.15         26.67                4.06               164210
dgemv        47.14         84.54               93.24                 1400
daxpy        18.18         74.78               60.26                   32.48
errCompute   99.41         53.26               59.36                 4740
maxReduce    36.80         71.36               79.25                  279.36
Table 5 shows the kernel profiling results for an augmented system made of 1501 states to be propagated†††. The more compute-bound kernels do not show a significant loss of performance compared to the 13509-sized single augmented state case of Table 4, whereas the other kernels do. This highlights the suitability of GPU computing for extremely intense and parallelizable tasks, where the kernel call overhead is heavily compensated for by the massive task parallelization. Nevertheless, the capability of successfully processing smaller-sized problems as well remains crucial for the program flexibility, particularly for what concerns the adoption of the proposed two-level augmentation scheme comprising smaller sub-systems significantly different from each other.
Table 5  1501-sized augmented state kernel profiling results.

Kernel       Compute [%]   Shared Memory [%]   Throughput [GB/s]   Runtime [μs]
dynamics     96.86         24.92                1.91                15300
dgemm        93.67         26.65                4.08                18390
dgemv        42.59         78.74               87.53                  170.85
daxpy         9.19         33.95               36.31                    6.62
errCompute   98.25         53.44               59.78                  529.34
maxReduce    26.29         63.35               68.20                   42.88
Table 6 compares instead the runtimes of the CUDA® program for the cases of 1 and 10 active streams. Theoretically, the former has the advantage of a better GPU resource exploitation, whereas the latter makes a more aggressive use of the CPU-GPU concurrency. Despite the lower GPU efficiency, the achieved runtimes are almost identical. Therefore, the two-level augmentation scheme can efficiently tackle the case of differently-sized lower-level augmented subsystems, showing the flexibility of the proposed computational scheme, despite the fine-grain code optimization necessary for its successful implementation.

†††This number results from splitting the full augmented system of 13509 states into 10 CUDA® streams, each but the last with a number of states equal to the integer division between 13509 and 9. The tenth and last stream contains a number of states equal to the remainder of the previous integer division.
Table 6  1 and 10 streams CUDA® program executions. Average of 10 different runs each.

Case         Runtime [s]
1 stream     15.78
10 streams   15.84
VII. Conclusion
This work explores the benefits that high-performance CPU clusters and GPU computing architectures bring to the short-leg orbital propagation of large sets of initial conditions. The tested case study covers the runs required to design a Solar Orbiter-like resonant phase with Venus, optimized to surf the relativistic N-body environment, proposing a two-level augmentation strategy implemented in the C and CUDA® programming languages.
Propagating the augmented system always outperforms propagating the trajectories independently, both in the sequential and in the parallelized case. The augmentation benefits appear in two different aspects, the first being the reduced overhead compared to the repeated independent runs, the second represented by a finer-grain parallelization that also exploits optimized linear algebra libraries.
The approach's scalability allows its implementation on GPU architectures, with a low-end and old graphics card already capable of matching the performance of a common-size cluster node. Data center card models can make the algorithm run around 400-500 times faster, while the newest gaming GPUs should already allow a 60-fold program acceleration. In addition, implementing the second-order version of the modified PC method with error feedback should introduce a further two- to three-fold acceleration.
Despite being shown on a test case derived from a trajectory optimization application, the proposed scheme represents a completely general orbital propagator. Any application requiring the propagation of large sets of initial conditions could benefit from the high computational efficiency provided by this algorithm, not necessarily requiring the use of supercomputing facilities in favour of much less expensive graphics cards.
Future work may keep developing the software tool, managing longer integration spans in sequence and, as a general propagator, implementing flyby detection procedures. At the same time, the optimization scheme that led to the presented trajectory design application can be modified to accommodate massively parallel search approaches. On the implementation side, the benefits of using CUDA® graphs instead of the stream-based management of the outer-level augmented systems may also be explored.
Acknowledgments
The research leading to these results has received funding from the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation programme as part of project COMPASS (Grant agreement No 679086), www.compass.polimi.it, and the European Space Agency (ESA) through the Open Space Innovation Platform (OSIP) co-funded research project "Robust trajectory design accounting for generic evolving uncertainties", Contract No. 4000135476/21/NL/GLC/my.
References
[1] European Space Agency (ESA), "Solar Orbiter Definition Study Report (Red Book)," Tech. Rep., July 2011. URL https://sci.esa.int/s/w7yO4P8.
[2] European Space Agency (ESA), "Jupiter ICy moons Explorer. Exploring the emergence of habitable worlds around gas giants. Definition Study Report," Tech. Rep. 1.0, 2014. URL https://sci.esa.int/web/juice/-/54994-juice-definition-study-report.
[3] COSPAR - Committee on Space Research, "COSPAR Policy on Planetary Protection," Space Research Today, Vol. 208, 2020, pp. 10–22. https://doi.org/10.1016/j.srt.2020.07.009, URL https://www.sciencedirect.com/science/article/pii/S1752929820300372.
[4] Kminek, G., "ESA planetary protection requirements, Technical Report ESSB-ST-U-001," Tech. rep., European Space Agency, 2012.
[5] Jehn, R., "Estimating the impact probability of Ariane upper stages," Tech. rep., MAS Working paper 601, European Space Agency, Dec 2014.
[6] Wallace, M., "A Massively Parallel Bayesian Approach to Planetary Protection Trajectory Analysis and Design," Proceedings of the 2015 AAS/AIAA Astrodynamics Specialist Conference, Vol. AAS 15-535, Vail, CO, USA, 2015. URL https://trs.jpl.nasa.gov/handle/2014/45859.
[7] Colombo, C., Letizia, F., and Van Der Eynde, J., "SNAPPshot ESA planetary protection compliance verification software Final report V1.0, Technical Report ESA-IPL-POM-MB-LE-2015-315," Tech. rep., University of Southampton, 2016.
[8] Romano, M., Losacco, M., Colombo, C., and Di Lizia, P., "Impact probability computation of near-Earth objects using Monte Carlo line sampling and subset simulation," Celestial Mechanics and Dynamical Astronomy, Vol. 132, No. 8, 2020, p. 42. https://doi.org/10.1007/s10569-020-09981-5.
[9] Romano, M., "Orbit propagation and uncertainty modelling for planetary protection compliance verification," Ph.D. thesis, Politecnico di Milano, Supervisors: Colombo, Camilla and Sánchez Pérez, José Manuel, Feb 2020. https://doi.org/10.13140/RG.2.2.19692.80001.
[10] Masat, A., Romano, M., and Colombo, C., "Kustaanheimo–Stiefel Variables for Planetary Protection Compliance Analysis," Journal of Guidance, Control, and Dynamics, Vol. 45, No. 7, 2022, pp. 1286–1298. https://doi.org/10.2514/1.G006255.
[11] Stiefel, E. L., and Scheifele, G., Linear and Regular Celestial Mechanics, Grundlehren der mathematischen Wissenschaften, Springer, Berlin, 1971.
[12] Masat, A., Romano, M., and Colombo, C., "Combined B-plane and Picard-Chebyshev approach for the continuous design of perturbed interplanetary resonant trajectories," 31st AAS/AIAA Space Flight Mechanics Meeting, Vol. AAS-21-289, Charlotte, NC, USA, 2021.
[13] Masat, A., and Colombo, C., "B-plane and Picard–Chebyshev integration method: Surfing complex orbital perturbations in interplanetary multi-flyby trajectories," Acta Astronautica, Vol. 194, 2022, pp. 216–228. https://doi.org/10.1016/j.actaastro.2022.01.045.
[14] Junkins, J. L., Bani Younes, A., Woollands, R. M., and Bai, X., "Picard Iteration, Chebyshev Polynomials and Chebyshev-Picard Methods: Application in Astrodynamics," Journal of the Astronautical Sciences, Vol. 60, No. 3, 2013, pp. 623–653. https://doi.org/10.1007/s40295-015-0061-1.
[15] Koblick, D., and Shankar, P., "Evaluation of the Modified Picard-Chebyshev Method for High-Precision Orbit Propagation," Journal of Aerospace Engineering, Vol. 28, No. 5, 2015, p. 04014125. https://doi.org/10.1061/(ASCE)AS.1943-5525.0000463.
[16] Woollands, R. M., Bani Younes, A., and Junkins, J. L., "New Solutions for the Perturbed Lambert Problem Using Regularization and Picard Iteration," Journal of Guidance, Control, and Dynamics, Vol. 38, No. 9, 2015, pp. 1548–1562. https://doi.org/10.2514/1.G001028.
[17] Woollands, R. M., Read, J. L., Probe, A. B., and Junkins, J. L., "Multiple Revolution Solutions for the Perturbed Lambert Problem using the Method of Particular Solutions and Picard Iteration," Journal of the Astronautical Sciences, Vol. 64, No. 4, 2017, pp. 361–378. https://doi.org/10.1007/s40295-017-0116-6.
[18] Woollands, R. M., Read, J., Hernandez, K., Probe, A., and Junkins, J. L., "Unified Lambert Tool for Massively Parallel Applications in Space Situational Awareness," Journal of the Astronautical Sciences, Vol. 65, No. 1, 2018, pp. 29–45. https://doi.org/10.1007/s40295-017-0118-4.
[19] Swenson, T., Woollands, R., Junkins, J., and Lo, M., "Application of Modified Chebyshev Picard Iteration to Differential Correction for Improved Robustness and Computation Time," Journal of the Astronautical Sciences, Vol. 64, No. 3, 2017, pp. 267–284. https://doi.org/10.1007/s40295-016-0110-4.
[20] Singh, S. K., Woollands, R., Taheri, E., and Junkins, J., "Feasibility of quasi-frozen, near-polar and extremely low-altitude lunar orbits," Acta Astronautica, Vol. 166, 2020, pp. 450–468. https://doi.org/10.1016/j.actaastro.2019.10.037.
[21] Koblick, D., Xu, S., Fogel, J., and Shankar, P., "Low Thrust Minimum Time Orbit Transfer Nonlinear Optimization Using Impulse Discretization via the Modified Picard-Chebyshev Method," Computer Modeling in Engineering & Sciences, Vol. 111, No. 1, 2016. https://doi.org/10.3970/cmes.2016.111.001.
[22] Macomber, B., Probe, A. B., Woollands, R., Read, J., and Junkins, J. L., "Enhancements to Modified Chebyshev-Picard Iteration Efficiency for Perturbed Orbit Propagation," Computer Modeling in Engineering & Sciences, Vol. 111, No. 1, 2016, pp. 29–64. https://doi.org/10.3970/cmes.2016.111.029.
[23] Woollands, R., Taheri, E., and Junkins, J. L., "Efficient Computation of Optimal Low Thrust Gravity Perturbed Orbit Transfers," Journal of the Astronautical Sciences, Vol. 67, No. 2, 2020, pp. 458–484. https://doi.org/10.1007/s40295-019-00152-9.
[24] Woollands, R., and Junkins, J. L., "Nonlinear Differential Equation Solvers via Adaptive Picard-Chebyshev Iteration: Applications in Astrodynamics," Journal of Guidance, Control, and Dynamics, Vol. 42, No. 5, 2019, pp. 1007–1022. https://doi.org/10.2514/1.G003318.
[25] Atallah, A. M., Woollands, R. M., Elgohary, T. A., and Junkins, J. L., "Accuracy and Efficiency Comparison of Six Numerical Integrators for Propagating Perturbed Orbits," Journal of the Astronautical Sciences, Vol. 67, No. 2, 2020, pp. 511–538. https://doi.org/10.1007/s40295-019-00167-2.
[26] Geda, M., Noomen, R., and Renk, F., "Massive Parallelization of Trajectory Propagations using GPUs," Master's thesis, Delft University of Technology, 2019. URL http://resolver.tudelft.nl/uuid:1db3f2d1-c2bb-4188-bd1e-dac67bfd9dab.
[27] Schrammel, F., Renk, F., Mazaheri, A., and Wolf, F., "Efficient Ephemeris Models for Spacecraft Trajectory Simulations on GPUs," Euro-Par 2020: Parallel Processing, edited by M. Malawski and K. Rzadca, Springer International Publishing, Cham, 2020, pp. 561–577.
[28] Li, J.-S., Yang, Z., and Luo, Y.-Z., "A review of space-object collision probability computation methods," Astrodynamics, Vol. 6, No. 2, 2022, pp. 95–120. https://doi.org/10.1007/s42064-021-0125-x.
[29] Yoshikawa, K., Sawada, H., Kikuchi, S., Ogawa, N., Mimasu, Y., Ono, G., Takei, Y., Terui, F., Saiki, T., Yasuda, S., Matsushima, K., Masuda, T., and Tsuda, Y., "Modeling and analysis of Hayabusa2 touchdown," Astrodynamics, Vol. 4, No. 2, 2020, pp. 119–135. https://doi.org/10.1007/s42064-020-0073-x.
[30] Armellin, R., Di Lizia, P., Bernelli-Zazzera, F., and Berz, M., "Asteroid close encounters characterization using differential algebra: the case of Apophis," Celestial Mechanics and Dynamical Astronomy, Vol. 107, No. 4, 2010, pp. 451–470. https://doi.org/10.1007/s10569-010-9283-5.
[31] Hairer, E., Wanner, G., and Nørsett, S. P., Solving Ordinary Differential Equations I, 2nd ed., Springer, Berlin, 1993. https://doi.org/10.1007/978-3-540-78862-1.
[32] Bai, X., and Junkins, J. L., "Solving initial value problems by the Picard-Chebyshev method with NVIDIA GPUs," Advances in the Astronautical Sciences, 2010.
[33] Rivlin, T. J., "The Chebyshev Polynomials," Mathematics of Computation, Vol. 30, 1976. https://doi.org/10.2307/2005983.
[34] Fukushima, T., "Picard Iteration method, Chebyshev Polynomial Approximation, and Global Numerical Integration of Dynamical Motions," The Astronomical Journal, Vol. 113, 1997, pp. 1909–1914. https://doi.org/10.1086/118404.
[35] Macomber, B. D., "Enhancements to Chebyshev-Picard Iteration Efficiency for Generally Perturbed Orbits and Constrained Dynamical Systems," Ph.D. thesis, Texas A&M University, Supervisor: Junkins, John L., Aug 2015. URL https://hdl.handle.net/1969.1/155745.
[36] Fukushima, T., "Vector Integration of Dynamical Motions by the Picard-Chebyshev Method," The Astronomical Journal, Vol. 113, 1997, p. 2325. https://doi.org/10.1086/118443.
[37] Bai, X., and Junkins, J. L., "Modified Chebyshev-Picard Iteration Methods for Orbit Propagation," Journal of the Astronautical Sciences, Vol. 58, No. 4, 2011, pp. 583–613. https://doi.org/10.1007/BF03321533.
[38] Koblick, D., Poole, M., and Shankar, P., "Parallel high-precision orbit propagation using the Modified Picard-Chebyshev Method," ASME International Mechanical Engineering Congress and Exposition, Proceedings (IMECE), 2012. https://doi.org/10.1115/IMECE2012-87878.
[39] Message Passing Interface Forum, "MPI: A Message-Passing Interface Standard," Tech. rep., USA, 1994.
[40] Chandra, R., Dagum, L., Kohr, D., Menon, R., Maydan, D., and McDonald, J., Parallel Programming in OpenMP, Morgan Kaufmann, 2001.
[41] "Xianyi/OpenBLAS," Last access: November 2021. URL https://github.com/xianyi/OpenBLAS#supported-cpus-and-operating-systems.
[42] Wang, Q., Zhang, X., Zhang, Y., and Yi, Q., "AUGEM: Automatically generate high performance Dense Linear Algebra kernels on x86 CPUs," SC '13: Proceedings of the International Conference on High Performance Computing, Networking, Storage and Analysis, 2013, pp. 1–12. https://doi.org/10.1145/2503210.2503219.
[43] Xianyi, Z., Qian, W., and Yunquan, Z., "Model-driven Level 3 BLAS Performance Optimization on Loongson 3A Processor," 2012 IEEE 18th International Conference on Parallel and Distributed Systems, 2012, pp. 684–691. https://doi.org/10.1109/ICPADS.2012.97.
[44] NVIDIA Corporation, "CUDA C++ Programming Guide," Nov 2021. URL https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html.
[45] Seidelmann, P. K., Explanatory Supplement to the Astronomical Almanac, University Science Books, Melville, NY, 1992.
[46] Valsecchi, G. B., Milani, A., Gronchi, G. F., and Chesley, S. R., "Resonant returns to close approaches: Analytical theory," A&A, Vol. 408, No. 3, 2003, pp. 1179–1196. https://doi.org/10.1051/0004-6361:20031039.

8tE2T4oBgHgl3EQflgdv/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
BNAzT4oBgHgl3EQfhv2_/content/tmp_files/2301.01490v1.pdf.txt ADDED
@@ -0,0 +1,1210 @@
Journal of Virtual Reality and Broadcasting, Volume n(200n), no. n

Towards a Pipeline for Real-Time Visualization of Faces for VR-based Telepresence and Live Broadcasting Utilizing Neural Rendering

Philipp Ladwig*, Rene Ebertowski*, Alexander Pech*, Ralf Dörner†, Christian Geiger*
*University of Applied Sciences Düsseldorf, Germany
Mixed Reality and Visualization Group (MIREVI)
{philipp.ladwig@, rene.ebertowski@study., alexander.pech@., geiger@}hs-duesseldorf.de
www.mirevi.de
†RheinMain University of Applied Sciences
Faculty of Design – Computer Science – Media, Wiesbaden, Germany
ralf.doerner@hs-rm.de
Abstract

While head-mounted displays (HMDs) for Virtual Reality (VR) have become widely available in the consumer market, they pose a considerable obstacle for a realistic face-to-face conversation in VR since HMDs hide a significant portion of the participants' faces. Even with image streams from cameras directly attached to an HMD, stitching together a convincing image of an entire face remains a challenging task because of extreme capture angles and strong lens distortions due to a wide field of view. Compared to the long line of research in VR, reconstruction of faces hidden beneath an HMD is a very recent topic of research. While the current state-of-the-art solutions demonstrate photo-realistic 3D reconstruction results, they require high-cost laboratory equipment and large computational costs. We present an approach that focuses on low-cost hardware and can be used on a commodity gaming computer with a single GPU. We leverage the benefits of an end-to-end pipeline by means of Generative Adversarial Networks (GAN). Our GAN produces a frontal-facing 2.5D point cloud based on a training dataset captured with an RGBD camera. In our approach, the training process is offline, while the reconstruction runs in real time. Our results show adequate reconstruction quality within the 'learned' expressions. Expressions not learned by the network produce artifacts and can trigger the Uncanny Valley effect.

Keywords: Neural Rendering, Telepresence, Face Reconstruction, Virtual Reality, Live Broadcasting, Image-to-Image Translation, Pix2Pix, Generative Adversarial Networks

Digital Peer Publishing Licence: Any party may pass on this work by electronic means and make it available for download under the terms and conditions of the current version of the Digital Peer Publishing Licence (DPPL). The text of the licence may be accessed and retrieved via internet at http://www.dipp.nrw.de/.

First presented at the Workshop of the GI Special Interest Group VR/AR 2020, extended and revised for JVRB.
1 Introduction

Natural face-to-face communication is three-dimensional. A conversation includes not only the verbal communication channel but also the nonverbal channel. In particular, eye contact, facial expressions as well as gestures performed with arms and hands (kinesics), and even the physical distance between each other (proxemics) are essential information carriers during a conversation [LG18]. Currently, common mainstream technologies for computer-mediated communication are video conferencing applications such as Skype or FaceTime. While these allow reading the facial expressions of the counterpart, no 'real' eye contact is possible, wide gestures may be cut off in the camera image, deictic gestures are difficult to interpret spatially, and a perceptible physical body distance between the participants does not exist.

Figure 1: Our conceptual pipeline: First, we capture several RGBD images with a helmet camera mount. These images are processed and serve as the input data for our GAN. After training, the GAN produces textured point clouds in real time. In this work we improve the data set processing, training and inference stages compared to our previous systems [LPG20, LPDG20]. Building a face-tracking HMD is not part of the present work.

Current head-mounted displays (HMDs) for VR are capable of delivering believable and immersive 3D experiences including telepresence. However, this does not fully apply to real-time social interactions in VR. When the face of a person is covered by an HMD, it is impossible to read its non-verbal facial communication cues, which is, in fact, a crucial communication channel between individuals. This is not only relevant for face-to-face meetings in VR (for example in VRChat [vrc22], Altspace [alt22] or Meta's Horizon Worlds [met22]) but also in VR application scenarios in which only one VR user wears an HMD and tries to engage with their audience. For example, such a VR user could be an architect who presents ideas for a new building in VR to their clients, a Virtual YouTuber (VTuber), a Twitch streamer in front of a green screen who broadcasts themselves from inside a VR environment, or friends playing a VR game together in a living room.
The classic way for creating and rendering photo-realistic humans in real time is costly and requires a lot of manual effort such as scanning, modeling, and manual texturing from a skilled 3D artist. Furthermore, today's HMDs usually lack adequate sensors for face tracking. Only a few research groups have so far addressed this problem and presented methods that can generate authentic face avatars for VR without extensive manual modeling [TZS+18, LSSS18, WSS+19, RZS+21, GPL+21]. These approaches are not available to the public, and some of them require expensive hardware [LSSS18, WSS+19].

Human avatars and their perception have been studied in multiple domains. A key issue in this context is the occurrence of the Uncanny Valley effect. Humans are markedly sensitive to minimal and unnatural discrepancies in faces. As soon as a virtual human does not perfectly resemble a real human, it is often subconsciously classified as unlikeable, unpleasant, or even creepy. One technique that has successfully bridged the Uncanny Valley in recent years is Generative Adversarial Networks (GANs). Today, GANs serve as the core technology behind Deepfakes. They enable such authentic results that their methods and algorithms are the subject of current research to distinguish fake images from real ones, as the human eye is no longer able to reliably do so [RCV+19]. Therefore, we use algorithms in the context of this work that are also employed to create Deepfakes. We extend this approach with an additional dimension (a textured 2.5D point cloud instead of only an RGB image) to generate realistic representations of 2.5D face avatars. We do not create a full 3D head because we only capture the face of a person from a static frontal position with an RGBD sensor. This implies that we do not generate realistic textures from side views. However, we maintain a stereoscopic perception of the reconstructed face during face-to-face conversations in a virtual environment.

We present an end-to-end learning system that has low hardware cost compared to others, requires moderate computational resources, and generates results with frame rates suitable for VR applications. Our research contributes to GANs playing a key role in authentic 3D telepresence applications in the near future. In addition to sharing our insights in this paper, we make the code of our prototype publicly available under: https://github.com/Mirevi/face-synthesizer-JVRB.

[Figure 1 stage labels: capture → data set processing → training → inference → textured point cloud]

This work is an extension and improvement of our previous neural rendering pipeline [LPG20] and complements our recent work on how to build an HMD with face tracking capabilities [LPDG20]. The contributions of this work are the creation of a face capture pipeline as well as the introduction of a pair of Generative Adversarial Networks (GAN) [GPAM+14] that are tailored for the authentic reconstruction of faces in three-dimensional telepresence and live broadcasting applications. The motivation is to use a commodity graphics card to capture and reconstruct the individual characteristics of a person's face with a high level of (personal) details and VR-enabled frame rates in order to create an authentic avatar that goes beyond the capabilities of today's avatar creation tools such as VRChat [vrc22], Altspace [alt22] or Meta's Horizon Worlds [met22].
2 Related Work
Face reconstruction for telepresence and (live) broadcasting with an HMD occluding a person's face is a young research field. Olszewski et al. [OLSL16] presented a system that uses an RGB camera to transfer facial expressions from the lower face area to an avatar. Li et al. [LTO+15] extended this approach with pressure sensors placed in the foam of an HMD capturing a person's facial expressions. The idea of using sensors within the HMD is similar to our concept, but we use personalized avatars that are trained in advance and synthesized in a final step.

Casa et al. [CFA+16], Früh et al. [FSK17], and Thies et al. [TZS+18] used stationary RGBD cameras to create personalized avatars of users. In the first step, the user was captured by the camera without an HMD in order to create a virtual avatar. When the user wore the HMD, the stationary RGBD camera recognized facial expressions. Due to the fixed position of the camera, the range of head motion was limited. Eye movements were registered by eye-tracking cameras and transferred to the user's face avatar. The approaches of Casa et al. [CFA+16] and Früh et al. [FSK17] evoked the Uncanny Valley effect to varying degrees. To mitigate this, Früh et al. [FSK17] did not completely remove the HMD, but rendered it as a semi-transparent object. These systems are similar to our approach in the way that they create a personalized avatar using an RGBD camera and produce almost photo-realistic avatars. The approach of Thies et al. [TZS+18] demonstrated better results by using a 3D morphable model (3DMM) as the underlying head mesh template and by optimizing the visual quality with an analysis-by-synthesis approach [BV99]. While the visual quality is convincing, this approach only provides stereoscopic renderings without the ability to freely choose the perspective around the reconstructed face because the final results are based on a given 2D video. Furthermore, it explicitly does not allow for manipulation of the head's rotation, scale, and position in the final result.

The systems of Lombardi et al. [LSSS18], Wei et al. [WSS+19], and Raj et al. [RZS+21] create photo-realistic avatars with authentic facial expressions. While previous works completed the generation of personalized avatars within a few minutes, the system of Lombardi et al. requires a computational time of more than a day. The three-dimensional avatar is generated with the aid of a large number of high-resolution images from different angles and facial expressions with an expensive hardware setup that generates a large amount of data for further processing. The created face avatar can be controlled by three RGB cameras attached to an HMD. A key component of this system is the use of Variational Autoencoders (VAEs). Both VAEs and GANs have been proven several times to be suitable for authentic face reconstruction. However, since the literature shows that VAEs with only an L1 loss tend to produce blurry results more often, we use GANs [JH19]. The latter concept was first presented by Goodfellow et al. [GPAM+14], and Radford et al. [RMC15] improved it in a sustainable way. Furthermore, Karras et al. [KALL17] achieved photo-realistic portrait images that are indistinguishable from real photographs by using the principle of the Progressive Growing GAN. However, according to Karras et al., the GAN has little to no external control over the appearance of the generated object or face because the input to the network is a latent vector without any direct relation to a face property such as hair color, facial expression, or gender. In further work Karras et al. [KLA18] enhanced the architecture of the GAN and were able to automatically separate higher-level attributes (e.g. pose, identity) from stochastic variations (e.g. freckles, hair). Nevertheless, this approach does not allow to explicitly control the facial expression.

Conditional GANs (cGANs) have been shown to be able to learn and reproduce concrete relationships between inputs and outputs. For example, Mirza and Osindero [MO14] have extended the input to the generator and discriminator with a label y, which makes it possible to generate images from a particular category y. This method for conditioning GANs was developed further by Radford et al. [RMC15] with the DCGAN and by Isola et al. [IZZE17] with the Pix2Pix GAN. They replaced the noise input vector z with a user-defined input vector. Without a noise vector, there is no latent space Z (since z ∈ Z). If the stochastic aspect contained in the noise vector is not compensated, the GAN will only memorize the training examples. Any inputs that deviate from the training data would lead to inadequate results, as described by Isola et al. [IZZE17]. By using a U-net architecture [RFB15] with dropouts in the Pix2Pix GAN, the stochastic aspect as well as the missing latent space can be otherwise integrated into the generator. The discriminator of the Pix2Pix GAN receives the same input image x as the generator as well as its output image yfake = G(x) or the image yreal matching x from the dataset. This is basically equivalent to the idea of cGANs [MO14] where not only the output of the generator is evaluated but also its difference from the input. Unlike the cGAN, the output of the discriminator of the Pix2Pix GAN is not a scalar that decides between 'real' or 'false' but a matrix. By using convolutional layers (cf. Radford et al. [RMC15]), each entry in the output matrix represents an n × m-sized region (a so-called patch) of the input image. This allows abstract representations to be admitted as matrices (e.g. images) for conditioning the network to have a controlled influence on the output of the generator. This approach was further developed by [WLZ+17] with the Pix2PixHD GAN to generate images with a higher resolution and more details. In this paper, we adapt the idea of cGANs, especially of the Pix2Pix and Pix2PixHD frameworks, and tailor them to our application domain.
3 System

In the following, we briefly explain the process and structure of the proposed system, as shown in Fig. 1, and then discuss the steps in more detail in the subsequent sections.

Our process starts with the acquisition of a personal RGBD dataset. The acquired data is preprocessed by an automated procedure. A Facial Landmark Map (FLM) per RGB image is extracted and saved beside the corresponding RGB image. It encodes the facial expression of the respective RGB image in a binary image as so-called landmarks, as shown in the second image from the left in Fig. 1. Our proposed GAN is trained with the captured RGBD images as well as with the corresponding FLMs. After the training, the system can be used for real-time telepresence or live broadcasting. In the application scenario, the user would wear a face-tracking head-mounted display (HMD) that could create an FLM in real time, which we then feed into the trained generator module of our GAN, as described in our previous work [LPDG20]. The GAN could create an RGB and a D image of the 'learned' person based on the FLM, and finally, we fuse the generated RGB and D images into a textured point cloud. Instead of our face-tracking HMD [LPDG20], we use raw face-tracking data from our evaluation dataset captured in the first step with the camera helmet mount. We thus ensure that no tracking errors of the HMD are incorrectly evaluated as reconstruction errors of the GAN.
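The fusion of the generated RGB and D images into a textured point cloud amounts to a standard pinhole back-projection; as a reference (a sketch under the assumption of known camera intrinsics K, which the text does not spell out), each pixel (u, v) with generated depth d(u, v) is lifted to a 3D point

$$\begin{pmatrix} X \\ Y \\ Z \end{pmatrix} = d(u,v)\, K^{-1} \begin{pmatrix} u \\ v \\ 1 \end{pmatrix}, \qquad K = \begin{pmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{pmatrix},$$

and colored with the RGB value generated at the same pixel.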
3.1 Training data

Figure 2: Helmet camera mount for RGBD data acquisition. This mount ensures that head rotations are not included in the training dataset and, therefore, reduces the entropy in the dataset. Moreover, this method significantly reduces the training time and increases the visual quality of the output images. The material price of the helmet mount without the RGBD camera is about 60 USD.
An RGBD dataset of the respective person forms the basis for the training process. For the acquisition process, an RGBD camera is mounted in a fixed position on a self-made helmet mount, as shown in Figure 1 left and Figure 2. This mount ensures that the entropy in the dataset is as minimal as possible. Varying distances, positions and head rotations do not contribute to 'learning' the user's face. By using the helmet mount, we ensure that the training time of the networks is short and the reconstruction quality of the final results is high, as we learned from previous experiments conducted without the helmet mount.

[Figure 2 label: RGBD camera (Microsoft Azure Kinect)]
The RGBD sensor in the mount captures facial expressions and stores the color information as a 3-channel image file (8 bit per channel) with 2048 × 1536 pixels, while it stores the depth information as a 1-channel image file (16-bit grayscale) with 640 × 576 pixels. During the capture process, the user is encouraged to perform a variety of different facial expressions. A dataset of a person should contain about 1500 to 2000 RGBD images, and the acquisition process takes about 10 minutes. The data is then preprocessed for further steps. First, the foreground and background pixels are clipped in front of and behind the face, and the depth resolution is reduced from 16 to 8 bit, which reduces the depth range from 65,535 to 255 mm. This speeds up the training process of the GAN and significantly reduces depth noise. With the help of the helmet mount, we ensure that a depth range of 255 mm is sufficient for spatially reproducing the frontal part of the head, since the distance between the sensor and the face in the helmet mount is always fixed. Furthermore, the data is normalized and the image data is converted into values between -1 and 1 for the training process. Contrasts are sharpened by normalizing the histogram.
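A plausible form of this preprocessing (the exact offsets are not given in the text and are assumptions here) is a clamped 8-bit depth quantization followed by the usual affine mapping into the GAN's input range:

$$d_{8}(u,v) = \operatorname{clip}\big(d_{16}(u,v) - d_{\mathrm{near}},\, 0,\, 255\big), \qquad x' = \frac{x}{127.5} - 1,$$

where $d_{\mathrm{near}}$ is the fixed clipping distance in front of the face, so that one 8-bit step corresponds to 1 mm of depth.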
3.2 Determination of Facial Landmarks

To control the output of the generator and thus the facial expressions of a person's face, the dataset must be labeled before the training process. We use 70 facial landmarks (68 of the Multi-PIE scheme [GMC+08] and two for the location of the irises) as binary images for each tuple of RGB and corresponding depth image in the dataset. As mentioned before, we call these binary images Facial Landmark Maps (FLM). The positions of the landmarks identify the expressions of the person in each image of the dataset. To determine the landmarks, we use the Face Alignment Network (FAN) of Bulat and Tzimiropoulos [BT17]. The FAN is not able to determine the position of the person's pupils within an image. Therefore, two additional landmarks were implemented based on an eye-tracking procedure by Timm and Barth [TB11, pup18]. We experimented with a Tobii Eye Tracker 4C, but users reported that the additional weight on the helmet mount felt uncomfortable during the capture procedure. Both the tracking results of the Tobii Eye Tracker and of Timm and Barth were similar and sufficient for our application. Once the landmarks in each image had been located, a bounding rectangle of the maximal and minimal landmark locations was created, and the RGB and D images were cropped to this area and resized to 512 × 512 pixels.
3.3 Neural Network Architecture

Previous work with neural networks, such as Wu et al. [WZX+16], has shown that a voxel-based approach is associated with high training and execution times of the model. Therefore, an RGBD-based solution was targeted. The advantage of this approach lies in the compact representation of the data as a point cloud and the possibility to adapt previous RGB-based methods. Our underlying network architecture is derived from the Pix2Pix GAN by Isola et al. [IZZE17]. In earlier experiments, we discovered that this architecture was able to produce acceptable results [LPG20, LPDG20], but the images have a low resolution of 256 × 256 pixels, often lack details in high-frequency areas such as facial hair, and tend to produce time-inconsistent reconstructions with noise. Therefore, we experimented with the Pix2PixHD framework [WLZ+17], which is an extension of the Pix2Pix GAN [IZZE17]. Although it produces images with a higher resolution and better quality, the inference time is not capable of retaining real-time frame rates on commodity hardware. Therefore, we kept the Pix2Pix framework as a base and gradually added elements from the Pix2PixHD framework and further improvements from other works until we obtained an acceptable image quality with a reasonable processing speed for interactive frame rates. In summary, we propose the following changes to the Pix2Pix framework:

1. We added a multi-scale discriminator that receives three different resolutions of the input image, and an additional Feature Matching Loss as described in Pix2PixHD [WLZ+17].

2. We replaced the Sigmoid Cross Entropy Loss of Pix2Pix's discriminator with the Least-Squares Loss of the LSGAN [MLX+17], as suggested by Wang et al. [WLZ+17].

3. We exchanged the Perceptual-VGG Loss [JAFF16] originally suggested by Wang et al. [WLZ+17] with the better performing Learned Perceptual Image Patch Similarity (LPIPS) by Zhang et al. [ZIE+18].
For the discriminator we obtain the following objective:

$$\min_{D_1, D_2, D_3} V_{\mathrm{GAN}}(D) = \sum_{k=1,2,3} \mathcal{L}_{\mathrm{cLSGAN\text{-}D}}(D_k, G) \tag{1}$$

where $D_1$, $D_2$ and $D_3$ operate on the three different resolutions of the input image. For the generator we end up with the following objective function:

$$\min_{G} V_{\mathrm{GAN}}(G) = \sum_{k=1,2,3} \Big[ \mathcal{L}_{\mathrm{cLSGAN\text{-}G}}(D_k, G) + \lambda_{\mathrm{FM}}\,\mathcal{L}_{\mathrm{FM}}(D_k, G) \Big] + \lambda_{L1}\,\mathcal{L}_{L1}(G) + \lambda_{\mathrm{LPIPS}}\,\mathcal{L}_{\mathrm{LPIPS}}(y, G(x)) \tag{2}$$

where we choose the following hyperparameters: $\lambda_{\mathrm{FM}} = 10$, $\lambda_{L1} = 100$, $\lambda_{\mathrm{LPIPS}} = 10$.
The functions $\mathcal{L}_{\mathrm{cLSGAN\text{-}D}}$ and $\mathcal{L}_{\mathrm{cLSGAN\text{-}G}}$ can be found in Mao et al. [MLX+17], whereas the function $\mathcal{L}_{\mathrm{FM}}$ is based on the feature matching loss of Wang et al. [WLZ+17]. In order to maintain a faster inference time than the Pix2PixHD, we did not implement the coarse-to-fine approach for the generator. We sacrifice a very high resolution of the output images for computational speed. We provide further details on the architecture on GitHub.
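For reference, the least-squares terms of Mao et al. [MLX+17], written here for the conditional setting used in this work (with x the FLM input and y the real RGBD image), are

$$\mathcal{L}_{\mathrm{cLSGAN\text{-}D}}(D_k, G) = \tfrac{1}{2}\,\mathbb{E}_{x,y}\big[(D_k(x, y) - 1)^2\big] + \tfrac{1}{2}\,\mathbb{E}_{x}\big[D_k(x, G(x))^2\big],$$

$$\mathcal{L}_{\mathrm{cLSGAN\text{-}G}}(D_k, G) = \tfrac{1}{2}\,\mathbb{E}_{x}\big[(D_k(x, G(x)) - 1)^2\big].$$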
Using these improvements helps to preserve high-frequency details such as facial hair and significantly increases the reconstruction quality, as explained further in the results section (sec. 4). Because we mainly enhanced the loss function and the discriminator side, we did not need to change the generator. Therefore, we are able to maintain high frame rates during inference, since only the generator module is used in the telepresence and live broadcasting scenario. As a side effect, the training process requires more memory, but the overall training time decreases by more than a half compared to our Pix2Pix-only approach (from about 19 hours to 8 hours), because the new loss term is more purposeful for our application and, therefore, helps to obtain better results in less time.

The generator of our GAN receives a 512×512 pixel FLM of the facial landmarks as input. Compared to the Pix2Pix GAN, the output has been extended by a fourth feature map to be able to generate depth images. In addition, the discriminator receives five feature maps as input instead of only four. While the first four correspond to the four channels of the RGBD image, the remaining feature map contains the corresponding FLM, as visualized in Figure 3.

Figure 3: Example convolution for the discriminator input. Each RGBD channel and the FLM are weighted individually.

One of our early hypotheses in [LPG20] was that starting the training process with a higher number of FLMs (e.g. four times as many) would result in better reconstruction results due to a more balanced ratio between RGBD images and FLMs (cf. Fig. 3). This hypothesis has been disproved: changing the number of FLMs does not change the quality of the results but only increases the training time.
3.4 Training Process

Before the training process, all weights of the GAN are initialized with random values. The random values follow a Gaussian distribution with an expected value of 0 and a standard deviation of 0.02. The batch size of the input data into the GAN is 1, and the epoch count is 100. Both generator and discriminator are trained with an initial learning rate of 0.0002, which decreases linearly towards zero over the last 70 epochs. The discriminator loss LossD = (LossD_real + LossD_fake) · 0.5 effectively reduces the discriminator's learning rate, making it slower to learn relative to the generator. This is necessary because at the beginning of the training phase the discriminator can effortlessly accomplish its task. If the discriminator learns too quickly, the generator has no chance to learn how to create the desired face.
+ 4 Results
+
+ GANs are difficult to train because their adversarial training procedure requires balancing
+ the learning rates and losses of the generator and discriminator networks. As we use many
+ different losses, extensive hyperparameter tuning was necessary to find the optimal settings.
+ Fig. 5 shows the reconstruction results for four different persons with the best training
+ parameters described in section 3.4. We did not use the face-tracking HMD for the evaluation,
+ similar to [LPDG20], because it can cause slight tracking errors and could decrease the quality
+ of the results for comparison. Our intention is to directly compare the improvements of our
+ pipeline to our previous system described in [LPG20]. For a comparison in motion, we refer the
+ reader to the corresponding video that can be found via the GitHub link.
+
+ Figure 4: Our new pipeline, network architecture and losses significantly improved the quality.
+ Image a) shows a sample from the previous system of Ladwig et al. [LPG20, LPDG20]. Image b)
+ illustrates the enhanced resolution (from 256×256 to 512×512 pixels) and the improved
+ preservation of high-frequency details.
+
+ As quantitative metrics for the assessment of the reconstruction quality, we use Structural
+ Similarity (SSIM) [ZBSS04] and Learned Perceptual Image Patch Similarity (LPIPS) [ZIE+18]. Our
+ previous system [LPG20] achieved on average an SSIM of 0.851 and an LPIPS of 0.114. Our
+ proposed system reaches on average 0.910 for SSIM (higher is better) and 0.082 for LPIPS (lower
+ is better). The numerical results of the SSIM metric are comparable to a JPEG compression at
+ about half the original file size of the images in column 3. In contrast, the previous system
+ [LPG20] only achieved a reconstruction quality comparable to a JPEG compression at less than a
+ quarter of the original file size.
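+ The following sketch shows one way such SSIM and LPIPS scores can be computed with scikit-image
+ and the lpips package; the AlexNet backbone and the random placeholder images are assumptions
+ for illustration.
+
+ ```python
+ import numpy as np
+ import torch
+ import lpips
+ from skimage.metrics import structural_similarity
+
+ generated = np.random.rand(512, 512, 3).astype(np.float32)     # placeholder GAN output (RGB)
+ ground_truth = np.random.rand(512, 512, 3).astype(np.float32)  # placeholder real image
+
+ ssim = structural_similarity(ground_truth, generated, channel_axis=-1, data_range=1.0)
+
+ loss_fn = lpips.LPIPS(net='alex')  # backbone assumed; see [ZIE+18]
+ to_tensor = lambda a: torch.from_numpy(a).permute(2, 0, 1)[None] * 2.0 - 1.0  # map to [-1, 1]
+ lpips_score = loss_fn(to_tensor(ground_truth), to_tensor(generated)).item()
+ print(ssim, lpips_score)
+ ```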
+ The main issue of our previous approach was the sharpness of the generated images
+ [LPG20, LPDG20], as visualized in Fig. 4. Due to the new architecture and loss functions, the
+ system produces images with more details. Even skin pores are reconstructed well, e.g. on the
+ user's forehead, as can be seen in Fig. 5, rows E to H. Furthermore, we noticed a better
+ reconstruction quality in areas with high-frequency details such as facial hair, as shown in
+ Fig. 4. The temporal consistency is also improved; please see the linked video for details.
+
+ The error between the generated and the ground truth depth values is mostly below 4 mm, as
+ depicted in column 6. Exceptions are the reconstruction results with the worst SSIM and LPIPS
+ metrics per person's dataset, such as shown in Fig. 5, rows D and H, as well as in Fig. 6,
+ rows E and J. Furthermore, outliers can be seen in column 8. Note that we use the raw depth
+ image of the Kinect and the raw output of our GAN. We do not filter or smooth the images; we
+ therefore assume that the network has also learned the depth noise of the sensor, which causes
+ additional depth errors.
+
+ To compare the faces between the images of columns 2 and 3 without measuring background
+ changes, we determined the facial area based on depth values and rejected all other pixels. At
+ the border between the faces and the background, large differences in the SSIM and in the
+ depth-difference visualization can be seen. These differences are caused by the fact that the
+ cropped areas of the real and generated images do not always align perfectly due to minimal
+ differences in the generated faces. In addition, we apply erosion and clipping to the face to
+ reject parts of the background, which can also cause these minimal differences.
+ Although our quantitative metrics indicate better results, our system still shows limitations
+ in the reconstruction quality of the eyes, lips and oral cavity. Especially teeth and the
+ tongue are often reconstructed with noisy artifacts, as can be seen in Fig. 5, row D, column 2.
+ The error increases with a graceful degradation as the expression moves towards exaggerated
+ expressions that are far from the neutral facial expression. The eyes are reconstructed with
+ fewer artifacts than the oral cavity, but we observed that even a small amount of image
+ artifacts can evoke the Uncanny Valley effect, as can be seen especially in Fig. 5, row B,
+ column 2. Please zoom in for details.
+ In our experiments we used PyTorch 1.8 and Python 3.7 on Windows 10. We trained the same data
+ set with approximately 1600 elements (an element is a set comprised of an RGBD image and an
+ FLM) on two different machines. Training took 8 hours on a system equipped with an Nvidia
+ RTX 2080 Ti, an AMD Ryzen Threadripper 2990WX and 128 GB RAM. On a newer system, comprised of
+ an Nvidia RTX 3090, an Intel i7-9900K and 32 GB RAM, training took only 4 hours and 13 minutes.
+ Our previous system took 17-20 hours on the machine with the RTX 2080 Ti for only 600 elements.
+
+ The time required for a forward pass of the generator module with an image size of 512×512
+ pixels is between 3 and 4 ms (333-250 fps) on an Nvidia RTX 3090 and between 6 and 7 ms
+ (167-143 fps) on an Nvidia RTX 2080. Our previous system was faster (between 1 and 3 ms) but
+ generated only images with a size of 256×256 pixels. The timings of the present system are
+ still suitable for VR-based applications, where 75 to 120 fps are common frame rates. However,
+ note that many face-tracking systems only work at 60 or even 30 fps, which can limit the frame
+ rate of the pipeline.
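+ Per-frame timings like the ones above can be measured as in the following sketch; the generator
+ module is again a placeholder, not our actual network.
+
+ ```python
+ import time
+ import torch
+ from torch import nn
+
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ generator = nn.Conv2d(1, 4, 3, padding=1).to(device).eval()  # placeholder generator
+ flm = torch.rand(1, 1, 512, 512, device=device)
+
+ with torch.no_grad():
+     for _ in range(10):  # warm-up passes
+         generator(flm)
+     if device == 'cuda':
+         torch.cuda.synchronize()
+     t0 = time.perf_counter()
+     for _ in range(100):
+         generator(flm)
+     if device == 'cuda':
+         torch.cuda.synchronize()
+ print((time.perf_counter() - t0) / 100 * 1e3, 'ms per forward pass')
+ ```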
+ rate of the pipeline.
691
+ 5
692
+ Limitations and Future Work
693
+ As mentioned before, one of the major issues is the
694
+ low reconstruction quality of exaggerated expressions
695
+ of the eyes, lips and oral cavity. The artifacts can in-
696
+ duce the Uncanny Valley Effect and must be avoided
697
+ for telepresence or broadcasting applications.
698
+ As a
699
+ further step, we plan to use a 3DMM [BV99] such
700
+ as the FLAME model [LBB+17] as a better inductive
701
+ bias to regularize depth and color information more
702
+ efficiently. Furthermore, we observed that landmark
703
+ tracking is not sufficient for faithful lip movement dur-
704
+ ing speech. Therefore, an additional input signal be-
705
+ sides the landmarks is necessary. A conditioning of
706
+ speech as audio signals could provide a solution. An-
707
+ other issue to improve is the uncomfortable helmet
708
+ mount. A solution with a stationary RGBD camera
709
+ placed on a tripod or table is a favorable approach for
710
+ future research.
711
+ 6 Conclusion
+
+ We presented an end-to-end pipeline that improves on previous approaches [LPG20, LPDG20] and a
+ new GAN architecture that can learn the facial identity and individual expressions of a user
+ and reproduce them as a textured point cloud at frame rates that are suitable for Virtual
+ Reality, telepresence and broadcasting environments. We have incorporated and extended the
+ architectures, losses, and processing pipelines of several approaches from the field of neural
+ rendering. Compared to previous works, our proposed system generates higher-quality image
+ results with a slightly longer run time at inference. We achieved this goal by mainly changing
+ the architecture of the discriminator while keeping the architecture of the generator lean. The
+ reconstruction results partially lie in the Uncanny Valley, but they still provide an authentic
+ visualization of the respective person's identity and individual facial expressions. We believe
+ that neural rendering will be a crucial part of photo-realistic real-time rendering of humans
+ in future applications. Our work is a further step in this direction and will hopefully help to
+ understand, improve and apply this technology.
+ 7 Acknowledgments
+
+ We thank the MIREVI group at the University of Applied Sciences Düsseldorf and the
+ 'Promotionszentrum Angewandte Informatik' (PZAI) in Hessen, Germany. This work is sponsored by
+ the German Federal Ministry of Education and Research (BMBF) under the project numbers
+ 16SV8182 (HIVE-Lab), 13FH022IX6 (iKPT 4.0) and 16SV8756 (AniBot).
+ Figure 5: Results 1/2: This overview shows FLMs in column 1 from our evaluation datasets; hence
+ the results are based on data that is unseen by the neural network. The FLMs were created from
+ the images in column 3, which are real images from the evaluation data set. Column 2 shows the
+ results generated by our GAN, which received the FLM from column 1. Column 4 depicts the SSIM
+ difference; darker values indicate larger differences between the images in columns 2 and 3.
+ Column 6 visualizes the error between the generated depth and the ground truth depth (color
+ scale from 0 to 12 mm). The combination of the generated depth and color data can be seen in
+ columns 7 and 8 from angles of 30 and 90 degrees. Per-row metrics (SSIM, higher is better /
+ LPIPS, lower is better): A: 0.906/0.086; B: 0.903/0.076; C: 0.923/0.068 (best result in data
+ set); D: 0.836/0.146 (worst result in data set); E: 0.924/0.068; F: 0.909/0.086; G: 0.933/0.067
+ (best result in data set); H: 0.882/0.110 (worst result in data set).
+
+ Figure 6: Results 2/2: Two further subjects are shown. The last two samples of each person
+ summarize the best and the worst images (measured by SSIM and LPIPS) and reflect the range of
+ the reconstruction quality. (Columns as in Figure 5.)
+
+ References
+
+ [alt22] AltspaceVR, https://altvr.com/, 2022, Accessed: 2022-03-04.
+
+ [BT17] Adrian Bulat and Georgios Tzimiropoulos, How Far are We from Solving the 2D and 3D Face
+ Alignment Problem?, 2017 IEEE International Conference on Computer Vision (ICCV), 2017.
+
+ [BV99] Volker Blanz and Thomas Vetter, A Morphable Model for the Synthesis of 3D Faces,
+ Proceedings of the 26th Annual Conference on Computer Graphics and Interactive Techniques,
+ SIGGRAPH, 1999.
+
+ [CFA+16] Dan Casas, Andrew Feng, Oleg Alexander, Graham Fyffe, Paul Debevec, Ryosuke Ichikari,
+ Hao Li, Kyle Olszewski, Evan Suma, and Ari Shapiro, Rapid Photorealistic Blendshape Modeling
+ from RGB-D Sensors, Computer Animation and Social Agents, CASA '16, 2016.
+ [FSK17] Christian Frueh, Avneesh Sud, and Vivek Kwatra, Headset Removal for Virtual and Mixed
+ Reality, ACM SIGGRAPH, 2017.
+
+ [GMC+08] Ralph Gross, Iain Matthews, Jeffrey Cohn, Takeo Kanade, and Simon Baker, Multi-PIE,
+ 2008 8th IEEE International Conference on Automatic Face & Gesture Recognition, 2008.
+
+ [GPAM+14] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil
+ Ozair, Aaron Courville, and Yoshua Bengio, Generative adversarial nets, Advances in Neural
+ Information Processing Systems (NIPS), 2014.
+
+ [GPL+21] Philip-William Grassal, Malte Prinzler, Titus Leistner, Carsten Rother, Matthias
+ Nießner, and Justus Thies, Neural head avatars from monocular RGB videos, 2021, Accessed:
+ 2022-03-21 from https://arxiv.org/abs/2112.01554.
+
+ [IZZE17] P. Isola, J. Zhu, T. Zhou, and A. A. Efros, Image-to-Image Translation with
+ Conditional Adversarial Networks, IEEE Conference on Computer Vision and Pattern Recognition
+ (CVPR), 2017.
+
+ [JAFF16] Justin Johnson, Alexandre Alahi, and Li Fei-Fei, Perceptual losses for real-time style
+ transfer and super-resolution, ECCV, 2016.
+
+ [JH19] John Hany and Greg Walters, Hands-On Generative Adversarial Networks with PyTorch 1.x,
+ Packt Publishing Ltd., 2019.
+
+ [KALL17] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen, Progressive Growing of GANs
+ for Improved Quality, Stability, and Variation, 2017, Accessed: 2022-07-07 from
+ https://arxiv.org/abs/1710.10196.
+
+ [KLA18] Tero Karras, Samuli Laine, and Timo Aila, A Style-Based Generator Architecture for
+ Generative Adversarial Networks, 2018, Accessed: 2022-07-07 from
+ https://arxiv.org/abs/1812.04948.
+
+ [LBB+17] Tianye Li, Timo Bolkart, Michael J. Black, Hao Li, and Javier Romero, Learning a model
+ of facial shape and expression from 4D scans, ACM Transactions on Graphics, 2017.
+
+ [LG18] Philipp Ladwig and Christian Geiger, A Literature Review on Collaboration in Mixed
+ Reality, Smart Industry and Smart Education, 15th International Conference on Remote
+ Engineering and Virtual Instrumentation (REV '18), Springer International Publishing, 2018.
+
+ [LPDG20] P. Ladwig, A. Pech, R. Dorner, and C. Geiger, Unmasking Communication Partners: A
+ Low-Cost AI Solution for Digitally Removing Head-Mounted Displays in VR-Based Telepresence,
+ 2020 IEEE International Conference on Artificial Intelligence and Virtual Reality (AIVR),
+ Los Alamitos, CA, USA, IEEE Computer Society, 2020.
+
+ [LPG20] Philipp Ladwig, Alexander Pech, and Christian Geiger, Auf dem Weg zu
+ Face-to-Face-Telepräsenzanwendungen in Virtual Reality mit generativen neuronalen Netzen,
+ GI VR/AR Workshop (Benjamin Weyers, Christoph Lürig, and Daniel Zielasko, eds.), Gesellschaft
+ für Informatik e.V., 2020.
+
+ [LSSS18] Stephen Lombardi, Jason Saragih, Tomas Simon, and Yaser Sheikh, Deep Appearance Models
+ for Face Rendering, ACM Trans. Graph., 2018.
+
+ [LTO+15] Hao Li, Laura Trutoiu, Kyle Olszewski, Lingyu Wei, Tristan Trutna, Pei-Lun Hsieh,
+ Aaron Nicholls, and Chongyang Ma, Facial Performance Sensing Head-Mounted Display, ACM Trans.
+ Graph., 2015.
+
+ [met22] Meta Horizon Worlds, https://www.oculus.com/horizon-worlds/?locale=de_DE, 2022,
+ Accessed: 2022-03-04.
+
+ [MLX+17] X. Mao, Q. Li, H. Xie, R. Y. K. Lau, Z. Wang, and S. Smolley, Least squares generative
+ adversarial networks, 2017 IEEE International Conference on Computer Vision (ICCV),
+ Los Alamitos, CA, USA, 2017.
+
+ [MO14] Mehdi Mirza and Simon Osindero, Conditional Generative Adversarial Nets, 2014, Accessed:
+ 2020-07-07 from https://arxiv.org/abs/1411.1784.
+
+ [OLSL16] Kyle Olszewski, Joseph J. Lim, Shunsuke Saito, and Hao Li, High-Fidelity Facial and
+ Speech Animation for VR HMDs, ACM Trans. Graph., 2016.
+
+ [pup18] Locating eye centers using means of gradients,
+ https://github.com/jonnedtc/PupilDetector, 2018, Accessed: 2021-12-28.
+
+ [RCV+19] A. Rössler, D. Cozzolino, L. Verdoliva, C. Riess, J. Thies, and M. Nießner,
+ FaceForensics++: Learning to Detect Manipulated Facial Images, 2019 IEEE/CVF International
+ Conference on Computer Vision (ICCV), 2019.
+
+ [RFB15] Olaf Ronneberger, Philipp Fischer, and Thomas Brox, U-Net: Convolutional Networks for
+ Biomedical Image Segmentation, Medical Image Computing and Computer-Assisted Intervention -
+ MICCAI, 2015.
+
+ [RMC15] Alec Radford, Luke Metz, and Soumith Chintala, Unsupervised Representation Learning
+ with Deep Convolutional Generative Adversarial Networks, 2015, Accessed: 2022-07-07 from
+ https://arxiv.org/abs/1511.06434.
+
+ [RZS+21] Amit Raj, Michael Zollhofer, Tomas Simon, Jason Saragih, Shunsuke Saito, James Hays,
+ and Stephen Lombardi, Pixel-aligned volumetric avatars, Proceedings of the IEEE/CVF Conference
+ on Computer Vision and Pattern Recognition (CVPR), 2021.
+
+ [TB11] Fabian Timm and Erhardt Barth, Accurate eye centre localisation by means of gradients,
+ Computer Vision Theory and Applications (VISAPP), 2011.
+
+ [TZS+18] Justus Thies, Michael Zollhöfer, Marc Stamminger, Christian Theobalt, and Matthias
+ Nießner, FaceVR: Real-Time Gaze-Aware Facial Reenactment in Virtual Reality, ACM Trans.
+ Graph., 2018.
+
+ [vrc22] VRChat, https://hello.vrchat.com/, 2022, Accessed: 2022-03-04.
+
+ [WLZ+17] Ting-Chun Wang, Ming-Yu Liu, Jun-Yan Zhu, Andrew Tao, Jan Kautz, and Bryan Catanzaro,
+ High-Resolution Image Synthesis and Semantic Manipulation with Conditional GANs, CVPR, 2017.
+
+ [WSS+19] Shih-En Wei, Jason Saragih, Tomas Simon, Adam W. Harley, Stephen Lombardi, Michal
+ Perdoch, Alexander Hypes, Dawei Wang, Hernan Badino, and Yaser Sheikh, VR facial animation via
+ multiview image translation, ACM Trans. Graph., 2019.
+
+ [WZX+16] Jiajun Wu, Chengkai Zhang, Tianfan Xue, William T. Freeman, and Joshua B. Tenenbaum,
+ Learning a Probabilistic Latent Space of Object Shapes via 3D Generative-Adversarial Modeling,
+ Advances in Neural Information Processing Systems, 2016.
+
+ [ZBSS04] Zhou Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, Image quality assessment:
+ from error visibility to structural similarity, IEEE Transactions on Image Processing, 2004.
+
+ [ZIE+18] Richard Zhang, Phillip Isola, Alexei A. Efros, Eli Shechtman, and Oliver Wang, The
+ unreasonable effectiveness of deep features as a perceptual metric, CVPR, 2018.
BNAzT4oBgHgl3EQfhv2_/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
DNE0T4oBgHgl3EQfQQC5/content/tmp_files/2301.02191v1.pdf.txt ADDED
@@ -0,0 +1,1849 @@
+ Physics informed neural network for charged particles surrounded by conductive boundaries
+
+ A Preprint
+
+ Fatemeh Hafezianzade, Department of Physics, Institute for Advanced Studies in Basic Sciences,
+ Zanjan, 45195-1159, Iran, fahafe98@gmail.com
+
+ Morad Biagooi, Intelligent Data Aim Ltd (IDA Ltd), Science and Technology Park of Institute
+ for Advanced Studies in Basic Sciences, Zanjan 45137-65697, Iran
+
+ SeyedEhsan Nedaaee Oskoee (corresponding author), Department of Physics, Institute for
+ Advanced Studies in Basic Sciences, Zanjan, 45195-1159, Iran, nedaaee@iasbs.ac.ir
+
+ January 6, 2023
+
+ arXiv:2301.02191v1 [physics.comp-ph] 5 Jan 2023
+
+ Abstract
+
+ In this paper, we develop a new PINN-based model to predict the potential of point-charged
+ particles surrounded by conductive walls. For the corresponding example simulation, the
+ proposed physics-informed neural network model reaches a mean square error of less than 7% and
+ an R2 score of more than 90%. The results are compared with those of a typical neural network
+ and of random forest as a standard machine learning algorithm: the R2 score of the random
+ forest model was 70%, and a standard neural network could not be trained well. In addition,
+ computing time is significantly reduced compared to a finite element solver.
+
+ Keywords: Poisson · Laplace · Physics-informed neural network · charged particles · Conductive
+ boundaries · supercapacitor
+
+ 1 Introduction
+
+ Computational Electromagnetic Simulation (CES) plays a significant role in many areas of
+ science and engineering, such as soft matter, electrical engineering, biomedical engineering
+ and chemistry, and it has numerous applications in industry. For example, it is one of the main
+ tools for investigating and designing supercapacitors, which are porous energy storage devices
+ with many industrial applications, especially when high power consumption or transfer is
+ needed (Miller and Simon [2008]). Here, studying the physical mechanisms arising from charge
+ storage in supercapacitors is essential for further technological development (Salanne et al.
+ [2016], Simon and Gogotsi [2008]).
+ Solving Maxwell's equations, especially the Poisson equation in this study, is an essential
+ part of computational electromagnetic algorithms (Jackson [1962]). Solving the Poisson equation
+ allows scientists to calculate the potential of electrical sources in any system. However, many
+ difficulties arise in practice due to the long-range nature of electrical interactions. In
+ particular, estimating the potential of point-charged components in an environment with
+ conductive walls is challenging because of the induced charges present on the boundaries.
+
+ Generally, there are two approaches to solving the Poisson equation: analytical solutions
+ (Jackson [1962]) and numerical methods. Analytical techniques, like the image charges method,
+ are limited to cases with regular geometries, and even then there is no guarantee of a
+ practical result: if, for example, a particle is placed in a cubic conductive container, the
+ image charges method produces an infinite series. Numerical methods, on the other hand, lead to
+ approximate solutions based on discretizing the space and/or time domains. One of the typical
+ numerical methods is the Finite Element Method (FEM, Jin [2014]), which discretizes the
+ continuous partial differential equations (PDEs) and forms a linear set of algebraic equations
+ (S. et al. [1991]). Nevertheless, even FEM fails to calculate the potential at a charged
+ particle's position, since the electrical potential is singular at the location of the charges.
+ A number of methods and algorithms have been developed to address this problem, including
+ Induced Charge MMM2D (ICMMM2D, Tyagi et al. [2007]) for 2D, ELCIC (Tyagi et al. [2008]) for
+ 2D + h, and Induced Charge Computation (ICC*, Tyagi et al. [2010], Kesselheim et al. [2010],
+ Arnold et al. [2013]) for 3D periodicity, as well as a method introduced by Reed et al. [2007].
+ Recently, another algorithm named PLT has been added. It was first demonstrated for a partially
+ periodic system constrained between two metallic plates in Rostami et al. [2016], and it was
+ then applied in CAVIAR (Biagooi et al. [2020]), a molecular dynamics simulation package for
+ charged particles surrounded by non-trivial conductive boundaries. Numerically solving these
+ problems with the CAVIAR package is accurate and takes less time than ICC* (Biagooi et al.
+ [2020]), but it is still time- and memory-consuming.
+
+ A data-driven approach to solving PDEs based on deep machine learning is also of great current
+ interest. For instance, Shan et al. [2020] present a CNN to predict the electric potential for
+ different excitations and permittivity distributions in 2D and 3D models; it is fast and
+ efficient compared with FEM (Jin [2014]). However, two problems prevent it from being used as a
+ Poisson solver in an MD simulation process: first, it does not work for discrete density
+ functions such as those of point charges, and second, it is a physics-free approach, which
+ makes it hard to take boundary conditions into account. The first problem can be overcome with
+ the PLT algorithm. Additionally, Raissi et al. [2019] introduced the physics-informed neural
+ network (PINN), whose loss function is an excellent alternative to the conventional deep
+ learning approach because the governing equations, boundary conditions, and initial conditions
+ are used in its definition.
+
+ In this paper, we apply a new PINN-based model to predict the potential of point-charged
+ particles surrounded by conductive walls, and we compare the results with a typical neural
+ network and with random forest as a standard machine learning algorithm. As an example, we
+ implement these models for a charged particle in a spherical container; the reason for using
+ this simple example is that an exact solution exists through an analytical method, the image
+ charges method. As a starting point, we use the PLT algorithm to transfer the Poisson equation
+ into the Laplace equation with modified boundary conditions, and we then train the models to
+ solve the Laplace equation with the new boundary conditions. The input data comprises the
+ position at which we want to evaluate the potential and the modified boundary conditions; the
+ output is the corresponding electrical potential at that position.
+ 2 Methods
+
+ This article aims to build a machine-learning model (ML-Model) to predict the potential of
+ point-charged particles surrounded by conductive walls. The potential of charged particles is
+ calculated by solving the Poisson equation, which can be written as (Jackson [1962]):
+
+ \nabla^2 \varphi = -\rho/\epsilon_0 = -\sum_{i=1}^{N} q_i \, \delta(\vec{x} - \vec{x}_{q_i})/\epsilon_0,   (1)
+
+ where φ is the potential and ρ is the charge distribution. The first and most straightforward
+ ML-Model that comes to mind is a model that takes x_q and x as input and returns φ(x_q, x) as
+ output. Here x_q is the position of a point-charged particle, x is the position at which we
+ want to calculate the potential, and φ(x_q, x) is the corresponding potential. The number of
+ input features then depends on the number of charged particles; for instance, in 3 dimensions,
+ with N charged particles there have to be 3 + 3 × N input features. Therefore, this kind of
+ model can only predict the potential for a fixed number of charged particles. In many
+ applications of this method, such as molecular dynamics simulation, this number is not fixed
+ and can even increase or decrease during the simulation. To overcome this problem, we use the
+ PLT algorithm to transpose the Poisson equation into the Laplace equation with new boundary
+ conditions; this algorithm is discussed in more detail in Sec. 2.1. We can then train a model
+ that takes x and the modified boundary conditions as input features and returns φ(φ_b, x) as
+ output. We define the boundary conditions only on N_b fixed points on the boundary,
+ {φ_1, φ_2, . . . , φ_{N_b}}. In this way, with the PLT algorithm, we can build a model with a
+ fixed number of input features that can predict the potential of any set of charged particles.
+ Figure 1: Methodology flow chart. The blue part shows the data preparation, in which the
+ reference data set is created based on the PLT algorithm. The red part shows the training
+ process: the reference set is first split into train and test sets, then the RF, ANN, and PINN
+ models are trained on the train set, and after tuning the hyperparameters the best model is
+ chosen. (Flow chart steps: creating the reference data set with the PLT algorithm; splitting
+ the reference set into train and test sets; training the RF, ANN, and PINN models; testing and
+ hyperparameter tuning of all models; choosing the best model.)
+ 2.1 Poisson to Laplace Transformation (PLT)
+
+ According to the PLT algorithm, the electrical potential is divided into two parts, a singular
+ potential (φ_si) and a smooth potential (φ_sm): φ(x) = φ_si(x) + φ_sm(x). It is important to
+ note that the smooth part is the solution of the Laplace equation with modified boundary
+ conditions,
+
+ \nabla^2 \varphi_{sm}(\vec{x}) = 0,   (2)
+
+ while φ_si obeys the familiar Coulomb law,
+
+ \varphi_{si}(\vec{x}) = \sum_{i=1}^{N} \frac{q_i}{4 \pi \epsilon_0 \, \|\vec{x} - \vec{x}_i\|}.   (3)
+
+ It can be seen that the modified boundary condition for φ_sm is given by
+
+ \varphi_{sm}|_{\vec{x}_{bc}} = \varphi|_{\vec{x}_{bc}} - \varphi_{si}|_{\vec{x}_{bc}},   (4)
+
+ where φ|_{x_bc} corresponds to the initial electrical potential on the boundaries. With the PLT
+ algorithm we thus transform the Poisson equation into the Laplace equation with new, modified
+ boundary conditions, and then train an ML-Model with these modified boundary conditions as an
+ input parameter and the smooth potential as an output. Afterward, summing the singular and the
+ predicted smooth potential yields the total potential. The advantage of the PLT algorithm is
+ that it leads to a fixed number of input features, since this number becomes independent of the
+ number of point-charged particles.
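+ A minimal NumPy sketch of this split, with illustrative function and variable names, could look
+ as follows.
+
+ ```python
+ import numpy as np
+
+ EPS0 = 8.8541878128e-12  # vacuum permittivity [F/m]
+
+ def phi_singular(x, charges, positions):
+     """Coulomb potential of the point charges at the evaluation points x (Eq. 3)."""
+     x = np.atleast_2d(x)
+     phi = np.zeros(len(x))
+     for q, xq in zip(charges, positions):
+         phi += q / (4.0 * np.pi * EPS0 * np.linalg.norm(x - xq, axis=1))
+     return phi
+
+ def modified_bc(phi_bc, x_bc, charges, positions):
+     """Boundary values for the smooth Laplace problem (Eq. 4)."""
+     return phi_bc - phi_singular(x_bc, charges, positions)
+ ```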
+ Figure 2: Schematic of the PLT method: a) the main system, which has point charges inside it;
+ b) the new system without any point charges and with modified boundaries; c) the Nb points on
+ the boundary that are used as model input. (The color bars in panels a and b show φ_sm on a
+ scale from 65 to 100.)
+ 2.2 Data Engineering
+
+ Training a highly accurate model requires a good train set. In this work, the reference set is
+ Γ = {x_i, y_i, z_i, \vec{φ}^i_{bc}, φ^i}_{i=1}^{N}, where the input comprises
+ {x, y, z, \vec{φ}_{bc} = {φ_1, φ_2, ..., φ_{N_b}}} and φ is the target. Here (x, y, z) is the
+ coordinate of a point in the container at which we want to calculate the potential, φ is the
+ numeric value of the potential at this point, and {φ_1, φ_2, ..., φ_{N_b}} is the boundary
+ condition on N_b points of the boundary. First, N_p positions are chosen in the container at
+ which the potential is to be predicted; that is, for each boundary condition
+ {φ_1, φ_2, ..., φ_{N_b}} there are N_p such points. The reference set is then created for N_q
+ different boundary conditions, so it consists of N = N_q × N_p samples, which can be split into
+ train and test sets. In this case, our container is a sphere, and we set N_p = 78, N_b = 26,
+ and N_q = 100. Our reference set therefore comprises 100 different boundary conditions, and for
+ each boundary condition {φ_1, φ_2, ..., φ_26} there are 78 points in the sphere at which we
+ calculate the potential. We use the solution of the image charges method (Eq. 5) to calculate
+ the targets of the reference set:
+
+ \varphi(\vec{x}) = \frac{1}{4\pi\epsilon_0} \left\{ \frac{q}{\|\vec{x} - \vec{x}_q\|} + \frac{q'}{\|\vec{x} - \vec{x}_{q'}\|} \right\}, \qquad q' = -\frac{a}{r}\, q, \qquad \vec{x}_{q'} = \frac{a^2}{r} \frac{\vec{x}_q}{\|\vec{x}_q\|},   (5)
+
+ where a is the radius of the conductive spherical shell and r is the distance of the point
+ charge q from its center. The numeric value of the potential is very small (∼ 10^-9), which
+ leads to significant rounding errors during computation; therefore, the potential of an
+ electron at a distance of 1 m from it, 1.44 × 10^-9 V, is used as a unit to make Eq. 5
+ dimensionless. We randomly chose 5000 and 1000 samples from the reference set to create a train
+ and a test set; the two sets have no samples in common. In addition, the best model should
+ adequately predict not only the potential of test samples but also that of samples with
+ boundary conditions different from those in the train and test sets. To evaluate the models
+ better, we therefore prepare an extrapolation set that includes 1000
+ samples with 55 distinct boundary conditions.
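+ As a sketch, a single reference target for a charge inside a grounded shell can be computed
+ from Eq. 5 as follows; the function name and the handling of the dimensionless unit are
+ illustrative assumptions.
+
+ ```python
+ import numpy as np
+
+ def phi_image_charge(x, xq, q=1.0, a=1.0):
+     """Potential at x of a charge q at xq inside a grounded shell of radius a (Eq. 5).
+     The 1/(4*pi*eps0) prefactor is absorbed into the dimensionless unit described above."""
+     r = np.linalg.norm(xq)
+     q_img = -q * a / r                # image charge
+     x_img = (a**2 / r) * (xq / r)     # image position, outside the shell
+     return q / np.linalg.norm(x - xq) + q_img / np.linalg.norm(x - x_img)
+ ```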
+ 2.3 ML Algorithms
+
+ In this work, three different supervised learning methods are used, and their regression
+ accuracy is evaluated based on the metrics presented in Sec. 2.4. Our main focus is on
+ Physics-Informed Neural Networks (PINN, Raissi et al. [2019]), but to compare our results with
+ other ML algorithms we also use Random Forest (RF, Breiman [2001]) and Artificial Neural
+ Networks (ANN). All models are briefly introduced, their hyperparameters are fine-tuned, and
+ their performance is reported. Scikit-learn (Pedregosa et al. [2011]), TensorFlow (Abadi
+ [2016]), Keras (Chollet [2015]), and NumPy (Walt et al. [2011]) are the Python libraries used
+ in this project.
+ 2.3.1 Random Forest (RF)
+
+ RF is one of the most popular machine learning algorithms for regression problems. In this
+ project it was chosen for two reasons: a) it is fast to train, and b) it is robust against
+ over-fitting. Over-fitting is detected when the performance on the train samples is perfect
+ while the performance on the test samples is poor. RF is an ensemble model in which the
+ predicted potential is the average over many uncorrelated trees. Although each tree is a weak
+ learner, grouping many trees yields a strong learner. The RF randomizes the trees by choosing a
+ subset of the training data and of the features for each tree. Here we use the scikit-learn
+ (Pedregosa et al. [2011]) RF implementation; a minimal usage sketch follows.
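+ The sketch below uses placeholder data; the real features are x, y, z plus the 26 boundary
+ values, and the target is the smooth potential.
+
+ ```python
+ import numpy as np
+ from sklearn.ensemble import RandomForestRegressor
+
+ rng = np.random.default_rng(0)
+ X_train, y_train = rng.random((5000, 29)), rng.random(5000)  # placeholder train set
+ X_test, y_test = rng.random((1000, 29)), rng.random(1000)    # placeholder test set
+
+ rf = RandomForestRegressor(n_estimators=100, random_state=0)  # 100 trees, as tuned in Sec. 3.1.1
+ rf.fit(X_train, y_train)
+ print(rf.score(X_test, y_test))                               # R^2 score on the test set
+ ```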
+ 2.3.2 ANN
+
+ A typical neural network architecture consists of an input layer, multiple hidden layers, and
+ an output layer, with several neurons in each layer. In detail:
+
+ • Input layer: The neurons in the input layer are the input features.
+
+ • Hidden layers: The value of every neuron in a hidden layer is a linear combination of the
+ neurons in the previous layer followed by the application of an activation function (Eq. 6),
+ which in most cases is non-linear:
+
+ a_n = \sigma_l (a_{n-1} w_n + b_n).   (6)
+
+ Here n is the layer number, w and b are the model parameters (weights and biases,
+ respectively), and σ_l is the activation function, following Goodfellow et al. [2016].
+
+ • Output layer: The neurons in the output layer are the model targets; they are calculated with
+ Eq. 6 using a linear activation function.
+
+ • Loss function: Every neural network has a function that must be minimized over the model
+ parameters during the training stage via back-propagation; typically, the loss function is the
+ mean square error between the true and the predicted values:
+
+ Loss(w) = MSE_d + \lambda \sum_{w} w^2,   (7)
+
+ MSE_d = \frac{1}{N} \sum_{i=1}^{N} [U(X_i, w) - T_i]^2.   (8)
+
+ U and T are the predicted output and the true target values, respectively, X is the input data,
+ and w denotes the parameters of the neural network (weights and biases). The first term in
+ Eq. 7 is the mean square error; the second term exists to prevent over-fitting, namely the L2
+ regularization (a. K. Connect et al. [1992]), which is used to reduce the effect of large
+ weights. A minimal Keras sketch of this baseline follows.
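+ The sketch mirrors the tuned sizes reported later (7 hidden tanh layers of 50 neurons, 29 input
+ features); the Adam optimizer is shown only for brevity, whereas the actual training uses
+ L-BFGS-B.
+
+ ```python
+ import tensorflow as tf
+
+ l2 = tf.keras.regularizers.l2(1e-4)  # the lambda * sum(w^2) term of Eq. 7
+ model = tf.keras.Sequential(
+     [tf.keras.layers.Dense(50, activation='tanh', kernel_regularizer=l2, input_shape=(29,))] +
+     [tf.keras.layers.Dense(50, activation='tanh', kernel_regularizer=l2) for _ in range(6)] +
+     [tf.keras.layers.Dense(1)]       # linear output layer (Eq. 6 with identity activation)
+ )
+ model.compile(optimizer='adam', loss='mse')  # MSE_d of Eq. 8
+ ```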
+ 2.3.3 PINN
+
+ PINN (Raissi et al. [2019]) enforces the Laplace equation, a physical law of the
+ electromagnetic system, as a constraint on the neural network. This study proposes a PINN-based
+ approach to solve the Laplace equation with changeable boundary conditions. Fig. 3 shows a
+ schematic of the neural network layout for this approach. PINN-based models are neural networks
+ with a modified loss function:
+
+ Loss = \lambda_1 MSE_d + \lambda_2 MSE_f + \lambda_3 MSE_b + \lambda_4 \sum_{w} w^2.   (9)
+
+ The first and the last term of Eq. 9 are the same as for the typical neural network in Eq. 7.
+ The second term corresponds to the governing physical equation, i.e., the Laplace equation, and
+ the third term corresponds to the boundary conditions:
+
+ MSE_d = \frac{1}{N_d} \sum_{i=1}^{N_d} \| u(x^i_d, \vec{\phi}^i_d; w) - \varphi^i_d \|^2,   (10)
+
+ MSE_f = \frac{1}{N_f} \sum_{i=1}^{N_f} \| f(x^i_f, u^i_f; w) \|^2,   (11)
+
+ and
+
+ MSE_b = \frac{1}{N_b} \sum_{i=1}^{N_b} \| B(x^i_b, \vec{\phi}^i_b, u^i_b; w) \|^2.   (12)
+
+ Here f(x, u; w) is the Laplace residual,
+
+ f(x, u; w) = \nabla^2 u = \frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2} + \frac{\partial^2 u}{\partial z^2} = 0, \qquad x \in \Gamma_f,   (13)
+
+ with Dirichlet boundary conditions
+
+ B(x, \vec{\phi}, u; w) = \sum_{j=1}^{26} \left( u(x_j, \vec{\phi}; w) - \phi^j_b \right) = 0, \qquad x \in \Gamma_b.   (14)
+
+ λ1, λ2 and λ3 in Eq. 9 are the weight coefficients for the data contribution, the
+ Laplace-equation loss and the boundary loss; the use of weight coefficients is motivated by the
+ study of Kag et al. [2022]. The last term is the L2 regularization (a. K. Connect et al.
+ [1992]). Notice that the model with λ2 = λ3 = 0.0
+ is exactly the typical neural network described in the previous subsection.
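+ A hedged TensorFlow sketch of this composite loss, with the Laplacian obtained by automatic
+ differentiation, is given below; the function and tensor names are illustrative, and the L2
+ term is assumed to come from kernel regularizers as in the ANN sketch above.
+
+ ```python
+ import tensorflow as tf
+
+ def laplacian(model, xyz, phi_bc):
+     """Trace of the per-sample Hessian of u with respect to (x, y, z) via nested tapes."""
+     with tf.GradientTape() as t2:
+         t2.watch(xyz)
+         with tf.GradientTape() as t1:
+             t1.watch(xyz)
+             u = model(tf.concat([xyz, phi_bc], axis=1))
+         grad = t1.gradient(u, xyz)          # (du/dx, du/dy, du/dz)
+     hess = t2.batch_jacobian(grad, xyz)     # per-sample 3x3 Hessian
+     return tf.linalg.trace(hess)            # u_xx + u_yy + u_zz (Eq. 13)
+
+ def pinn_loss(model, data, colloc, bc, lam=(0.4, 0.3, 0.3)):
+     """Composite loss of Eq. 9 (without the regularization term)."""
+     xyz_d, phi_d, target_d = data
+     mse_d = tf.reduce_mean((model(tf.concat([xyz_d, phi_d], 1)) - target_d) ** 2)   # Eq. 10
+     mse_f = tf.reduce_mean(laplacian(model, *colloc) ** 2)                          # Eq. 11
+     xyz_b, phi_b, target_b = bc
+     mse_b = tf.reduce_mean((model(tf.concat([xyz_b, phi_b], 1)) - target_b) ** 2)   # Eq. 12
+     return lam[0] * mse_d + lam[1] * mse_f + lam[2] * mse_b
+ ```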
+ 2.4 Evaluating Metrics
+
+ The performance evaluation of the different algorithms for potential estimation relies on the
+ following metrics:
+
+ \Delta\varphi = \varphi_{True} - \varphi_{Pred},   (15)
+
+ \sigma = \sqrt{\frac{1}{n} \sum_{i=0}^{n-1} (\Delta\varphi)^2},   (16)
+
+ R^2 = 1 - \frac{\sum_{i=1}^{n} (\varphi_{True} - \varphi_{Pred})^2}{\sum_{i=1}^{n} (\varphi_{True} - \bar{\varphi}_{True})^2},   (17)
+
+ MSE = \langle (\Delta\varphi)^2 \rangle,   (18)
+
+ where φ_True is the true potential, φ_Pred is the predicted potential, and \bar{φ}_True is the
+ mean true potential of a
+ given test sample. In this study we use the scatter σ, the R2 score and the MSE as our
+ evaluating metrics.
+
+ Figure 3: Physics-informed neural network scheme for solving the Laplace equation with variable
+ boundaries. (The network NN(x; θ) maps the input X and the boundary values φ to the prediction
+ u; automatic differentiation supplies the second derivatives for the Laplace residual, and the
+ total loss is λ1 MSE_d + λ2 MSE_f + λ3 MSE_b + λ4 Σ_w w².)
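+ These metrics translate directly into NumPy, for example:
+
+ ```python
+ import numpy as np
+
+ def metrics(phi_true, phi_pred):
+     d = phi_true - phi_pred                                                # Eq. 15
+     sigma = np.sqrt(np.mean(d ** 2))                                       # Eq. 16
+     r2 = 1.0 - np.sum(d ** 2) / np.sum((phi_true - phi_true.mean()) ** 2)  # Eq. 17
+     mse = np.mean(d ** 2)                                                  # Eq. 18
+     return sigma, r2, mse
+ ```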
+ 3 Results
+
+ In this paper, we predict the smooth potential of a point-charged particle in a spherical
+ conductive container. First, we set up the train and test sets with 5000 and 1000 samples
+ (Sec. 2.2); then we train our models to predict the smooth potential. The total potential can
+ be calculated by summing the smooth and the singular potential (see Sec. 2.1). However, in this
+ work, to compare our results with CAVIAR (Biagooi et al. [2020]), we investigate the smooth
+ potential.
+ 3.1 Random Forest
+
+ 3.1.1 Hyperparameter for RF
+
+ We optimize over the only hyperparameter that influences the fitting of the random forest
+ model, the number of trees in the forest. In Fig. 4, we plot the MSE (left panel) and the R2
+ score (right panel) on the test set as a function of the number of trees to determine the
+ optimal hyperparameter. We find 100 trees to be sufficient, since the progress beyond 100 trees
+ is negligible, and we therefore trained the RF model using 100 trees.
+
+ Figure 4: MSE (left) and R2 score (right) for 1000 different samples of the test set, as a
+ function of the number of trees (0 to 300).
+ 3.1.2 RF Prediction
+
+ Fig. 5 illustrates the RF model with 100 trees. It shows that the prediction is acceptable when
+ the numeric value of the potential is less than 0.3 (φ_True < 0.3), while the model cannot
+ predict precisely in the case of φ_True >= 0.3. The graphs of Fig. 5 compare the true potential
+ φ_True and the RF-predicted potential φ_RF for the train data set (left) and the test data set
+ (right). The RF method is relatively fast; however, it only works when the predicted potential
+ is smooth and relatively small, so it is not suitable in the case of point-charged particles,
+ where the gradient of the potential as well as its numeric value is high at the position of the
+ charge. Furthermore, it fails to predict the potential near the boundaries, since the gradient
+ of the potential is considerable there.
+
+ Figure 5: Potential estimation of the RF model: a) the train data set with 5000 samples, with a
+ scatter of σ = 0.02; b) the test data set with 1000 samples, with a scatter of σ = 0.07. The
+ dashed red line shows where the predicted potential equals the true potential. The pink-shaded
+ region marks the 1σ scatter of potential errors.
+ 3.2 PINN-based model and NN
+
+ By setting both λ2 and λ3 to zero in the PINN-based model, one obtains exactly the NN model.
+ Therefore we investigate both models together and report their results at the same time in the
+ following section.
+
+ 3.2.1 Hyperparameters for PINN and NN
+
+ Unlike the RF model, we define several hyperparameters: the number of neurons, the number of
+ layers, λ2, and λ3. To tune all hyperparameters, we train the model for up to 100000 epochs,
+ using the L-BFGS-B optimizer (Liu and Nocedal [1989]), until the model's tolerance reaches the
+ level of machine epsilon. For all layers except the last one, we use a tanh activation
+ function. Table 1 reports the MSE between the predicted and the exact potential for different
+ values of the hyperparameters (λ2 = [0, 0.1, 0.2, 0.3], λ3 = [0, 0.1, 0.2, 0.3, 0.4], number of
+ hidden layers = [1, 3, 5, 7] and number of neurons per hidden layer = [10, 30, 50]) for 1000
+ samples of the test set. As shown in Table 1, a model with one hidden layer could not predict
+ the potential well, and neither could a model with ten neurons per layer. Therefore, Tables 2
+ and 3 report the results only for [3, 5, 7] layers and [30, 50] neurons per layer. We chose
+ λ4 = 0.0001 to prevent over-fitting.
+
+ 3.2.2 PINN and NN Prediction
+
+ In contrast with Table 1, Table 2 reports not only the MSE but also the R2 score on the test
+ set. As can be seen in Table 2, the model with seven layers and 50 neurons per layer performed
+ best when λ2 and λ3 were 0.3, 0.3 or 0.2, 0.4, respectively. When λ2 and λ3 are zero (a
+ standard neural network), the model did not work well, as can be observed in Table 2 and
+ Fig. 6.
+
+ Figure 6: Potential estimation of the best-tuned (a) NN, with a scatter of σ = 0.07, and (b)
+ PINN model on the 5000 samples of the train set, with a scatter of σ = 0.01. The dashed red
+ line shows where the predicted potential equals the true potential. The pink-shaded region
+ marks the 1σ scatter of potential errors.
+
+ Both plots in Fig. 6 compare the true and predicted potentials on the train set for the
+ best-tuned NN and the best-tuned PINN model (7 layers and 50 neurons per layer, λ2 = 0.3 and
+ λ3 = 0.3). As can be seen, the NN model is not trained well, while the PINN-based model
+ predicts the potential precisely with a scatter of 0.01. Although the PINN-based model predicts
+ the train set well, to verify that over-fitting has not occurred we also evaluate the model on
+ the test set, Fig. 7.
+
+ Figure 7: Potential estimation of the best-tuned PINN model on the 1000 samples of the test
+ set, with a scatter of σ = 0.02, MSE = 0.069 and R2 score = 0.851. The dashed red line shows
+ where the predicted potential equals the true potential. The pink-shaded region marks the 1σ
+ scatter of potential errors.
+ MSEtest
742
+ MSEtest
743
+ λ2
744
+ λ3
745
+ Neurons=10 Neurons=30
746
+ Neurons=50
747
+ Neurons=10
748
+ Neurons=30
749
+ Neurons=50
750
+ Num of hidden layer:1
751
+ Num of hidden layer:5
752
+ 0.0
753
+ 0.0
754
+ 0.246
755
+ 0.246
756
+ 0.246
757
+ 0.246
758
+ 0.246
759
+ 0.246
760
+ 0.1
761
+ 0.162
762
+ 0.145
763
+ 0.124
764
+ 0.099
765
+ 0.145
766
+ 0.078
767
+ 0.2
768
+ 0.191
769
+ 0.212
770
+ 0.132
771
+ 0.136
772
+ 0.062
773
+ 0.072
774
+ 0.3
775
+ 0.175
776
+ 0.159
777
+ 0.171
778
+ 0.14
779
+ 0.069
780
+ 0.124
781
+ 0.4
782
+ 0.24
783
+ 0.171
784
+ 0.18
785
+ 0.191
786
+ 0.064
787
+ 0.076
788
+ 0.1
789
+ 0.0
790
+ 0.246
791
+ 0.247
792
+ 0.247
793
+ 0.246
794
+ 0.246
795
+ 0.246
796
+ 0.1
797
+ 0.222
798
+ 0.143
799
+ 0.117
800
+ 0.107
801
+ 0.245
802
+ 0.192
803
+ 0.2
804
+ 0.201
805
+ 0.165
806
+ 0.129
807
+ 0.243
808
+ 0.079
809
+ 0.062
810
+ 0.3
811
+ 0.22
812
+ 0.159
813
+ 0.16
814
+ 0.112
815
+ 0.094
816
+ 0.071
817
+ 0.4
818
+ 0.249
819
+ 0.177
820
+ 0.243
821
+ 0.205
822
+ 0.088
823
+ 0.07
824
+ 0.2
825
+ 0.0
826
+ 0.246
827
+ 0.246
828
+ 0.246
829
+ 0.246
830
+ 0.246
831
+ 0.246
832
+ 0.1
833
+ 0.158
834
+ 0.144
835
+ 0.154
836
+ 0.235
837
+ 0.161
838
+ 0.083
839
+ 0.2
840
+ 0.225
841
+ 0.174
842
+ 0.174
843
+ 0.198
844
+ 0.097
845
+ 0.071
846
+ 0.3
847
+ 0.223
848
+ 0.143
849
+ 0.158
850
+ 0.192
851
+ 0.081
852
+ 0.075
853
+ 0.4
854
+ 0.197
855
+ 0.185
856
+ 0.189
857
+ 0.216
858
+ 0.065
859
+ 0.118
860
+ 0.3
861
+ 0.0
862
+ 0.246
863
+ 0.246
864
+ 0.246
865
+ 0.246
866
+ 0.246
867
+ 0.246
868
+ 0.1
869
+ 0.21
870
+ 0.132
871
+ 0.129
872
+ 0.245
873
+ 0.088
874
+ 0.098
875
+ 0.2
876
+ 0.199
877
+ 0.139
878
+ 0.157
879
+ 0.209
880
+ 0.117
881
+ 0.065
882
+ 0.3
883
+ 0.216
884
+ 0.241
885
+ 0.202
886
+ 0.182
887
+ 0.168
888
+ 0.08
889
+ 0.4
890
+ 0.225
891
+ 0.182
892
+ 0.196
893
+ 0.239
894
+ 0.083
895
+ 0.097
896
+ Num of hidden layer:3
897
+ Num of hidden layer:7
898
+ 0.0
899
+ 0.0
900
+ 0.246
901
+ 0.246
902
+ 0.246
903
+ 0.246
904
+ 0.246
905
+ 0.246
906
+ 0.1
907
+ 0.157
908
+ 0.104
909
+ 0.086
910
+ 0.244
911
+ 0.237
912
+ 0.247
913
+ 0.2
914
+ 0.151
915
+ 0.088
916
+ 0.086
917
+ 0.238
918
+ 0.087
919
+ 0.105
920
+ 0.3
921
+ 0.13
922
+ 0.078
923
+ 0.089
924
+ 0.244
925
+ 0.07
926
+ 0.067
927
+ 0.4
928
+ 0.202
929
+ 0.173
930
+ 0.102
931
+ 0.188
932
+ 0.068
933
+ 0.063
934
+ 0.1
935
+ 0.0
936
+ 0.246
937
+ 0.246
938
+ 0.246
939
+ 0.246
940
+ 0.246
941
+ 0.246
942
+ 0.1
943
+ 0.099
944
+ 0.089
945
+ 0.132
946
+ 0.244
947
+ 0.244
948
+ 0.244
949
+ 0.2
950
+ 0.198
951
+ 0.093
952
+ 0.077
953
+ 0.23
954
+ 0.221
955
+ 0.069
956
+ 0.3
957
+ 0.188
958
+ 0.088
959
+ 0.083
960
+ 0.224
961
+ 0.223
962
+ 0.082
963
+ 0.4
964
+ 0.262
965
+ 0.104
966
+ 0.079
967
+ 0.213
968
+ 0.072
969
+ 0.074
970
+ 0.2
971
+ 0.0
972
+ 0.246
973
+ 0.246
974
+ 0.246
975
+ 0.246
976
+ 0.246
977
+ 0.246
978
+ 0.1
979
+ 0.119
980
+ 0.146
981
+ 0.082
982
+ 0.244
983
+ 0.244
984
+ 0.245
985
+ 0.2
986
+ 0.172
987
+ 0.09
988
+ 0.086
989
+ 0.244
990
+ 0.244
991
+ 0.071
992
+ 0.3
993
+ 0.179
994
+ 0.084
995
+ 0.085
996
+ 0.225
997
+ 0.251
998
+ 0.08
999
+ 0.4
1000
+ 0.197
1001
+ 0.185
1002
+ 0.086
1003
+ 0.256
1004
+ 0.166
1005
+ 0.067
1006
+ 0.3
1007
+ 0.0
1008
+ 0.246
1009
+ 0.246
1010
+ 0.246
1011
+ 0.246
1012
+ 0.246
1013
+ 0.246
1014
+ 0.1
1015
+ 0.17
1016
+ 0.125
1017
+ 0.082
1018
+ 0.244
1019
+ 0.244
1020
+ 0.244
1021
+ 0.2
1022
+ 0.199
1023
+ 0.117
1024
+ 0.096
1025
+ 0.244
1026
+ 0.083
1027
+ 0.08
1028
+ 0.3
1029
+ 0.224
1030
+ 0.107
1031
+ 0.088
1032
+ 0.202
1033
+ 0.235
1034
+ 0.069
1035
+ 0.4
1036
+ 0.199
1037
+ 0.222
1038
+ 0.152
1039
+ 0.236
1040
+ 0.097
1041
+ 0.075
1042
+ Table 1: MSE between the predicted and the exact potential φ(x) for a different value of λ2, and λ3, and
1043
+ the different number of hidden layers, and neurons per hidden layer in PINN for 1000 different sample of
1044
+ the Test set. Here, λ4 = 0.0001 is fixed and λ1 = 1 − (λ2 + λ3 + λ4). In this table, the bold number means
1045
+ MSE < 0.1.
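+ For clarity, the weighting scheme from the caption can be written directly in code. This is our own
+ minimal sketch, not the authors' implementation; the individual loss-term names are hypothetical
+ placeholders for the components that λ1-λ4 weight in the paper.
+ def total_loss(l_data, l_pde, l_bc, l_reg, lam2, lam3, lam4=1e-4):
+     # constraint from the caption: the four weights sum to one
+     lam1 = 1.0 - (lam2 + lam3 + lam4)
+     return lam1 * l_data + lam2 * l_pde + lam3 * l_bc + lam4 * l_reg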
+ MSE and R² score on the test set (rows: λ2, λ3)
+
+                 Numlayer=3        Numlayer=5        Numlayer=7
+ λ2    λ3        MSE     R²        MSE     R²        MSE     R²
+ Numneuron=30
+ 0.0   0.0       0.246   0         0.246   0         0.246   0
+ 0.0   0.1       0.104   0.774     0.145   0.365     0.237   -14.7
+ 0.0   0.2       0.088   0.85      0.062   0.889     0.087   0.807
+ 0.0   0.3       0.078   0.826     0.069   0.884     0.07    0.904
+ 0.0   0.4       0.173   0.551     0.064   0.863     0.068   0.868
+ 0.1   0.0       0.246   0         0.246   0         0.246   0
+ 0.1   0.1       0.089   0.796     0.245   -1478     0.244   0
+ 0.1   0.2       0.093   0.825     0.079   0.852     0.221   -2.40
+ 0.1   0.3       0.088   0.793     0.094   0.714     0.223   0.094
+ 0.1   0.4       0.104   0.719     0.088   0.793     0.072   0.887
+ 0.2   0.0       0.246   0         0.246   0         0.246   0
+ 0.2   0.1       0.146   0.64      0.161   0.566     0.244   0
+ 0.2   0.2       0.09    0.769     0.097   0.75      0.244   -3167
+ 0.2   0.3       0.084   0.819     0.081   0.857     0.251   -2.32
+ 0.2   0.4       0.185   0.521     0.065   0.874     0.166   0.07
+ 0.3   0.0       0.246   0         0.246   0         0.246   0
+ 0.3   0.1       0.125   0.626     0.088   0.747     0.244   0
+ 0.3   0.2       0.117   0.666     0.117   0.689     0.083   0.863
+ 0.3   0.3       0.107   0.67      0.168   0.59      0.235   0
+ 0.3   0.4       0.222   0.25      0.083   0.837     0.097   0.804
+ Numneuron=50
+ 0.0   0.0       0.246   0         0.246   0         0.246   0
+ 0.0   0.1       0.086   0.832     0.078   0.865     0.247   -254
+ 0.0   0.2       0.086   0.838     0.072   0.873     0.105   0.711
+ 0.0   0.3       0.089   0.767     0.124   0.66      0.067   0.875
+ 0.0   0.4       0.102   0.734     0.076   0.846     0.063   0.849
+ 0.1   0.0       0.246   0         0.246   0         0.246   0
+ 0.1   0.1       0.132   0.64      0.192   -0.48     0.244   0
+ 0.1   0.2       0.077   0.85      0.062   0.888     0.069   0.897
+ 0.1   0.3       0.083   0.837     0.071   0.895     0.082   0.774
+ 0.1   0.4       0.079   0.847     0.07    0.887     0.074   0.863
+ 0.2   0.0       0.246   0         0.246   0         0.246   0
+ 0.2   0.1       0.082   0.756     0.083   0.859     0.245   -1774
+ 0.2   0.2       0.086   0.796     0.071   0.88      0.071   0.908
+ 0.2   0.3       0.085   0.799     0.075   0.888     0.08    0.87
+ 0.2   0.4       0.086   0.8       0.118   0.727     0.067   0.902
+ 0.3   0.0       0.246   0         0.246   0         0.246   0
+ 0.3   0.1       0.082   0.75      0.098   0.753     0.244   0
+ 0.3   0.2       0.096   0.777     0.065   0.894     0.08    0.798
+ 0.3   0.3       0.088   0.785     0.08    0.834     0.069   0.902
+ 0.3   0.4       0.152   0.557     0.097   0.818     0.075   0.867
+
+ Table 2: MSE and R² score between the predicted and the exact potential φ(x) for different values of λ2
+ and λ3, different numbers of hidden layers, and neurons per hidden layer in the PINN, for 1000 samples
+ of the test set. Here, λ4 = 0.0001 is fixed and λ1 = 1 − (λ2 + λ3 + λ4). Bold numbers in the original
+ show cases with MSE < 0.1 and R² score > 0.9.
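+ For reference, the two metrics reported in Tables 2 and 3 follow their standard definitions; a minimal
+ NumPy sketch (our illustration, not the authors' code):
+ import numpy as np
+ def mse_and_r2(pred, true):
+     mse = np.mean((pred - true) ** 2)
+     ss_res = np.sum((true - pred) ** 2)
+     ss_tot = np.sum((true - np.mean(true)) ** 2)
+     return mse, 1.0 - ss_res / ss_tot  # R² can be strongly negative for poor fits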
+ MSE and R² score on the extrapolation set (rows: λ2, λ3)
+
+                 Numlayer=3        Numlayer=5        Numlayer=7
+ λ2    λ3        MSE     R²        MSE     R²        MSE     R²
+ Numneuron=30
+ 0.0   0.0       0.281   0         0.281   0         0.281   0
+ 0.0   0.1       0.14    0.71      0.148   0.364     0.275   -15.28
+ 0.0   0.2       0.118   0.764     0.077   0.847     0.126   0.646
+ 0.0   0.3       0.104   0.737     0.09    0.782     0.089   0.835
+ 0.0   0.4       0.208   0.587     0.072   0.842     0.083   0.822
+ 0.1   0.0       0.281   0         0.281   0         0.281   0
+ 0.1   0.1       0.105   0.808     0.28    -1801     0.28    0
+ 0.1   0.2       0.112   0.775     0.09    0.845     0.259   -2.137
+ 0.1   0.3       0.105   0.793     0.123   0.697     0.264   0.07
+ 0.1   0.4       0.116   0.735     0.12    0.684     0.092   0.813
+ 0.2   0.0       0.281   0         0.281   0         0.281   0
+ 0.2   0.1       0.183   0.601     0.201   0.484     0.28    0
+ 0.2   0.2       0.118   0.739     0.114   0.68      0.28    -3620
+ 0.2   0.3       0.095   0.829     0.109   0.799     0.275   -1.82
+ 0.2   0.4       0.209   0.586     0.078   0.851     0.205   0.183
+ 0.3   0.0       0.281   0         0.281   0         0.281   0
+ 0.3   0.1       0.154   0.663     0.105   0.743     0.28    0
+ 0.3   0.2       0.13    0.74      0.139   0.688     0.109   0.725
+ 0.3   0.3       0.143   0.565     0.2     0.604     0.271   0.012
+ 0.3   0.4       0.269   0.356     0.158   0.364     0.118   0.726
+ Numneuron=50
+ 0.0   0.0       0.281   0         0.281   0         0.281   0
+ 0.0   0.1       0.116   0.764     0.112   0.769     0.28    -310
+ 0.0   0.2       0.1     0.837     0.092   0.829     0.135   0.598
+ 0.0   0.3       0.101   0.79      0.145   0.719     0.083   0.843
+ 0.0   0.4       0.117   0.706     0.096   0.801     0.106   0.662
+ 0.1   0.0       0.281   0         0.281   0         0.281   0
+ 0.1   0.1       0.149   0.657     0.227   -0.465    0.28    0
+ 0.1   0.2       0.121   0.658     0.077   0.832     0.09    0.824
+ 0.1   0.3       0.108   0.719     0.096   0.817     0.102   0.734
+ 0.1   0.4       0.117   0.716     0.085   0.843     0.093   0.842
+ 0.2   0.0       0.281   0         0.281   0         0.281   0
+ 0.2   0.1       0.094   0.782     0.112   0.777     0.28    -2164
+ 0.2   0.2       0.114   0.758     0.094   0.785     0.091   0.854
+ 0.2   0.3       0.101   0.762     0.093   0.818     0.104   0.771
+ 0.2   0.4       0.124   0.644     0.143   0.703     0.088   0.849
+ 0.3   0.0       0.281   0         0.281   0         0.281   0
+ 0.3   0.1       0.097   0.772     0.115   0.727     0.28    0
+ 0.3   0.2       0.112   0.768     0.096   0.807     0.167   0.363
+ 0.3   0.3       0.11    0.777     0.113   0.682     0.089   0.851
+ 0.3   0.4       0.177   0.563     0.114   0.76      0.103   0.741
+
+ Table 3: MSE and R² score between the predicted and the exact potential φ(x) for different values of λ2
+ and λ3, different numbers of hidden layers, and neurons per hidden layer in the PINN, for 1000 samples
+ of the extrapolation set. Here, λ4 = 0.0001 is fixed and λ1 = 1 − (λ2 + λ3 + λ4). The bold number in
+ the original marks the best hyperparameters for our PINN-based model.
+ 3.3 Comparison
+ We evaluate the RF, NN, and PINN models on estimating the potential of point-charged particles surrounded
+ by conductive walls. According to Fig. 6, the NN was not trained well, while the RF and PINN-based models
+ predict the potential precisely. However, RF does not work well for estimating φTrue > 0.3. Beyond this, the
+ best model should estimate not only the potential on the train and test sets but also the potential of
+ point-charged particles that appear in neither. We therefore evaluate the best-tuned PINN model and RF on
+ the extrapolation samples; the results are reported in Table 3. As can be seen in Fig. 8, the PINN-based model
+ predicts the potential of new charged particles better than the RF model, and handles φTrue > 0.3 far better
+ than RF.
+ Figure 8: Potential estimation of the best-tuned (a) RF model, with scatter σ = 0.07, and (b) PINN-based
+ model, with scatter σ = 0.02, on the 1000 samples of the extrapolation set. The dashed red line shows where
+ the predicted potential equals the true potential. The pink-shaded region marks the 1σ scatter of potential errors.
+ 3.4 Generalization (multiple charged particles)
+ For generalization, we test the PINN-based model with λ2 = 0.3 and λ3 = 0.3 on cases with more than one
+ charged particle surrounded by conductive boundaries. Since the Laplace equation is linear, we predict the
+ potential of each charged particle separately and then obtain the total potential as the superposition of the
+ corresponding predicted potentials. We then report the MSE between the predicted smooth potential and the
+ exact smooth solution, which is calculated with the image-charge method. Fig. 9 shows the relation between
+ the MSE and N, the number of charged particles. As expected, the MSE is independent of the number of
+ charged particles, so this method can also be used for problems with any desired number of particles.
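+ The superposition step is a one-liner in practice. A minimal sketch, assuming a hypothetical callable
+ `predict_one` that wraps the trained PINN and returns the smooth potential of a single charge evaluated
+ on a fixed grid:
+ import numpy as np
+ def total_smooth_potential(predict_one, charge_positions):
+     # Laplace's equation is linear, so the smooth potentials of the
+     # individual charges simply add up.
+     return np.sum([predict_one(p) for p in charge_positions], axis=0)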
+ Figure 9: MSE between true and predicted potential as a function of the number of charged particles,
+ over 100 different problems.
+ 4 Conclusion
+ In this study, we have trained a machine to predict the smooth potential of charged components surrounded
+ by conductive boundaries. In this scheme, the total potential can be easily calculated as the sum of the
+ predicted smooth potential and the singular potential from the PLT algorithm. The reference set consists
+ of analytic solutions obtained with the image-charge method, split into a train set with 5000 samples and a
+ test set with 1000 samples. To check the accuracy of our model, we built another data set, the extrapolation
+ set, consisting of 1000 samples with boundary conditions that appear in neither the train nor the test set.
+ Our main conclusions can be summarized as follows:
+ • We find that the PINN-based model trains better than the RF and NN models. RF could not predict
+ high potentials; the NN, on the other hand, could not be trained well at all.
+ • Our PINN-based model predicts the potential of the test set with MSE = 0.069, R² score = 0.902,
+ and scatter σ = 0.02. It also predicts the potential of the extrapolation set with MSE = 0.089,
+ R² score = 0.851, and scatter σ = 0.02.
+ • Since the Laplace equation is linear, the trained model can predict the potential of more than one
+ charged particle by summing every particle's predicted potential. Moreover, we show that the MSE
+ for multiple particles is independent of the number of particles.
DNE0T4oBgHgl3EQfQQC5/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
I9AyT4oBgHgl3EQffvhT/content/tmp_files/2301.00345v1.pdf.txt ADDED
@@ -0,0 +1,1829 @@
+ MTNeuro: A Benchmark for Evaluating Representations of Brain Structure Across
+ Multiple Levels of Abstraction
+ Jorge Quesada1∗, Lakshmi Sathidevi1∗, Ran Liu1, Nauman Ahad1, Joy M. Jackson1,
+ Mehdi Azabou1, Jingyun Xiao1, Christopher Liding1, Matthew Jin1, Carolina Urzay1,
+ William Gray-Roncal2, Erik C. Johnson2,†, Eva L. Dyer1,†
+ 1 - Georgia Institute of Technology
+ 2 - Johns Hopkins University Applied Physics Laboratory
+ Abstract
+ There are multiple scales of abstraction from which we can describe the same image, depending on
+ whether we are focusing on fine-grained details or a more global attribute of the image. In brain
+ mapping, learning to automatically parse images to build representations of both small-scale features
+ (e.g., the presence of cells or blood vessels) and global properties of an image (e.g., which brain region
+ the image comes from) is a crucial and open challenge. However, most existing datasets and benchmarks
+ for neuroanatomy consider only a single downstream task at a time. To bridge this gap, we introduce a
+ new dataset, annotations, and multiple downstream tasks that provide diverse ways to read out
+ information about brain structure and architecture from the same image. Our multi-task neuroimaging
+ benchmark (MTNeuro) is built on volumetric, micrometer-resolution X-ray microtomography images
+ spanning a large thalamocortical section of mouse brain, encompassing multiple cortical and subcortical
+ regions. We generated a number of different prediction challenges and evaluated several supervised and
+ self-supervised models for brain-region prediction and pixel-level semantic segmentation of
+ microstructures. Our experiments not only highlight the rich heterogeneity of this dataset, but also
+ provide insights into how self-supervised approaches can be used to learn representations that capture
+ multiple attributes of a single image and perform well on a variety of downstream tasks. Datasets, code,
+ and pre-trained baseline models are provided at: https://mtneuro.github.io/.
+ 1 Introduction
+ Our understanding of our natural surroundings requires multiple levels of perceptual processing: we can
+ recognize a macroscopic object (e.g., a tree), while also identifying finer-grain structures within it (e.g.,
+ leaves and branches) and context-relevant features (e.g., leafiness, height, or season). This multi-level
+ perception scheme also translates to the medical image domain: the process of interrogating medical
+ images (either by a human expert or an algorithm) involves combining macrostructural insights (such as
+ a region of interest) with context-relevant microstructure information and human-interpretable features
+ (e.g., the density of a given cell type in a microscopy image) in order to derive a diagnosis or
+ characterize a target sample.
+ ∗Equal contribution. Contact authors: (ELD, JQ, LS) {evadyer, jpacora3, lsathidevi3}@gatech.edu; (ECJ)
+ erik.c.johnson@jhuapl.edu; † Both senior authors contributed equally.
+ 35th Conference on Neural Information Processing Systems (NeurIPS 2022) Track on Datasets and Benchmarks.
+ arXiv:2301.00345v1 [cs.CV] 1 Jan 2023
+ Figure 1: Overview of the MTNeuro benchmark. Task 1: brain region (macrostructure) classification (3
+ configurations which vary in data availability and testing schemes); Task 2: pixel-level microstructure
+ segmentation (4 configurations which vary in sample dimensionality, spatial span, and target class count);
+ Task 3: probing semantic attributes from the image-level embeddings obtained in Task 1.
+ In particular, the ongoing effort to understand the connections and dynamics of the brain involves
+ analyzing both macroscopic-level properties, such as region-level structures (1; 2), as well as detailed
+ microstructures, like the size or shape of a given cell type (3). While significant advances have been
+ achieved in unveiling the properties and structures within the brain through several imaging modalities
+ at different scales (4; 5; 6; 7), most existing neuroimaging benchmarks are designed for evaluation at a
+ single spatial scale, or geared towards a particular downstream task. This can be attributed to several
+ causes, including the prohibitive cost of manual annotation for data spanning multiple scales (8; 9), the
+ associated computational cost of processing multi-scale data, and the fact that neuroimaging technologies
+ have only recently progressed towards pipelines that can capture multi-area volumes at high resolution.
+ To fill this gap, we present the MTNeuro benchmark (Figure 2): a multi-task, multi-scale benchmark
+ based on a large 3D X-ray microtomography image dataset spanning multiple areas of a mouse brain.
+ Code and access to the data are provided at: https://mtneuro.github.io/. We host our dataset in the
+ Brain Observatory Storage Service and Database (BossDB, a specialized interactive database (10)) and
+ provide an integrated dataloader to facilitate transfer experiments and analysis. This benchmark provides
+ a unified framework for evaluating models and representations arising in three distinct tasks:
+ • Task 1 - Image-level classification of brain region: prediction of the brain region (somatosensory
+ cortex, striatum, thalamus, zona incerta) to which a given image belongs.
+ • Task 2 - Pixel-level segmentation of microstructures: prediction of neural microstructures (axons,
+ cell bodies, blood vessels, background) at the pixel level across the four core brain regions in the dataset.
+ • Task 3 - Probing multiple semantic features from learned image-level embeddings: estimation of
+ semantic (human-interpretable) features (such as the average cell size or axon density) from the
+ representation of a given image, obtained after “freezing” the weights of a trained encoder.
+ To understand how current models perform on these different tasks, we evaluate a family of different
+ supervised and self-supervised models. Our results in Tasks 1 and 3 highlight a significant generalization
+ gap between self-supervised and supervised approaches, which opens up interesting opportunities for
+ further evaluation and development of self-supervised learning (SSL) methods for these tasks. Through
+ testing a family of different models across a variety of tasks, our proposed benchmark provides both an
+ exciting platform for evaluating SSL methods and a rich tool in the effort to extract fundamental insights
+ into brain architecture at both the micro- and macro-scale.
+ 2 Background and Related Work
+ 2.1 The need for a benchmark in brain mapping and connectomics
+ Over the past decade, there have been major advances in our ability to resolve fine-scale neuroanatomical
+ structures in the brain. With these advances, we have generated large amounts of brain data that span
+ many spatial scales and can reveal different features of brain organization. At the nanoscale, electron
+ microscopy has provided detailed wiring diagrams of small portions of cortex (11). At the micron scale,
+ microscopy techniques have provided detailed pictures of cytoarchitecture, or how neurons and cells are
+ organized (5). Efforts at even larger scales to capture many brain areas simultaneously, like connectivity
+ atlases and X-ray microtomographic datasets (12), have provided information about the interplay between
+ long-range connections across brain areas and microstructures, such as cell body densities and other
+ morphological features of brain structure.
+ Accompanying these new tools for data generation have been major advances in machine learning and
+ computational approaches for modeling and analyzing these datasets, for problems such as object
+ detection, segmentation, and classification. While the information provided by these methods is
+ incredibly rich and has a great deal of structure at many scales, any given method is typically tested on
+ an individual challenge at a particular scale. The expense of annotating and proofreading can be
+ considerable, and significant neuroanatomy knowledge is typically required of annotators (9). Moreover,
+ many efforts to provide high-quality data, such as (5), have not focused on building ML-oriented
+ benchmarks, but rather on providing reference datasets and resources.
+ Using machine learning tools to understand these emerging brain datasets at different spatial scales is
+ both a challenge and an increasingly critical need. As a result, large tera- and peta-scale connectomics
+ datasets are being collected using electron microscopy and X-ray microtomography, including data from
+ the entire brain of Drosophila (13; 14), large portions of the mouse brain (15; 3), and even a cubic
+ millimeter of human cortex (6). Advances in imaging technologies promise to continually increase the
+ spatial extent, number of species, and number of imaged individuals. These datasets have the micro- or
+ nanoscale resolution and large spatial extents required to resolve subcellular structures (e.g., mitochondria
+ and synapses), microstructures (e.g., glia, neurons, and vasculature), and macrostructure (e.g., brain
+ regions, cortical layer structure, and long-range white matter projections). The multi-scale nature and
+ large size of these datasets require new ML tools (16), which drives the need for benchmarks that can
+ extract representations of neural structure at different scales.
+ 2.2 Existing datasets and benchmarks for resolving brain structure
+ Due to the large variety of spatial scales, neuroanatomical structures, and imaging modalities, a wide
+ range of segmentation and classification problems have been formulated for neuroimaging data. At the
+ macroscale, there is a long history of developing benchmarks for different datasets in MRI and related
+ modalities like DTI and fMRI. For example, the BraTS dataset (17) focuses on MRI of brain tumors and
+ has motivated many segmentation works (18; 19; 20; 21). The ADNI (22) and MIRIAD (23) datasets
+ provide MRI-based Alzheimer's disease imaging that focuses on tracking disease progression (24; 25;
+ 26; 27; 28; 29; 30). The UK Biobank (31) offers a huge collection of brain MRI data from 500,000
+ human participants. The low spatial resolution of MRI and related methods, however, limits the ability to
+ observe microscale structures at the cellular or subcellular level.
+ For microscale structure, there are fewer benchmarking efforts due to the scale and complexity of the
+ annotation and processing. Example problems include segmentation of synapses in the CREMI challenge
+ (32), which provides high-resolution imaging of synapses in 5 cubic microns of non-isotropic EM with
+ nanometer resolution. Other problem formulations include 2D image segmentation of cell membranes
+ (ISBI 2012 Challenge), with data from (33), and 3D segmentation of cells (34; 35). Specific benchmarks
+ have also been developed for axon instance segmentation (36) and mitochondria segmentation (37).
+ Different forms of microscopy with micrometer resolution, including calcium fluorescence microscopy,
+ are suitable for segmenting cell bodies and extracting functional time-series data, but lack other
+ microstructure information. Benchmarks have also been established for estimation of functional traces
+ from two-photon calcium fluorescence microscopy, including spikefinder (38). These benchmarks have
+ been instrumental in driving progress on specific problems at specific spatial scales. In general, however,
+ most benchmarks at the microscale focus on relatively small spatial extents and lack the multi-scale
+ macrostructure that is also present in the brain. To the best of our knowledge, there are no public
+ microtomography datasets of brain structure with both dense microstructure and macroscale annotations
+ currently available.
+ Encompassing larger spatial extents, projects such as the BigBrain atlas provide high-resolution sections
+ from the mouse brain with Nissl contrast (39), but the resolution only resolves cells of around 20 µm.
+ Large-scale EM datasets are also being used to benchmark performance and segment neurons, augmented
+ with iterative human proofreading (40; 41; 6), which is leading to large segmented datasets with
+ increasingly complex annotations. These data, however, are not suitable for large-scale benchmarking and
+ algorithm development in the general machine learning community due to their size and ongoing
+ refinement. To accelerate progress towards machine learning tools that can operate at multiple levels of
+ spatial abstraction within the same high-resolution dataset, benchmarks are needed that encompass large
+ spatial extents at high resolution. This will enable a broader scientific community to apply
+ state-of-the-art methods to these important applications.
+ 3 Dataset and Tasks
+ 3.1 Overview of dataset
+ We build our benchmark on a large, open-access, high-resolution (1.17 µm isotropic) 3D X-ray
+ microtomography imaging dataset that provides fine-scale information about brain microstructure as well
+ as diverse regions of interest with more distinct global attributes (15). The dataset thus contains a
+ uniquely rich set of both macroscale (region of interest) and microscale (cells, blood vessels, axons)
+ structures that can be interrogated throughout the dataset (42; 16).
+ The full volumetric dataset provides micron resolution of an intact brain sample totalling 5805 × 1420 ×
+ 720 pixels. The dataset spans four regions of interest: somatosensory cortex (CTX), striatum (STR), the
+ ventral posterior region of thalamus (VP), and the zona incerta (ZI) (see Figure 2 A-B), and provides
+ pixel-level microstructure (cells, axons, blood vessels, background) and macrostructure labels over 2D
+ slices distributed over the dataset.
+ To provide more data for training and validation, we expanded the pixel-level microstructure and
+ macrostructure labels provided in the original data resource (15) to larger contiguous volumes in the data.
+ To expand the pixel-level labels, we trained a 4-class U-Net model on the sparse 2D annotations and had
+ a trained expert proofread the annotations to create dense pixel-level microstructural labels (see Figure
+ 2B), identifying each point as part of an axon, a cell, a blood vessel, or background. The final curated
+ pixel-level labels span 4 ROIs, with each ROI consisting of 360 densely labelled 256 × 256 images.
+ To examine semantic attributes of the different images in Task 3, we leveraged the dense pixel-level
+ labels to compute a number of semantic features from the reconstructions: (i) the density of blood
+ vessels, (ii) the axon density, (iii) the number of cells, (iv) the size of cells, and (v) the average inter-cell
+ distance in each slice. These semantic labels provide insight into different features of the cytoarchitecture
+ that can be used to interpret the embeddings learned by models tested on this dataset. In addition to these
+ microstructure annotations, we have expanded the macrostructure annotations from the original dataset to
+ include interpolations of the region labels across all 720 slices of size 5805 × 1420. From these
+ interpolated sections, we extracted 12 new subvolumes (three for each of the four regions of interest) for
+ examining the generalization of models in Task 1.
+ 3.2 Data access
+ The dataset and all corresponding labels are stored in BossDB (10), the Brain Observatory Storage
+ Service and Database. The dataset project page can be found at: http://bossdb.org/project/prasad2020.
+ BossDB is a specialized spatial database for electron microscopy and X-ray microtomography datasets,
+ with seamless visualization through Neuroglancer, which enables interactive visualization of large-scale
+ 3D annotated volumes and annotations. All data are available publicly, using public log-on credentials
+ (no account creation required). The project page documents project metadata, citation instructions, and
+ the data creators and curators.
+ For benchmarking, data are accessed through the Python intern API (43). This API allows a remote
+ connection to the BossDB system, including downloads of arbitrary, on-demand 3D cutouts of data,
+ Figure 2: Overview of datasets and annotations used in MTNeuro. In A, we show how the dataset spans
+ multiple brain areas, including the somatosensory cortex (CTX), striatum (STR), thalamus (VP), and zona
+ incerta (ZI). Each of these areas contains annotations of pixel-level microstructures like axons, blood
+ vessels, and cells, visualized as dense X-ray microCT volumetric imaging data (1.17 micron isotropic
+ resolution), as seen in B. The pipeline and BossDB data access are shown in C: channels are accessed
+ through the intern API to access cutouts without the need to download the entire dataset. This enables
+ creating a data loader for specific task-relevant cutouts.
+ including raw images and annotations, without the need to download the entire dataset to disk. To
+ facilitate the use of this API, we provide a PyTorch DataLoader for rapid algorithm development and
+ testing. We also provide sample Jupyter notebooks to demonstrate how the task cutouts can be
+ downloaded and saved as NumPy files for development in other frameworks. The tasks are defined with
+ task-specific JSON files specifying the metadata for each task. The use of this dataloader is illustrated in
+ Fig. 2C, where we detail how the dataset can be efficiently accessed through BossDB.
+ The data are structured into channels. Raw images, macrostructure annotations, and microstructure
+ annotations each have their own separate channel, as detailed below.
+ Raw images: All tasks utilize the same raw images, which are single-color (grayscale) 8-bit unsigned
+ integer values. The total raw data volume is 720 × 1420 × 5805 voxels at a resolution of 1.17 µm. This
+ data is available through the raw images channel:
+ https://api.bossdb.io/v1/mgmt/resources/prasad/prasad2020/image.
+ Macrostructure annotations: Tasks 1 and 3 utilize dense pixel-level annotations of different brain
+ regions (macrostructure): CTX (label 0), STR (label 1), VP (label 2), and ZI (label 3). At this scale,
+ there is an equal number of samples of each class, so the classes are balanced. This label data is
+ represented with 64-bit unsigned integers and is accessible through the macrostructure annotations
+ channel: https://api.bossdb.io/v1/mgmt/resources/prasad/prasad_analysis/roi_labels.
+ Microstructure annotations: Tasks 2 and 3 utilize pixel-level annotations of brain microstructure.
+ Volumes of 256 × 256 × 360 are densely annotated with microstructure labels: 0: no label (background);
+ 1: blood vessels; 2: cells; 3: myelinated axons. Represented with 64-bit unsigned integers, they are
+ accessible through the microstructure annotations channel:
+ https://api.bossdb.io/v1/mgmt/resources/prasad/prasad_analysis/pixel_labels.
+ A key aspect of our approach is the portability of data access and training infrastructure across
+ neuroimaging datasets in BossDB. This allows for easy extension of code and baselines to new volumetric
+ imaging datasets stored in BossDB, including data from new species, with new microstructure labels
+ (synapses, membranes, mitochondria) and new macrostructure labels (brain regions, experimental state).
+ By modifying the dataloader JSON, developers can specify different BossDB datasets, spatial regions,
+ and annotation (label) sources. This allows for the flexibility required to support the different tasks in
+ this dataset, and will enable further training and deployment
+ of these baseline models to new datasets. This is an important contribution of this work towards
+ developing machine learning tools for emerging large-scale neuroimaging datasets.
+ 3.3 Tasks
+ 3.3.1 Task 1: Image-level classification of brain region-of-interest (ROI)
+ When analyzing imaging data that spans many different brain regions, one important question is the
+ degree to which global image features correlate with the brain region from which the sample is drawn
+ (16). Thus, we can pose this as a classification problem, where we pull a small patch (or small region)
+ from the data and estimate which of the 4 brain regions the sample was drawn from. Given the abundance
+ of samples available for this task across the entire volume, we sub-divide this task into three different
+ training schemes (see Figure 1), detailed below. Taken together, these three training schemes allow us to
+ evaluate how different types of models generalize as more data becomes available during training.
+ ROI-C1. For this sub-task, we use the 4 densely annotated cubes (shown in blue in Figure 1, Task 1),
+ each corresponding to one of the regions of interest (CTX, STR, VP, or ZI). We divide each of the four
+ subvolumes by selecting the first 300 (256 × 256) image slices for training and the last 50 images for
+ testing, leaving 10 slices between the train and test data to avoid any structural overlap (see the sketch
+ after this list). The resulting overall sample size for this sub-task is thus 1400 images, with 1200 images
+ in the train set and 200 in the test set.
+ ROI-C2. In this sub-task, we evaluate the performance of the models when tested on new areas within
+ the larger 3D context of the dataset. We extract two additional 256 × 256 × 360 cubes per class (shown
+ in Figure 1, Task 1) to serve as additional test data for the models. This sub-task employs 4080 images,
+ with 1200 in the train set and 2880 in the test set.
+ ROI-C3. In this sub-task, we evaluate the performance of the models when allowed to learn from a
+ larger set of data. We extracted one additional cube per class to serve as an additional source of training
+ data for the models and use the same test set as in ROI-C2. This sub-task employs 5520 images, with
+ 2640 in the train set and 2880 in the test set.
+ 3.3.2 Task 2: Pixel-level segmentation of microstructures
+ Another important requirement for a comprehensive mapping of brain data is correctly identifying brain
+ microstructures such as cells and blood vessels. In this task, we utilize the dense pixel-level
+ microstructure labels of the volumetric cutouts from the 4 brain regions of interest (macrostructure:
+ cortex, striatum, VP, and ZI), and evaluate different baselines on their accuracy in classifying each pixel
+ from test volumes across the brain regions into the appropriate brain microstructure classes (blood vessel,
+ cell, axon, and background).
+ 3-class segmentation task. We first consider the pixel-level segmentation of images into one of three
+ classes: cell bodies, blood vessels, or other (background and axons). We use the same train and test split
+ as in ROI-C1 of Task 1 on the main subvolumes that are densely annotated at the pixel level (300 images
+ for training, 50 for testing, with a gap of 10 slices between the two). This sub-task employs 1200 images
+ for training and 200 images for testing.
+ 4-class segmentation task. In this task, we consider the pixel-level segmentation of images into one of
+ four classes: cell bodies, blood vessels, background, or axons. When we consider dense axonal
+ segmentation, we remove the ZI region from our training and testing sets because axons in this
+ subvolume are difficult to segment reliably even for human annotators. This sub-task employs 900 images
+ for training and 150 images for testing.
+ 3.3.3 Task 3: Probing multiple semantic features from learned image-level embeddings
+ In this task, we explore the possibility of decoding semantic, human-interpretable features from the
+ image-level representations learned in Task 1. If possible, this could provide a way to build interpretable
+ image-level feature maps that contain information about microstructure without needing the expensive
+ pixel-level labels otherwise necessary to compute these attributes in most brain mapping settings.
+ Table 1: Results on image classification accuracy for brain region prediction (Task 1).
+
+ Method         ROI-C1         ROI-C2         ROI-C3
+ Supervised     0.88 ± 0.03    0.77 ± 0.03    0.88 ± 0.02
+ Sup w/ Mixup   0.90 ± 0.04    0.78 ± 0.03    0.90 ± 0.02
+ BYOL           0.88 ± 0.02    0.76 ± 0.02    0.97 ± 0.01
+ MYOW           0.90 ± 0.02    0.78 ± 0.05    0.98 ± 0.01
+ MYOW-m         0.94 ± 0.02    0.78 ± 0.03    0.98 ± 0.01
+ PCA            0.59           0.25           0.07
+ NMF            0.62           0.27           0.50
+ Specifically, we try to predict the following global properties of an image: (i) blood vessel density, (ii)
+ cell count, (iii) average cell size, (iv) axon density, and (v) average distance between cells, all through a
+ simple linear readout. Supervised models are trained to classify images into their respective brain region;
+ however, in the case of SSL methods, where the region-level labels are not used to guide learning, it is
+ possible that other global attributes of the images are encoded in the latent space of the model.
+ 4 Results
+ 4.1 Task 1: Image-level classification of brain ROI
+ Experiment setup. In this task, we consider the classification of different images into a number of
+ candidate brain areas (CTX, STR, VP, and ZI) using representations learned through supervised and
+ self-supervised approaches. All of the models in this task are trained using a ResNet18 encoder (44). We
+ benchmarked two supervised models: the first trained using standard approaches for regularization (20%
+ dropout and a weight decay factor of 0.3), and the other trained using mixup (45). Given the underlying
+ shared structure of image samples in volumetric data, we also consider a number of self-supervised
+ learning methods suitable for this task: (i) BYOL (46), (ii) MYOW (47), and (iii) a variant of MYOW
+ that we tested with a single projector and predictor (MYOW-merged, or MYOW-m). For the SSL models,
+ we follow the standard procedure of freezing the network weights after training and then training a linear
+ layer on top of the representations. This tells us how well the SSL loss captures the classes in the data
+ after only a linear transformation. All models have a latent dimension of 256. As additional baselines, we
+ also extracted 256-dimensional embeddings from our data using Principal Component Analysis (PCA)
+ and Non-negative Matrix Factorization (NMF), and trained a linear layer on these representations.
+ In our experiments, we evaluate all methods across 5 training instances with different random seeds, and
+ report the overall mean accuracy and standard deviation. All models are trained for 100 epochs using an
+ SGD optimizer with a learning rate of 0.03. For more details on our experimental setup and models, see
+ Section 3 in the Appendix.
+ Results in classifying brain ROIs. We report the results of the three subtasks for ROI classification in
+ Table 1. In our first subtask (ROI-C1), we find that many of the SL and SSL models achieve comparable
+ accuracy, with the MYOW-m model achieving the highest accuracy in this limited training regime. In
+ ROI-C2, we test the generalization capabilities of the models trained in ROI-C1 by evaluating how well
+ they perform on subvolumes in other parts of the larger dataset (Table 1, ROI-C2). There is considerable
+ heterogeneity across brain regions, and thus we can consider this a form of domain generalization. In
+ this case, we observe a significant decrease in accuracy that can be attributed to the domain shift in the
+ data. In ROI-C3, we add another set of training data (see Figure 2) and test on the same volumes as in
+ our last subtask. In this case, we observe that SSL methods significantly outperform the SL models, with
+ SSL models achieving even higher accuracy than in the small-scale case (97-98%) and supervised models
+ achieving a much more modest improvement (88-90%) with the new training data. We thus observe a
+ significant gap of SSL over SL models, showing that exposure to additional data (here, the additional
+ cube relative to ROI-C1) drastically improves the generalization of the SSL models to unobserved data.
+ 4.2 Task 2: Pixel-level segmentation of microstructures
+ In Task 2, we consider four different variants of pixel-level segmentation. The first variant employs 2D
+ models for pixel-level segmentation; the second employs 3D models instead. The third
+ Table 2: F1 and IoU scores for models trained on the pixel-level segmentation task (Task 2).
+
+ I. 2D pixel-level segmentation
+                          3-Class                                4-Class
+ Method       Metric   Bg+Axons Vessels Cells  Avg.           Bg    Vessels Cells Axons  Avg.
+ 2D U-Net     F1       0.99     0.76    0.85   0.87 ± 0.012   0.97  0.82    0.87  0.94   0.90 ± 0.003
+ 2D U-Net     IoU      0.98     0.64    0.75   0.79 ± 0.014   0.89  0.70    0.77  0.60   0.74 ± 0.008
+ MA-Net       F1       0.99     0.79    0.87   0.88 ± 0.003   0.97  0.83    0.87  0.94   0.90 ± 0.002
+ MA-Net       IoU      0.98     0.68    0.78   0.81 ± 0.003   0.89  0.71    0.78  0.76   0.78 ± 0.011
+ FPN          F1       0.99     0.72    0.84   0.85 ± 0.01    0.96  0.73    0.84  0.93   0.86 ± 0.004
+ FPN          IoU      0.97     0.59    0.73   0.76 ± 0.015   0.87  0.59    0.72  0.72   0.72 ± 0.021
+ U-Net++      F1       0.99     0.79    0.87   0.89 ± 0.002   0.97  0.81    0.85  0.93   0.89 ± 0.015
+ U-Net++      IoU      0.98     0.68    0.78   0.81 ± 0.002   0.88  0.68    0.75  0.73   0.76 ± 0.036
+ PAN          F1       0.98     0.60    0.80   0.79 ± 0.035   0.95  0.69    0.80  0.93   0.84 ± 0.007
+ PAN          IoU      0.96     0.46    0.66   0.69 ± 0.039   0.85  0.53    0.67  0.76   0.70 ± 0.014
+ PSPNet       F1       0.97     0.48    0.74   0.73 ± 0.013   0.94  0.54    0.71  0.91   0.78 ± 0.012
+ PSPNet       IoU      0.94     0.39    0.61   0.65 ± 0.043   0.82  0.38    0.55  0.74   0.62 ± 0.015
+
+ II. 3D pixel-level segmentation
+                          3-Class                                4-Class
+ Method       Metric   Bg+Axons Vessels Cells  Avg.           Bg    Vessels Cells Axons  Avg.
+ 3D U-Net     F1       0.99     0.77    0.87   0.88 ± 0.006   0.93  0.76    0.80  0.87   0.84 ± 0.032
+ 3D U-Net     IoU      0.98     0.65    0.76   0.80 ± 0.007   0.81  0.62    0.67  0.50   0.65 ± 0.045
+ VNetLight    F1       0.99     0.75    0.83   0.85 ± 0.012   0.90  0.65    0.73  0.76   0.76 ± 0.063
+ VNetLight    IoU      0.97     0.61    0.70   0.76 ± 0.013   0.78  0.46    0.58  0.43   0.56 ± 0.061
+ HighResNet   F1       0.99     0.74    0.84   0.85 ± 0.019   0.89  0.51    0.73  0.77   0.72 ± 0.083
+ HighResNet   IoU      0.97     0.61    0.72   0.77 ± 0.026   0.73  0.35    0.58  0.42   0.52 ± 0.075
673
+ variant is a 4-class setting where ZI is removed from the brain regions involved (as axons in this
674
+ region are difficult to distinguish accurately for even human annotators). The fourth variant is a
675
+ 3-class setting where all 4 brain regions (Cortex, Striatum, VP and ZI) are utilized but only 3 classes
676
+ are considered (blood vessels, cells, background+axons; avoids axon segmentation).
677
+ Experiment setup. In our experiments, we perform pixel-level segmentation using a selected set
+ of 2D and 3D models. Each model goes through a separate hyperparameter tuning process to find
+ an optimal learning rate and batch size, training on the train set and evaluating on the validation
+ split. Each model is then trained for 20 epochs with its optimal learning rate and batch size
+ and evaluated across 5 training instances (each with its own random seed). The class-wise F1-score,
+ the class-wise IoU, and the overall mean and standard deviation of both metrics are reported
+ for each model (Table 2). We do not report accuracy because, for this particular task of pixel-level
+ segmentation, it does not aptly represent model performance due to class imbalance (see
+ Appendix Section 4.2.3 for a breakdown of the different classes in the training and test sets).
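+ For illustration, the sketch below shows one way to compute these class-wise metrics with
+ scikit-learn (our assumption of a typical implementation, not the exact evaluation code):
+ import numpy as np
+ from sklearn.metrics import f1_score, jaccard_score
+ def segmentation_scores(y_true, y_pred, n_classes=4):
+     # y_true, y_pred: integer label maps of identical shape (e.g., H x W)
+     yt, yp = y_true.ravel(), y_pred.ravel()
+     labels = list(range(n_classes))
+     f1 = f1_score(yt, yp, labels=labels, average=None)        # one score per class
+     iou = jaccard_score(yt, yp, labels=labels, average=None)  # IoU is the Jaccard index
+     return f1, iou, f1.mean(), iou.mean()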
+ The models we used for the 2D segmentation task are the standard 2D U-Net model (48; 49) and
+ selected models from the 'segmentation_models.pytorch' library (50): MAnet (51), FPN (52),
+ U-Net++ (53), PAN (54), and PSPNet (55). The models we used for the 3D segmentation task are the
+ standard 3D U-Net model (48) and selected models from 'MedicalZooPytorch' (56): VNetLight
+ (57; 56) and HighResNet (58). For more details, refer to Section 4 in the Appendix.
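+ For reference, models from this library can be instantiated in a few lines; the following sketch
+ mirrors the encoder and class settings reported in the Appendix for the 4-class variant (an
+ illustration of the library's API, not our exact training code):
+ import segmentation_models_pytorch as smp
+ model = smp.MAnet(
+     encoder_name="efficientnet-b7",   # encoder used for all 2D results in Table 2
+     encoder_weights="imagenet",       # pre-trained weights
+     in_channels=1,                    # single-channel X-ray slices
+     classes=4,                        # 4-class setting
+ )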
+ Pixel-level segmentation in 2D. The results from the selected models on our 2D pixel-level
+ segmentation task are tabulated in Part I of Table 2 and visualized in Figure 3. The individual slices
+ are the input to the models and are fed in a batched manner during training. For training and
+ evaluation, we consider both the 3-class and 4-class settings, and we use an EfficientNet-b7 encoder
+ for all the models, as it gave the best performance among the 25 encoders that were attempted.
+ From our results, we see that MA-Net performed best overall among the 2D models, with an
+ average IoU of 0.78, followed by U-Net++ with an average IoU of 0.76 in the 4-class setting (Table 2).
+ As can be seen from the class-wise IoU and class-wise F1-scores, for most of the best-performing
+ models, the most challenging components to differentiate are cells and blood vessels, which are also
+ difficult for human annotators to identify from 2D slices without further 3D context. We also note
+ that the average IoU (across classes and models) increases from 0.72 to 0.79 upon moving from the
+ 4-class to the 3-class setting. This indicates an expected performance improvement, as there are more
+ slices (more data) and fewer classes (an easier task) in the 3-class setting.
+ Figure 3: Visualization of predictions from the pixel-level segmentation task (Task 2). A) From left to right:
+ original image, ground truth overlaid, and predictions from the U-Net model in the 3-class and 4-class settings
+ (top row is Striatum, bottom is VP). Axons are visualized in cyan, blood vessels in red, and cell bodies in
+ yellow. B) 3D reconstruction from the predictions for Striatum for the 4-class (top) and 3-class (bottom) settings.
+ Pixel-level segmentation in 3D. We provide the same breakdown and accuracy measures as in
+ the 2D case for this 3D case (Part II of Table 2). To provide 3D input to the models, we pass in
+ consecutive slices (8 slices) as a single subvolume and build a prediction over all slices jointly in
+ 3D. The U-Net model performed best with 0.80 IoU (3-class), which is only comparable to the best
+ 2D models. This is likely because we define the subvolumes in a non-overlapping manner, so even
+ though we have improved 3D context, the model also sees fewer samples/inputs. For the same
+ reason, moving from the 3-class to the 4-class setting, we note a drop in performance, since there are
+ fewer samples. Larger sample counts or allowing overlap could enable 3D models to perform better.
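+ A minimal sketch of this non-overlapping chunking (a simplified assumption, not the exact
+ dataloader code): given a volume of shape (Z, H, W), consecutive groups of 8 slices become
+ individual 3D inputs.
+ import numpy as np
+ volume = np.zeros((360, 256, 256), dtype=np.uint8)  # placeholder for one annotated subvolume
+ depth = 8                                           # slices per 3D input
+ n = volume.shape[0] // depth
+ # drop any remainder slices, then split into non-overlapping (depth, H, W) subvolumes
+ subvolumes = volume[: n * depth].reshape(n, depth, *volume.shape[1:])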
+ 4.3 Task 3: Probing multiple semantic features from learned image-level embeddings
+ In Task 3, we explore the prediction of different semantic attributes (e.g., density of blood vessels or
+ cells) from the latent space learned by the models trained in Task 1.
+ To do this, we leveraged the high-quality dense microstructure annotations to extract information
+ about the attributes of each image, including: the proportion of pixels that are either blood vessels
+ or axons, the cell count and size, and the average distance between cells and their nearest neighbor.
+ Further details on the experimental setup for this task can be found in Section 5 of the Appendix.
+ We use the trained models from Task 1, freeze their weights, and compute the representations of
+ all of the annotated images used in ROI-C1 (both train and test, 360 slices). From these latent
+ features, we fit a simple linear regression model on the representations to predict each of the
+ desired semantic attributes. We have limited data for this task and thus report the R2 on all of the
+ latents (no train and test split in this case).
+ Table 3: Task 3: R2 scores on multi-task feature readout for supervised and SSL models trained in Task 1.
+ I. Linear Readouts from Models Trained on a Single Subvolume (ROI-C1)
+ Methods        Vessels       Axons         Cell Count    Cell Size     Dist (k=1)
+ Supervised     0.77 ± 0.06   0.94 ± 0.01   0.67 ± 0.06   0.61 ± 0.05   0.48 ± 0.05
+ Sup w/ Mixup   0.82 ± 0.02   0.95 ± 0.00   0.71 ± 0.02   0.67 ± 0.03   0.47 ± 0.02
+ BYOL           0.85 ± 0.01   0.94 ± 0.01   0.75 ± 0.01   0.69 ± 0.01   0.49 ± 0.01
+ MYOW           0.85 ± 0.01   0.94 ± 0.01   0.74 ± 0.01   0.69 ± 0.01   0.50 ± 0.02
+ MYOW-m         0.87 ± 0.01   0.95 ± 0.01   0.77 ± 0.01   0.69 ± 0.01   0.51 ± 0.01
+ PCA            0.75          0.82          0.55          0.47          0.31
+ NMF            0.81          0.85          0.59          0.55          0.34
+ II. Linear Readouts from Models Trained on Two Subvolumes (ROI-C3)
+ Methods        Vessels       Axons         Cell Count    Cell Size     Dist (k=1)
+ Supervised     0.79 ± 0.02   0.94 ± 0.02   0.73 ± 0.02   0.63 ± 0.04   0.49 ± 0.02
+ Sup w/ Mixup   0.75 ± 0.04   0.88 ± 0.04   0.64 ± 0.04   0.54 ± 0.07   0.37 ± 0.05
+ BYOL           0.88 ± 0.00   0.96 ± 0.00   0.79 ± 0.00   0.73 ± 0.01   0.53 ± 0.02
+ MYOW           0.88 ± 0.01   0.96 ± 0.00   0.79 ± 0.01   0.72 ± 0.01   0.52 ± 0.01
+ MYOW-m         0.87 ± 0.01   0.96 ± 0.01   0.78 ± 0.01   0.72 ± 0.01   0.53 ± 0.01
+ PCA            0.75          0.82          0.53          0.46          0.29
+ NMF            0.75          0.83          0.56          0.49          0.31
+ Figure 4: Visualization of the representations learned by MYOW-m in Task 1 (ROI-C1) with global semantic
+ attributes for each image visualized as different colors. From left to right, latents are colored by brain area
+ (class), % blood vessels, % axons, cell count, and cell size.
+ We report the R2 values in Table 3 for all 360 slices in the densely annotated volume used in Tasks 1
+ and 2, for the models trained in ROI-C1 (Table 3, I) and on more data in ROI-C3 (Table 3, II). In both
+ cases, we find that the SSL models significantly outperform the rest of the baseline models. The gap
+ is more pronounced in the two-volume training condition, highlighting the generalization difference
+ between supervised and SSL approaches (59) in providing good representations for a wide range of
+ tasks. In Figure 4, we project the MYOW-m (ROI-C1) embeddings using UMAP (60) and overlay
+ the semantic features.
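+ A rough sketch of this visualization step (assuming the umap-learn package and synthetic
+ placeholder inputs; the exact plotting code may differ):
+ import numpy as np
+ import umap
+ import matplotlib.pyplot as plt
+ features = np.random.rand(360, 512)   # placeholder for (n_images, d) frozen-encoder features
+ attribute = np.random.rand(360)       # placeholder for one semantic attribute (e.g., cell count)
+ embedding_2d = umap.UMAP(n_components=2, random_state=0).fit_transform(features)
+ plt.scatter(embedding_2d[:, 0], embedding_2d[:, 1], c=attribute, s=5)
+ plt.colorbar(label="semantic attribute")
+ plt.show()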
+ 5 Discussion
+ In this paper, we introduced a multi-task benchmark for the analysis of brain structure from high-
+ resolution neuroimaging data spanning many brain areas. In addition, we built general infrastructure
+ for training models on datasets in the BossDB framework, such as dataloaders that can be adapted
+ to different datasets. This will expand the use of large-scale volumetric neuroimaging data for
+ machine learning tool development.
+ Limitations and future work. In neuroscience, obtaining high-quality labeled data (especially
+ for dense segmentation tasks like those provided in Task 2) is often very costly (61). The intensive
+ nature of manually annotating and proofreading data thus limits the amount of labeled and annotated
+ data that we can provide to train models on, and the amount of distributional shift that can be assessed.
+ While this is a limitation of the work, it is also an accurate reflection of the challenges faced in the
+ field, which call for more label-efficient approaches to learning, like the SSL methods we highlight.
+ Moving forward, we hope to leverage the models tested in this work to generate even more high-
+ quality annotated data to further improve model performance and segmentation.
+ When designing our current benchmark, we focused on building a multi-scale challenge where
+ variability was due to changes in brain structure rather than different preparations or imaging
+ parameters. We therefore focused on a single animal for which we have a large intact brain volume
+ that spans many heterogeneous brain regions. While this may limit the generalization of the models
+ to new samples or datasets, it also addresses the heterogeneous nature of different brain regions,
+ which is often overlooked. Our results show that even within a single brain, there is rich heterogeneity
+ across different brain regions that makes it difficult for some models to generalize. This also reveals
+ important generalization gaps between SL and SSL models.
+ In the future, we hope to expand this effort to learn models of brain structure with other non-
+ convolutional architectures (e.g., point cloud-based models of neural structure (62)), and to deploy
+ our tools on new multi-scale brain datasets, perhaps using new lightsheet (63) or whole-brain scaling
+ (64; 65) techniques.
+ Broader impacts. The high heterogeneity of brain data, coupled with the variability in the scale
+ and nature of the tasks presented in this benchmark, makes it a challenging and useful resource for
+ the broader ML community. Furthermore, we hope the provided dataset and tasks, as well as the
+ supporting codebase and BossDB infrastructure, will help accelerate the development of ML
+ techniques for the emerging, high-resolution neuroimaging datasets being collected in the broader
+ community.
+ Acknowledgements
+ This project was supported by NSF award IIS-2039741, NIH award 1R01EB029852-01, NIH award
+ R01MH126684, and NIH award R24MH114785, in addition to generous gifts from the Alfred Sloan
+ Foundation, the McKnight Foundation, and the CIFAR Azrieli Global Scholars Program.
+ References
+ [1] Danielle S Bassett and Edward T Bullmore, “Small-world brain networks revisited,” The Neuroscientist, vol. 23, no. 5, pp. 499–516, 2017.
+ [2] Ran Liu, Cem Subakan, Aishwarya H. Balwani, Jennifer Whitesell, Julie Harris, Sanmi Koyejo, and Eva L. Dyer, “A generative modeling approach for interpreting population-level variability in brain structure,” in Medical Image Computing and Computer Assisted Intervention – MICCAI 2020, Cham, 2020, pp. 257–266, Springer International Publishing.
+ [3] Casey M Schneider-Mizell, Agnes L Bodor, Forrest Collman, Derrick Brittain, Adam A Bleckert, Sven Dorkenwald, Nicholas L Turner, Thomas Macrina, Kisuk Lee, Ran Lu, et al., “Chandelier cell anatomy and function reveal a variably distributed but common signal,” bioRxiv, 2020.
+ [4] Timothy EJ Behrens and Olaf Sporns, “Human connectomics,” Current Opinion in Neurobiology, vol. 22, no. 1, pp. 144–153, 2012.
+ [5] Michael Hawrylycz, Lydia Ng, David Feng, Susan Sunkin, Aaron Szafer, and Chinh Dang, “The Allen Brain Atlas,” Springer Handbook of Bio-/Neuroinformatics, pp. 1111–1126, 2014.
+ [6] Alexander Shapson-Coe, Michał Januszewski, Daniel R Berger, Art Pope, Yuelong Wu, Tim Blakely, Richard L Schalek, Peter H Li, Shuohong Wang, Jeremy Maitin-Shepard, et al., “A connectomic study of a petascale fragment of human cerebral cortex,” bioRxiv, 2021.
+ [7] Federico Scala, Dmitry Kobak, Matteo Bernabucci, Yves Bernaerts, Cathryn René Cadwell, Jesus Ramon Castro, Leonard Hartmanis, Xiaolong Jiang, Sophie Laturnus, Elanine Miranda, et al., “Phenotypic variation of transcriptomic cell types in mouse motor cortex,” Nature, vol. 598, no. 7879, pp. 144–150, 2021.
+ [8] Jeff W Lichtman, Hanspeter Pfister, and Nir Shavit, “The big data challenges of connectomics,” Nature Neuroscience, vol. 17, no. 11, pp. 1448–1454, 2014.
+ [9] Alessandro Motta, Meike Schurr, Benedikt Staffler, and Moritz Helmstaedter, “Big data in nanoscale connectomics, and the greed for training labels,” Current Opinion in Neurobiology, vol. 55, pp. 180–187, 2019.
+ [10] Robert Hider Jr, Dean Kleissas, Timothy Gion, Daniel Xenes, Jordan Matelsky, Derek Pryor, Luis Rodriguez, Erik C Johnson, William Gray-Roncal, and Brock Wester, “The brain observatory storage service and database (BossDB): A cloud-native approach for petascale neuroscience discovery,” Frontiers in Neuroinformatics, vol. 16, 2022.
+ [11] Narayanan Kasthuri, Kenneth Jeffrey Hayworth, Daniel Raimund Berger, Richard Lee Schalek, José Angel Conchello, Seymour Knowles-Barley, Dongil Lee, Amelio Vázquez-Reina, Verena Kaynig, Thouis Raymond Jones, et al., “Saturated reconstruction of a volume of neocortex,” Cell, vol. 162, no. 3, pp. 648–661, 2015.
+ [12] Eva L Dyer, William Gray Roncal, Judy A Prasad, Hugo L Fernandes, Doga Gürsoy, Vincent De Andrade, Kamel Fezzaa, Xianghui Xiao, Joshua T Vogelstein, Chris Jacobsen, et al., “Quantifying mesoscale neuroanatomy using x-ray microtomography,” eNeuro, vol. 4, no. 5, 2017.
+ [13] C Shan Xu, Michal Januszewski, Zhiyuan Lu, Shin-ya Takemura, Kenneth J Hayworth, Gary Huang, Kazunori Shinomiya, Jeremy Maitin-Shepard, David Ackerman, Stuart Berg, et al., “A connectome of the adult Drosophila central brain,” bioRxiv, 2020.
+ [14] Zhihao Zheng, J Scott Lauritzen, Eric Perlman, Camenzind G Robinson, Matthew Nichols, Daniel Milkie, Omar Torrens, John Price, Corey B Fisher, Nadiya Sharifi, et al., “A complete electron microscopy volume of the brain of adult Drosophila melanogaster,” Cell, vol. 174, no. 3, pp. 730–743, 2018.
+ [15] Judy A Prasad, Aishwarya H Balwani, Erik C Johnson, Joseph D Miano, Vandana Sampathkumar, Vincent De Andrade, Kamel Fezzaa, Ming Du, Rafael Vescovi, Chris Jacobsen, et al., “A three-dimensional thalamocortical dataset for characterizing brain heterogeneity,” Scientific Data, vol. 7, no. 1, pp. 1–7, 2020.
+ [16] Aishwarya Balwani, Joseph Miano, Ran Liu, Lindsey Kitchell, Judy A Prasad, Erik C Johnson, William Gray-Roncal, and Eva L Dyer, “Multi-scale modeling of neural structure in x-ray imagery,” in 2021 IEEE International Conference on Image Processing (ICIP). IEEE, 2021, pp. 141–145.
+ [17] Bjoern H Menze, Andras Jakab, Stefan Bauer, Jayashree Kalpathy-Cramer, Keyvan Farahani, Justin Kirby, Yuliya Burren, Nicole Porz, Johannes Slotboom, Roland Wiest, et al., “The multimodal brain tumor image segmentation benchmark (BraTS),” IEEE Transactions on Medical Imaging, vol. 34, no. 10, pp. 1993–2024, 2014.
+ [18] Liang Chen, Paul Bentley, Kensaku Mori, Kazunari Misawa, Michitaka Fujiwara, and Daniel Rueckert, “Self-supervised learning for medical image analysis using image context restoration,” Medical Image Analysis, vol. 58, pp. 101539, 2019.
+ [19] Dheerendranath Battalapalli, B. Rao, P. Yogeeswari, Chandrasekharan Kesavadas, and Venkateswaran Rajagopalan, “An optimal brain tumor segmentation algorithm for clinical MRI dataset with low resolution and non-contiguous slices,” BMC Medical Imaging, vol. 22, 05 2022.
+ [20] Javeria Amin, Muhammad Anjum, Muhammad Sharif, Saima Jabeen, Seifedine Kadry, and Pablo Ger, “A new model for brain tumor detection using ensemble transfer learning and quantum variational classifier,” Computational Intelligence and Neuroscience, vol. 2022, pp. 1–13, 04 2022.
+ [21] Ghazanfar Latif, Ghassen Ben Brahim, D. N. F. Awang Iskandar, Abul Bashar, and Jaafar Alghazo, “Glioma tumors' classification using deep-neural-network-based features with SVM classifier,” Diagnostics, vol. 12, no. 4, 2022.
+ [22] C.R. Jack Jr., M.A. Bernstein, N.C. Fox, P. Thompson, G. Alexander, D. Harvey, B. Borowski, P.J. Britson, J.L. Whitwell, C. Ward, A.M. Dale, J.P. Felmlee, J.L. Gunter, D.L.G. Hill, R. Killiany, N. Schuff, S. Fox-Bosetti, C. Lin, C. Studholme, C.S. DeCarli, G. Krueger, H.A. Ward, G.J. Metzger, K.T. Scott, R. Mallozzi, D. Blezek, J. Levy, J.P. Debbins, A.S. Fleisher, M. Albert, R. Green, G. Bartzokis, G. Glover, J. Mugler, and M.W. Weiner, “The Alzheimer's Disease Neuroimaging Initiative (ADNI): MRI methods,” Journal of Magnetic Resonance Imaging, vol. 27, no. 4, pp. 685–691, 2008.
+ [23] Ian B Malone, David Cash, Gerard R Ridgway, David G MacManus, Sebastien Ourselin, Nick C Fox, and Jonathan M Schott, “MIRIAD—public release of a multiple time point Alzheimer's MR imaging dataset,” NeuroImage, vol. 70, pp. 33–36, 2013.
+ [24] Manhua Liu, Danni Cheng, Kundong Wang, and Yaping Wang, “Multi-modality cascaded convolutional neural networks for Alzheimer's disease diagnosis,” Neuroinformatics, vol. 16, pp. 295–308, 2018.
+ [25] Donghuan Lu, Karteek Popuri, Gavin Weiguang Ding, Rakesh Balachandar, and Mirza Faisal Beg, “Multiscale deep neural network based analysis of FDG-PET images for the early diagnosis of Alzheimer's disease,” Medical Image Analysis, vol. 46, pp. 26–34, 2018.
+ [26] Jyoti Islam and Yanqing Zhang, “Brain MRI analysis for Alzheimer's disease diagnosis using an ensemble system of deep convolutional neural networks,” Brain Informatics, vol. 5, no. 2, pp. 1–14, 2018.
+ [27] Mingxia Liu, Jun Zhang, Ehsan Adeli, and Dinggang Shen, “Joint classification and regression via deep multi-task multi-channel learning for Alzheimer's disease diagnosis,” IEEE Transactions on Biomedical Engineering, vol. 66, no. 5, pp. 1195–1206, 2019.
+ [28] Alejandro Puente-Castro, Enrique Fernandez-Blanco, Alejandro Pazos, and Cristian R. Munteanu, “Automatic assessment of Alzheimer's disease diagnosis based on deep learning techniques,” Computers in Biology and Medicine, vol. 120, pp. 103764, 2020.
+ [29] Eman N. Marzban, Ayman M. Eldeib, Inas A. Yassine, Yasser M. Kadah, and for the Alzheimer's Disease Neurodegenerative Initiative, “Alzheimer's disease diagnosis from diffusion tensor images using convolutional neural networks,” PLOS ONE, vol. 15, no. 3, pp. 1–16, 03 2020.
+ [30] N. Deepa and S.P. Chokkalingam, “Optimization of VGG16 utilizing the arithmetic optimization algorithm for early detection of Alzheimer's disease,” Biomedical Signal Processing and Control, vol. 74, pp. 103455, 2022.
+ [31] Sudlow C, Gallacher J, Allen N, Beral V, Burton P, Danesh J, et al., “UK Biobank: An open access resource for identifying the causes of a wide range of complex diseases of middle and old age,” PLoS Med 12(3): e1001779, 2015.
+ [32] Zhihao Zheng, Cam G. Robinson, Daniel Milkie, Eric Perlman, John Price, Davi Bock, Misha Kazhdan, Khaled Khairy, Bill Karsh, Eric Trautman, Peter Li, Chris Ordish, Chong Zhang, Julia Buhmann, Jan Funke, and Stephan Saalfeld, “MICCAI challenge on circuit reconstruction from electron microscopy images,” https://cremi.org/data/, 2022.
+ [33] Albert Cardona, Stephan Saalfeld, Stephan Preibisch, Benjamin Schmid, Anchi Cheng, Jim Pulokas, Pavel Tomancak, and Volker Hartenstein, “An integrated micro- and macroarchitectural analysis of the Drosophila brain by computer-assisted serial section electron microscopy,” PLOS Biology, vol. 8, no. 10, pp. e1000502, 2010.
+ [34] Kisuk Lee, Jonathan Zung, Peter Li, Viren Jain, and H Sebastian Seung, “Superhuman accuracy on the SNEMI3D connectomics challenge,” arXiv preprint arXiv:1706.00120, 2017.
+ [35] Stephen M Plaza, “Focused proofreading to reconstruct neural connectomes from EM images at scale,” in Deep Learning and Data Labeling for Medical Applications, pp. 249–258. Springer, 2016.
+ [36] Donglai Wei, Kisuk Lee, Hanyu Li, Ran Lu, J Alexander Bae, Zequan Liu, Lifu Zhang, Márcia dos Santos, Zudi Lin, Thomas Uram, et al., “AxonEM dataset: 3D axon instance segmentation of brain cortical regions,” in International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, 2021, pp. 175–185.
+ [37] Donglai Wei, Zudi Lin, Daniel Franco-Barranco, Nils Wendt, Xingyu Liu, Wenjie Yin, Xin Huang, Aarush Gupta, Won-Dong Jang, Xueying Wang, et al., “MitoEM dataset: large-scale 3D mitochondria instance segmentation from EM images,” in International Conference on Medical Image Computing and Computer-Assisted Intervention. Springer, 2020, pp. 66–76.
+ [38] Philipp Berens, Jeremy Freeman, Thomas Deneux, Nikolay Chenkov, Thomas McColgan, Artur Speiser, Jakob H Macke, Srinivas C Turaga, Patrick Mineault, Peter Rupprecht, et al., “Community-based benchmarking improves spike rate inference from two-photon calcium imaging data,” PLOS Computational Biology, vol. 14, no. 5, pp. e1006157, 2018.
+ [39] Katrin Amunts, Claude Lepage, Louis Borgeat, Hartmut Mohlberg, Timo Dickscheid, Marc-Étienne Rousseau, Sebastian Bludau, Pierre-Louis Bazin, Lindsay B Lewis, Ana-Maria Oros-Peusquens, et al., “BigBrain: an ultrahigh-resolution 3D human brain model,” Science, vol. 340, no. 6139, pp. 1472–1475, 2013.
+ [40] Michał Januszewski, Jörgen Kornfeld, Peter H Li, Art Pope, Tim Blakely, Larry Lindsey, Jeremy Maitin-Shepard, Mike Tyka, Winfried Denk, and Viren Jain, “High-precision automated reconstruction of neurons with flood-filling networks,” Nature Methods, vol. 15, no. 8, pp. 605–610, 2018.
+ [41] Peter H Li, Larry F Lindsey, Michał Januszewski, Mike Tyka, Jeremy Maitin-Shepard, Tim Blakely, and Viren Jain, “Automated reconstruction of a serial-section EM Drosophila brain with flood-filling networks and local realignment,” Microscopy and Microanalysis, vol. 25, no. S2, pp. 1364–1365, 2019.
+ [42] Aishwarya H Balwani and Eva L Dyer, “Modeling variability in brain architecture with deep feature learning,” in 2019 53rd Asilomar Conference on Signals, Systems, and Computers. IEEE, 2019, pp. 1186–1191.
+ [43] Jordan Matelsky, Luis Rodriguez, Daniel Xenes, Timothy Gion, Robert Hider, Brock Wester, and William Gray-Roncal, “intern: Integrated toolkit for extensible and reproducible neuroscience,” bioRxiv, 2020.
+ [44] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun, “Deep residual learning for image recognition,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2016, pp. 770–778.
+ [45] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz, “mixup: Beyond empirical risk minimization,” arXiv preprint arXiv:1710.09412, 2017.
+ [46] Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al., “Bootstrap your own latent: a new approach to self-supervised learning,” Advances in Neural Information Processing Systems, vol. 33, pp. 21271–21284, 2020.
+ [47] Mehdi Azabou, Mohammad Gheshlaghi Azar, Ran Liu, Chi-Heng Lin, Erik C Johnson, Kiran Bhaskaran-Nair, Max Dabagia, Bernardo Avila-Pires, Lindsey Kitchell, Keith B Hengen, et al., “Mine your own view: Self-supervised learning through across-sample prediction,” arXiv preprint arXiv:2102.10106, 2021.
+ [48] Olaf Ronneberger, Philipp Fischer, and Thomas Brox, “U-Net: Convolutional networks for biomedical image segmentation,” CoRR, vol. abs/1505.04597, 2015.
+ [49] Johannes Schmidt, “PyTorch-2D-3D-UNet-Tutorial,” https://github.com/johschmidt42/PyTorch-2D-3D-UNet-Tutorial, 2021.
+ [50] Pavel Yakubovskiy, “Segmentation Models PyTorch,” https://github.com/qubvel/segmentation_models.pytorch, 2020.
+ [51] Tongle Fan, Guanglei Wang, Yan Li, and Hongrui Wang, “MA-Net: A multi-scale attention network for liver and tumor segmentation,” IEEE Access, vol. 8, pp. 179656–179665, 2020.
+ [52] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie, “Feature pyramid networks for object detection,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2017, pp. 936–944.
+ [53] Zongwei Zhou, Md Mahfuzur Rahman Siddiquee, Nima Tajbakhsh, and Jianming Liang, “UNet++: A nested U-Net architecture for medical image segmentation,” in Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support, Danail Stoyanov, Zeike Taylor, Gustavo Carneiro, Tanveer Syeda-Mahmood, Anne Martel, Lena Maier-Hein, João Manuel R.S. Tavares, Andrew Bradley, João Paulo Papa, Vasileios Belagiannis, Jacinto C. Nascimento, Zhi Lu, Sailesh Conjeti, Mehdi Moradi, Hayit Greenspan, and Anant Madabhushi, Eds., Cham, 2018, pp. 3–11, Springer International Publishing.
+ [54] Hanchao Li, Pengfei Xiong, Jie An, and Lingxue Wang, “Pyramid attention network for semantic segmentation,” CoRR, vol. abs/1805.10180, 2018.
+ [55] Hengshuang Zhao, Jianping Shi, Xiaojuan Qi, Xiaogang Wang, and Jiaya Jia, “Pyramid scene parsing network,” CoRR, vol. abs/1612.01105, 2016.
+ [56] Adaloglou Nikolaos, “Deep learning in medical image analysis: a comparative analysis of multi-modal brain-MRI segmentation with 3D deep neural networks,” M.S. thesis, University of Patras, 2019, https://github.com/black0017/MedicalZooPytorch.
+ [57] Fausto Milletari, Nassir Navab, and Seyed-Ahmad Ahmadi, “V-Net: Fully convolutional neural networks for volumetric medical image segmentation,” CoRR, vol. abs/1606.04797, 2016.
+ [58] Wenqi Li, Guotai Wang, Lucas Fidon, Sébastien Ourselin, M. Jorge Cardoso, and Tom Vercauteren, “On the compactness, efficiency, and representation of 3D convolutional networks: Brain parcellation as a pretext task,” CoRR, vol. abs/1707.01992, 2017.
+ [59] Atharva Tendle and Mohammad Rashedul Hasan, “A study of the generalizability of self-supervised representations,” Machine Learning with Applications, vol. 6, pp. 100124, 2021.
+ [60] Leland McInnes, John Healy, and James Melville, “UMAP: Uniform manifold approximation and projection for dimension reduction,” arXiv preprint arXiv:1802.03426, 2018.
+ [61] David Rolnick and Eva L Dyer, “Generative models and abstractions for large-scale neuroanatomy datasets,” Current Opinion in Neurobiology, vol. 55, pp. 112–120, 2019, Machine Learning, Big Data, and Neuroscience.
+ [62] Joy M Jackson, Ran Liu, and Eva L Dyer, “Building representations of different brain areas through hierarchical point cloud networks,” in Medical Imaging with Deep Learning, 2022.
+ [63] Elizabeth M.C. Hillman, Venkatakaushik Voleti, Wenze Li, and Hang Yu, “Light-sheet microscopy in neuroscience,” Annual Review of Neuroscience, vol. 42, no. 1, pp. 295–313, 2019, PMID: 31283896.
+ [64] Scott Trinkle, Sean Foxley, Narayanan Kasthuri, and Patrick La Rivière, “Synchrotron x-ray micro-CT as a validation dataset for diffusion MRI in whole mouse brain,” Magnetic Resonance in Medicine, vol. 86, no. 2, pp. 1067–1076, 2021.
+ [65] Sean Foxley, Vandana Sampathkumar, Vincent De Andrade, Scott Trinkle, Anastasia Sorokina, Katrina Norwood, Patrick La Riviere, and Narayanan Kasthuri, “Multi-modal imaging of a single mouse brain over five orders of magnitude of resolution,” NeuroImage, vol. 238, pp. 118250, 2021.
+ [66] Paul A. Yushkevich, Joseph Piven, Heather Cody Hazlett, Rachel Gimpel Smith, Sean Ho, James C. Gee, and Guido Gerig, “User-guided 3D active contour segmentation of anatomical structures: Significantly improved efficiency and reliability,” Neuroimage, vol. 31, no. 3, pp. 1116–1128, 2006.
+ Appendix
+ 1 Dataset Details
+ 1.1 Spatial autocorrelation between images
+ We computed the cross-covariance of the images in our main subvolumes to see how much
+ correlation exists between consecutive or nearby image slices. This analysis confirmed that after 5-7
+ slices, the cross-correlation between images drops off consistently (see Figure A1). Based on this
+ analysis, we crop out 10 slices between the training and testing sets in all the subvolumes.
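+ A minimal sketch of this check (a simplified version of the analysis, with a placeholder volume):
+ compute the Pearson correlation between every pair of slices in a (Z, H, W) volume and inspect
+ how it decays with slice offset.
+ import numpy as np
+ volume = np.random.rand(360, 256, 256)          # placeholder for one subvolume
+ flat = volume.reshape(volume.shape[0], -1)      # one row per slice
+ corr = np.corrcoef(flat)                        # (Z, Z) slice-to-slice correlation matrix
+ # mean correlation at each slice offset (the diagonal bands of the matrix)
+ decay = [np.mean(np.diag(corr, k)) for k in range(20)]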
+ Figure A1: Cross-covariance matrices. Per-class cross-covariance of all 4 considered regions (right), zoomed-
+ in view of the cortex cross-covariance (top left), and cross-covariance of the entire dataset (bottom left).
+ 1.2 Dense pixel-level annotations
+ Utilizing the sparse annotations from (15), we trained a 2D U-Net to segment the image data into
+ 4 classes (blood vessels, cell bodies, axons, and background) and applied it to the slices in the
+ four volumes that were not already annotated. A proofreader then reviewed the annotations for
+ each slice in the volume using ITK-Snap (66) in the y-z plane. During the proofreading of a slice,
+ any structure with labels that were split between classes (errors in the U-Net) was first corrected.
+ Then, any structures without split labels were reviewed for correctness and changed if incorrect.
+ Finally, the annotator checked the following z slice to ensure continuity of components. This
+ process continued for each slice in the volume. To ensure consistency of the annotations, the
+ volume was proofread by the same annotator a second time, now going through the x-z plane. After
+ proofreading, we generated densely annotated volumes of size 256x256x360 in each of the four
+ ROIs.
+ 1.3 Interpolated ROI annotations
+ A full ROI annotation of the x-ray microtomography dataset was interpolated from manually
+ annotated z-level slices that were roughly 50 pixels apart. To generate the interpolated layers, a
+ recursive algorithm was implemented to linearly shift the boundary between two annotation classes
+ from one pre-existing layer to the next: once a shifting boundary region was located, the midpoint
+ of the shift was identified and full, one-voxel-wide annotations were created along the midpoint,
+ spanning all necessary z levels. This process split transition regions into two roughly equal spaces,
+ and the recursive process repeated for the two respective transition spaces until the entire transition
+ region was fully annotated.
+ However, this process was not able to account for all border transitions. Thus, about 10% of the
+ border transitions were annotated manually using the visualization software application ITK-SNAP.
+ Fully interpolated ROI annotations are available for z-levels between 109 and 459, inclusive.
+ Figure A2: The Recursive Process of the Interpolation of ROI Annotations. A four-step recursive process
+ was implemented to produce interpolated ROI annotation layers from manually annotated layers. This process
+ repeated recursively until the entire transition region was interpolated with ROI annotations.
+ 2 Data Access
+ To allow for easy, public access to the data, the dataset is stored in the BossDB system. Critically,
+ this database enables efficient access to arbitrary cutouts of large volumetric datasets. The project
+ page is available at https://bossdb.org/project/prasad2020. No user account is required:
+ public credentials allow read-only access to the data without the need for user account creation.
+ The raw images are available with the resource identifier bossdb://prasad/prasad2020/image,
+ which is the "prasad" collection, "prasad2020" experiment, and "image" channel. The raw image
+ data are in an unsigned 8-bit integer format, with preferred indexing in ZYX format. Areas with
+ invalid data (e.g., outside the image volume) are assigned the value 0. The maximum and minimum
+ indices are z = [0, 720], y = [0, 1420], and x = [0, 5805]. The data have 1.17µm isotropic resolution.
+ The annotation images are available with the resource identifiers
+ bossdb://prasad/prasad_analysis/pixel_labels and bossdb://prasad/prasad_analysis/roi_labels,
+ which are the "prasad" collection, "prasad_analysis" experiment, and "pixel_labels" and
+ "roi_labels" channels. The annotation data are in an unsigned 64-bit integer format, with preferred
+ indexing in ZYX format. Areas with invalid data (e.g., outside the image volume) are assigned the
+ value 0. As for the raw image channel, the maximum and minimum indices are z = [0, 720],
+ y = [0, 1420], and x = [0, 5805]. The data have 1.17µm isotropic resolution.
+ Access is available through the Python intern array API, which provides numpy-like referencing.
+ Using this library, the user creates an intern array, image = array(boss_url), where boss_url
+ is one of the resource identifiers listed above. The user can then access data from the channel
+ using numpy-like syntax, demonstrated by this code example, which downloads a cutout
+ corresponding to cortex:
+ from intern import array
+ # public, read-only raw image channel on BossDB
+ image = array("bossdb://prasad/prasad2020/image")
+ # cutout of the cortex subvolume, indexed as [z, y, x]
+ data = image[110:379, 900:1156, 4600:4856]
+ A data loader is provided for rapid development of PyTorch models, and is used for all models
+ tested in this work. On creation, the data loader loads a task configuration '.json' file which
+ specifies the parameters of the task. Two modes of operation are allowed: download=true and
+ download=false. In the former, on creation of the data loader, data are pulled locally and stored
+ as a numpy file; if the numpy file already exists, it is instead loaded from disk. In the latter case,
+ data are downloaded to memory and not written to disk. The data are stored in a numpy tensor
+ containing the concatenated slices from cortex, striatum, VP, and ZI (for four-class problems). The
+ item retrieval function can serve up integer region labels (for task 1) or microstructure masks (for
+ task 2), as dictated by the configuration. The basic input data configuration parameters are listed
+ below (an example configuration is sketched after the list):
+ • image_chan - BossDB channel from which to pull the raw image data.
+ • annotation_chan - BossDB channel from which to pull the image annotations (either macro- or micro-structures).
+ • xrange_cor - range along the x-axis (on the full data) for the slices from Cortex.
+ • yrange_cor - range along the y-axis (on the full data) for the slices from Cortex.
+ • xrange_stri - range along the x-axis (on the full data) for the slices from Striatum.
+ • yrange_stri - range along the y-axis (on the full data) for the slices from Striatum.
+ • xrange_vp - range along the x-axis (on the full data) for the slices from VP.
+ • yrange_vp - range along the y-axis (on the full data) for the slices from VP.
+ • xrange_zi - range along the x-axis (on the full data) for the slices from ZI.
+ • yrange_zi - range along the y-axis (on the full data) for the slices from ZI.
+ • z_train - the range (slices) along the z-axis to use as the train set.
+ • z_val - the range (slices) along the z-axis to use as the validation set.
+ • z_test - the range (slices) along the z-axis to use as the test set. A buffer of 10 slices is recommended between the train/val set and the test set.
+ • volume_z - the number of slices to include in a volume slice. For the 2D cases, individual slices are the input to the models (volume_z is 1). In the 3D case, volume slices are the input to the models.
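+ As an example, the sketch below writes a minimal task configuration using the keys above and the
+ ROI-C1 cube coordinates from Table A1 (the exact schema may differ slightly from the configs
+ shipped in the repository):
+ import json
+ config = {
+     "image_chan": "bossdb://prasad/prasad2020/image",
+     "annotation_chan": "bossdb://prasad/prasad_analysis/roi_labels",
+     "xrange_cor": [4600, 4856], "yrange_cor": [900, 1156],
+     "xrange_stri": [3700, 3956], "yrange_stri": [500, 756],
+     "xrange_vp": [3063, 3319], "yrange_vp": [850, 1106],
+     "xrange_zi": [1543, 1799], "yrange_zi": [650, 906],
+     "z_train": [110, 379], "z_val": [380, 409], "z_test": [420, 470],
+     "volume_z": 1,  # 2D setting: one slice per sample
+ }
+ with open("task_roic1.json", "w") as f:
+     json.dump(config, f, indent=2)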
+ The repository, https://github.com/MTNeuro/MTNeuro, contains scripts and Python notebooks
+ for data access and running models. Python notebooks that download the numpy files for images
+ and annotations are provided for users not using PyTorch. These are based on the original data
+ access notebooks developed for this dataset, https://github.com/nerdslab/xray-thc. The
+ repository and its requirements can be installed via the pip package manager. The repository is
+ structured as:
+ • MTNeuro: the core code for running PyTorch dataloaders and PyTorch models
+ • Notebooks: access notebooks for users who do not use PyTorch
+ • Scripts: example scripts for running PyTorch models for each task, which form the basis for new algorithm development
+ 3 Further Details on Task 1
+ 3.1 Models
+ 3.1.1 Supervised models
+ • Resnet18: an 18-layer version of a model that reformulates the layers as learning residual
+ functions with reference to the layer inputs in order to efficiently train deeper architectures (44).
+ • Resnet18 + Mixup: the same model architecture described above (Resnet18), but trained using
+ mixup, an augmentation that mixes inputs as well as their corresponding labels through a
+ convex combination, and has been shown to yield significant improvements in supervised
+ classification models (45). A sketch of the mixup operation is given below.
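+ For illustration, a minimal sketch of one mixup step (a standard formulation of (45), not the
+ exact training code used here):
+ import torch
+ import numpy as np
+ def mixup_batch(x, y, alpha=0.2):
+     # convex combination of a batch with a shuffled copy of itself
+     lam = float(np.random.beta(alpha, alpha))
+     perm = torch.randperm(x.size(0))
+     return lam * x + (1 - lam) * x[perm], y, y[perm], lam
+ # the loss mixes the two label sets with the same weight:
+ # loss = lam * criterion(model(x_mix), y_a) + (1 - lam) * criterion(model(x_mix), y_b)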
+ 3.1.2 Self-supervised models
+ We test several contrastive-based SSL methods that do not need negative examples to generate good
+ representations of data (46). All SSL models are trained using a Resnet18 encoder backbone in
+ order to make them comparable to the evaluated supervised models.
+ We tested the following SSL models:
+ • BYOL: BYOL relies on a mirrored structure of online and target networks that learn from
+ each other (46). In particular, the online network tries to predict the encoding of a target
+ image view x by minimizing the distance in latent space to an augmented view xa.
+ • MYOW: MYOW builds on BYOL by also minimizing the distance in latent space between
+ views, but incorporates a view mining approach in order to search the dataset for neighboring
+ samples in representation space. The augmented and mined samples are then integrated
+ into a unified latent space through the use of an additional predictor network (47).
+ • MYOW-m: MYOW-m is an extension of MYOW that retains MYOW's sample mining
+ procedure, but uses a single predictor on both the augmented and mined views rather than
+ the cascaded projector design proposed in MYOW.
+ Since all of these SSL methods rely on some form of transformation to process the views during
+ training, we choose the simplest possible augmentation for all three methods: randomly cropping
+ patches half the width and height of each image sample to generate augmented and mined views (see
+ Figure A3). To standardize the evaluation of the methods, we evaluate the trained encoder's
+ performance in classifying which class each of the four corners of an image sample belongs to.
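+ A minimal sketch of this view-generation transform (assuming a torchvision-based implementation;
+ the exact augmentation code may differ):
+ import torchvision.transforms as T
+ H = W = 128  # illustrative input size (e.g., 256x256 slices after 2x downsampling)
+ view_transform = T.Compose([
+     T.RandomCrop((H // 2, W // 2)),  # crop half the width and height
+     T.Resize((H, W)),                # resize back to the input resolution
+ ])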
+ Figure A3: View generation for self-supervised approaches. In A, we show how augmented views are
+ generated by extracting random crops from a given image sample. In B, we schematize the view mining
+ process, which involves comparing different random crops from a pool of samples to a target view (red) and
+ choosing the closest one in representation space as the mined view (Image 3, highlighted).
+ 3.2 More Details on Training
+ 3.2.1 Configuration Files
+ Two types of configuration files are provided as input for training:
+ • Network Configuration - a '.json' file containing settings for the optimizer, the augmentations
+ to use, the parameters for the corresponding supervised or self-supervised models, and the
+ seed to use for the training.
+ • Task Configuration - a '.json' file specifying information regarding the data slices to access
+ for the training. This includes the database channel to access for the data and region-level
+ annotations, the x and y ranges for the slices from each region, the size of the slices
+ (corresponding to the level of downsampling applied), the train-val-test split, and the depth
+ of each slice sample (1, since we focus on 2D processing for this task).
+ Table A1: Dataset configuration used for task 1 (ROI-C1) and task 2
+ Setting        Values cube set A
+ xrange_cor     [4600, 4856]
+ yrange_cor     [900, 1156]
+ xrange_stri    [3700, 3956]
+ yrange_stri    [500, 756]
+ xrange_vp      [3063, 3319]
+ yrange_vp      [850, 1106]
+ xrange_zi      [1543, 1799]
+ yrange_zi      [650, 906]
+ z_train        [110, 379]
+ z_val          [380, 409]
+ z_test         [420, 470]
+ Table A2: Coordinates for the additional cube sets used in ROI-C2 (C and D) and ROI-C3 (B, C and D)
+ Setting        Values cube set B   Values cube set C   Values cube set D
+ xrange_cor     [5312, 5568]        [5056, 5312]        [4600, 4856]
+ yrange_cor     [388, 644]          [644, 900]          [400, 656]
+ xrange_stri    [3828, 4084]        [3344, 3600]        [3800, 4056]
+ yrange_stri    [912, 1168]         [400, 656]          [244, 500]
+ xrange_vp      [2551, 2807]        [2151, 2407]        [2295, 2551]
+ yrange_vp      [850, 1106]         [950, 1206]         [694, 950]
+ xrange_zi      [1287, 1543]        [1031, 1287]        [1799, 2055]
+ yrange_zi      [906, 1162]         [906, 1162]         [650, 906]
+ 3.2.2 Model Settings
+ The basic model settings are:
+ • model - either the classifier (if supervised) or the encoder backbone (if SSL) to train for the classification task.
+ • classes - the number of output classes of the model.
+ • method - (only for SSL) the self-supervised approach to train.
+ 3.2.3 Input Data Configuration
+ The dataset configurations used for the results in Table 1, column ROI-C1, are listed in Table A1.
+ The coordinates for the additional cubes used in ROI-C2 and ROI-C3 are shown in Table A2.
+ 3.2.4 Training and evaluation setup
+ We train all supervised and self-supervised models using a 0.03 learning rate, a batch size of 256
+ (chosen for a stable linear layer evaluation), and 5 different random seed values: 1, 100, 350, 631,
+ and 872. We compile these 5 results in order to calculate the mean and standard deviation of the
+ classification accuracy for each considered approach. For the Resnet18-Mixup setting, we evaluate
+ 5 different probabilities of applying the augmentation during training (0.01, 0.1, 0.3, 0.5, and 0.7)
+ and report the setting with the best mean accuracy.
+ 3.3 Additional experiments
+ We report additional tests in Table A3, where we evaluate all considered models under the ROI-C1
+ training setting, but using different downsampling factors on the image samples: 4x, 2x, and full
+ resolution (no downsampling). We observe that performance increases with the resolution (as more
+ information is available to the models). Surprisingly, we see a significantly larger performance
+ jump in the SSL methods when moving from 2x downsampling to full resolution compared to the
+ supervised methods (which increase their performance only by a slight margin). This further
+ supports our observation that SSL models benefit more from exposure to additional data than their
+ supervised counterparts. Each SSL training instance took on average 30 minutes to train under 4x
+ downsampling, 1.5 hours under 2x downsampling, and 4 hours under the full-resolution setting
+ (runtime for a single random seed, trained on an RTX 3090 GPU node). We chose to use 2x
+ downsampling for the rest of our experiments in task 1, since it provided a good compromise
+ between performance and runtime.
+ Table A3: Results on image classification for brain area prediction (Task 1).
+ ROI-C1           Downsampling = 4   Downsampling = 2   Full-res
+ Resnet18         0.83 ± 0.06        0.88 ± 0.03        0.87 ± 0.1
+ Resnet18-Mixup   0.85 ± 0.06        0.90 ± 0.04        0.91 ± 0.03
+ BYOL             0.83 ± 0.04        0.88 ± 0.02        0.98 ± 0.01
+ MYOW             0.84 ± 0.04        0.90 ± 0.02        0.96 ± 0.03
+ MYOW-m           0.84 ± 0.05        0.94 ± 0.02        0.99 ± 0.01
+ PCA              0.59               0.59               0.59
+ NMF              0.55               0.62               0.54
+ 4 Further Details on Task 2
+ 4.1 Baselines
+ 4.1.1 2D Models
+ The models we use for the 2D segmentation task are:
+ • A standard 2D U-Net model (48; 49)
+ • Selected models from the 'segmentation_models.pytorch' library (50):
+ – MAnet - a model utilizing a multi-scale attention mechanism, originally designed for liver and liver tumor segmentation (51),
+ – FPN - the Feature Pyramid Network architecture for object detection, modified for image segmentation (52),
+ – U-Net++ - a nested U-Net architecture developed for medical image segmentation (53),
+ – PAN (Pyramid Attention Network) - a model incorporating a spatial pyramid attention structure, designed to utilize global contextual information in semantic segmentation (54),
+ – PSPNet (Pyramid Scene Parsing Network) - a model utilizing pyramid pooling and a scene parsing network to better learn global context information (55).
+ 4.1.2 3D Models
+ The models we use for the 3D segmentation task are:
+ • A standard 3D U-Net model (48),
+ • Selected models from 'MedicalZooPytorch' (56):
+ – VNetLight - a lighter version of the V-Net convolutional network architecture developed to perform volumetric segmentation (57; 56),
+ – HighResNet - a compact, high-resolution convolutional network for volumetric segmentation, originally demonstrated on a brain parcellation pretext task on brain MR images (58).
+ 4.2 More Details on Training
+ 4.2.1 Configuration Files
+ Two types of configuration files are provided as input to training:
+ • Network Configuration - a '.json' file containing settings for the optimizer, the augmentation
+ settings, the model settings, evaluation settings, settings for saving the output, and the seed
+ to use for the training.
+ • Task Configuration - a '.json' file specifying information regarding the data slices to access
+ for the training. This includes the database channel to access for the data and annotations,
+ the x and y ranges for the slices from each region, the size of the slices, the train-val-test
+ split, the size of the volume slice (3D), and whether the training is for the 3-class or 4-class
+ setting. The task configurations used for the results in Table 2 are listed in Table A1.
+ Together, these two files completely specify the configuration for a training run.
+ 4.2.2 Model Settings
+ The basic model settings are:
+ • encoder_name - the encoder to use with the model. This is applicable only for the 2D models
+ and the 'UNet 3D' model; for more information, see the 'segmentation_models.pytorch'
+ library (50). For the training results in Table 2, 'efficientnet-b7' was used as the encoder,
+ as it gave the best performance among the several other encoders that were tried.
+ • encoder_weights - the pre-trained weights to use for the model. For the 2D model and
+ 'UNet 3D' results in Table 2, weights trained on ImageNet were used.
+ • in_channels - the number of input channels.
+ • classes - the number of output classes.
+ 4.2.3 Details on Class Proportions
+ At the microstructure level/scale, the frequency of each class across the brain regions is as follows:
+ • Cortex - Background: 93%; Blood Vessel: 2.33%; Cell: 4.64%; Axons: 0%
+ • Striatum - Background: 72.63%; Blood Vessel: 2.5%; Cell: 5.66%; Axons: 19.22%
+ • VP - Background: 22.75%; Blood Vessel: 4.73%; Cell: 5.73%; Axons: 66.8%
+ • ZI - Background: 32.15%; Blood Vessel: 5.4%; Cell: 9.41%; Axons: 53.04%
+ • Total - Background: 55.13%; Blood Vessel: 3.74%; Cell: 6.36%; Axons: 34.77%
+ 4.2.4 Hyper-Parameters and Random Seeds
+ To select optimal hyperparameters for each model, a hyperparameter search is performed by
+ training the models with the following learning rates: 0.1, 0.05, 0.01, 0.005, 0.001; and the
+ following batch sizes: 2, 4, 6, 8, 10. The best-performing learning rate and batch size are chosen,
+ and 5 separate instances of each model are trained with this optimal learning rate and batch size,
+ with seeds 1, 100, 350, 631, and 872, respectively. These 5 results (for each model) are used to
+ calculate the mean and SD (Mean ± SD) of the performance metrics reported in Table 2. Across
+ several models, the Adam optimizer yielded better results than SGD, so the Adam optimizer was
+ used for all training runs. The optimal hyperparameters found for each model in each setting
+ are listed in Table A4.
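+ A rough sketch of this search loop (a simplified version; train_and_validate is a hypothetical
+ helper standing in for the repository's training and validation scripts):
+ from itertools import product
+ learning_rates = [0.1, 0.05, 0.01, 0.005, 0.001]
+ batch_sizes = [2, 4, 6, 8, 10]
+ best = None
+ for lr, bs in product(learning_rates, batch_sizes):
+     score = train_and_validate(lr, bs)  # hypothetical: train on train set, score on val split
+     if best is None or score > best[0]:
+         best = (score, lr, bs)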
+ 5 Further Details on Task 3
+ In order to extract the semantic features from the microstructure annotations in the 4 densely
+ annotated cubes, we first isolate the relevant corresponding pixel-level labels (either cells, blood
+ vessels, or axons). Once we have extracted the desired class and labeled everything else as
+ background, we can perform a connected component analysis to compute the desired semantic
+ features (cell count, size, and distance, as well as axon and vessel density) for each image sample.
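+ For illustration, a minimal sketch of such a connected-component computation for one class (a
+ simplified assumption of the pipeline, not the exact feature-extraction code):
+ import numpy as np
+ from scipy import ndimage
+ mask = np.random.rand(256, 256) > 0.95        # placeholder binary mask (e.g., the cell class)
+ labeled, n_components = ndimage.label(mask)   # label connected components
+ sizes = np.bincount(labeled.ravel())[1:]      # pixel count per component (skip background)
+ cell_count = n_components
+ mean_cell_size = sizes.mean() if n_components > 0 else 0.0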
+ Once the semantic features for the different tasks are calculated, we extract the representations of
+ all samples across the 4 cubes using the models trained in task 1 under ROI-C1 (Table 3, top) and
+ ROI-C3 (Table 3, bottom), and use scikit-learn to fit a linear regression in order to predict the
+ semantic features, reporting the R2 on the entire subvolume of interest. Furthermore, we provide
+ visualizations of the representations and the corresponding semantic features of the 4 cubes of
+ interest using a BYOL encoder trained under two different dataset conditions (ROI-C1 and ROI-C3)
+ in Figure A4.
+ Figure A4: Visualizations of semantic features overlaid on two-dimensional learned representations. The
+ first row shows 2D UMAP projections of learned BYOL (ROI-C1) embeddings with the different semantic
+ attributes overlaid on the latents. From left to right, we color the embeddings by brain area (class), % blood
+ vessels, % axons, cell count, and cell size. The second row shows the 2D projections of the learned BYOL
+ (ROI-C3) embeddings, likewise overlaid with the different semantic attributes.
+ Table A4: Optimal hyperparameters used for Task 2 models in Table 2
+ I. 2D Pixel-level Segmentation
+ Model         LR (3-Class)   BS (3-Class)   LR (4-Class w/o ZI)   BS (4-Class w/o ZI)
+ 2D U-Net      0.1            8              0.01                  4
+ MA-Net        0.01           2              0.001                 8
+ FPN           0.01           2              0.01                  4
+ U-Net++       0.001          8              0.01                  8
+ PAN           0.01           10             0.001                 8
+ PSPNet        0.01           4              0.1                   2
+ II. 3D Pixel-level Segmentation
+ Model         LR (3-Class)   BS (3-Class)   LR (4-Class w/o ZI)   BS (4-Class w/o ZI)
+ 3D U-Net      0.005          1              0.01                  1
+ VNetLight     0.01           1              0.01                  1
+ HighResNet    0.005          1              0.001                 1
1718
+ 6  Maintenance, Licensing, and Ethical Concerns
1720
+ The dataset, data access API, and dataloaders are maintained by the BossDB team (bossdb.org),
1721
+ which is tasked as the BRAIN Initiative archive of record for nanoscale connectomics datasets.
1722
+ The team is developing standards in accordance with FAIR principles (https://www.go-fair.
1723
+ org/fair-principles/) to ensure permanent identifiers and broad accessibility. The data are
1724
+ available under the CC-BY-4.0 license. The baseline code is available open source, hosted in a
+ GitHub organization; community forks and pull requests are welcome and will be reviewed by the
1726
+ repository maintainers. As improvements are made to the baseline codebase for future challenges,
1727
+ changes will be pushed to the MTNeuro repository as a new versioned release.
1728
+ [Figure A4 image residue (axis ticks for the panels Axon Pixels (%), Blood Vessel Pixels (%),
+ Cell Count, and Cell Size, with legend entries Cortex, Striatum, VP, and ZI) removed.]
+ The dataset used includes animal data, which was collected under an approved IACUC protocol
+ as detailed in the original paper. There is no human-derived data in this dataset. We emphasize
+ that this dataset is for development of algorithms for fundamental analysis of structural
+ neuroscience data,
1825
+ but it is possible future efforts could use these data inappropriately for the development of clinically
1826
+ relevant algorithms which could result in negative societal impacts. This use is discouraged due to
1827
+ the unknown generalizations of high-resolution X-ray Microtomography to clinical modalities.
1828
1829
+
I9AyT4oBgHgl3EQffvhT/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
ItE3T4oBgHgl3EQfXAqD/content/tmp_files/2301.04475v1.pdf.txt ADDED
@@ -0,0 +1,3162 @@
1
+ arXiv:2301.04475v1 [math-ph] 11 Jan 2023
2
+ MIURA-RECIPROCAL TRANSFORMATIONS AND LOCALIZABLE
3
+ POISSON PENCILS
4
+ P. LORENZONI, S. SHADRIN, AND R. VITOLO
5
+ Abstract. We show that the equivalence classes of deformations of localizable semisimple
6
+ Poisson pencils of hydrodynamic type with respect to the action of the Miura-reciprocal group
7
+ contain a local representative and are in one-to-one correspondence with the equivalence classes
8
+ of deformations of local semisimple Poisson pencils of hydrodynamic type with respect to the
9
+ action of the Miura group.
10
+ Contents
+ 1. Introduction
+ 1.1. A variety of jet space transformation groups
+ 1.2. Action of the transformation groups
+ 1.3. Weakly non-local Poisson bi-vectors of localizable shape
+ 1.4. Localizability
+ 1.5. Projective group and Doyle–Potëmin form
+ 1.6. Acknowledgments
+ 2. Formulae for the action
+ 2.1. The Ferapontov–Pavlov formula
+ 2.2. Weakly non-local bi-vectors of localizable shape
+ 3. Schouten bracket for weakly non-local operators of localizable shape
+ 3.1. The two approaches
+ 3.2. Identification of the two approaches
+ 4. Pencils of weakly non-local bi-vectors of localizable shape
+ 4.1. Bi-Hamiltonian cohomology
+ 4.2. Comparison with the purely local deformations
+ 4.3. Roots of the characteristic polynomial of the symbol
+ 5. Projective-reciprocal invariance of the Doyle–Potëmin form
+ References
67
+ 1. Introduction
68
+ In 2001, Dubrovin and Zhang initiated a classification programme of bi-Hamiltonian inte-
69
+ grable PDEs in two independent variables [DZ01].
70
+ The group action that they considered
71
+ was that of Miura transformations, i.e., transformations depending on the field variables and,
+ polynomially, on their higher-order derivatives through a perturbative series.
73
+ Among the questions that the above approach raises there is the issue of extending the group
74
+ action to include (possibly non-local) changes of variables in one of the independent variables.
75
+ Indeed, an important class of such transformations is that of reciprocal transformations, which
76
+ play an important role in Mathematical Physics (see e.g. [Rog69; Rog68; Fer89; Fer91; FP03;
77
+ XZ06; AG07; Abe09; BS09; LZ11; AL13]).
78
+ 2020 Mathematics Subject Classification. 37K05, 37K10, 37K20, 37K25.
79
+ Key words and phrases. bi-Hamiltonian PDE, Hamiltonian operator, Miura transformation, reciprocal trans-
80
+ formation, integrable systems.
81
+ 
85
+ This paper is concerned with the action of the group of Miura-reciprocal transformations,
86
+ that is a natural group of simultaneous transformations of the independent and the dependent
87
+ variables of a (bi-)Hamiltonian system through a perturbative series of derivatives of the field
88
+ variables. Among other things, we consider (1) the Miura-reciprocal transformations of the
89
+ 1st kind and rederive from scratch the Ferapontov–Pavlov formula for the transformation
90
+ of a hydrodynamic bivector; (2) Miura-reciprocal transformations of the 2nd kind (close to
91
+ identity) and classify the orbits of their action on Poisson pencils of weakly non-local bi-vectors
92
+ of localizable shape with localizable semi-simple hydrodynamic leading term; (3) a smaller
93
+ group of projective-reciprocal transformations and prove that they preserve the Doyle–Potëmin
94
+ canonical form of the bi-vectors.
95
+ A detailed comparison between previous results and our results can be read in the following
96
+ Subsections; we just stress that our result on the classification of bi-Hamiltonian integrable
97
+ structures provides a natural extension for the classification program in [DZ01] (that also in-
98
+ corporates and explains some of the results in [LZ11]). To the best of our knowledge it is the
99
+ first result in the literature that provides a systematic classification of orbits of the action of the
100
+ group of Miura-reciprocal transformations in the bi-Hamiltonian context (for a single Poisson
101
+ structure this type of result is established in [FP03; LZ11]).
102
+ 1.1. A variety of jet space transformation groups. We consider a jet space Jr(1, N),
103
+ r ≥ 0, with independent variable x and dependent variables ui, i = 1, . . . , N, considered as
104
+ coordinates on some open domain U ⊂ RN. Let ui,σ denote the x-derivative of ui taken σ times.
105
+ Consider the transformations (i.e. diffeomorphisms) of the jet space J^r(1, N). We begin
107
+ from the most general type of transformation: a reciprocal transformation coupled with a
108
+ differential substitution. Reciprocal transformations in a modern setting were introduced in
109
+ [Rog69; Rog68] in the study of gas dynamics, and later analyzed under a geometric viewpoint
110
+ in [Fer89; Fer91] and many other authors (see e. g. [FP03; XZ06; AG07; Abe09; BS09; LZ11;
111
+ AL13] and references therein). The class of differential substitutions was introduced in [Ibr85],
112
+ although many particular differential substitutions were already present in the literature (in
113
+ particular, the Miura transformations).
114
+ Definition 1.1. A reciprocal differential substitution is a nonlocal transformation of the inde-
+ pendent variable x into the independent variable y of the type
+ (1)   dy = B\,dx,   B = B(x, u^i, u^{i,\sigma}),
+ coupled with a differential substitution of the dependent variables of the form
+ (2)   w^i = Q^i(x, u^j, u^{j,\sigma}).
122
+ By the fact that dx(∂x) = 1 = dy(∂y) we obtain that total derivatives are related by the
123
+ formula ∂x = B∂y.
124
+ Note that, in general, the inversion of a differential substitution is a
125
+ nonlocal operation. We will soon focus on a more restrictive class of transformations.
126
+ Reciprocal differential substitutions admit several interesting subclasses:
127
+ • A reciprocal transformation is a nonlocal transformation of the independent variable x
+ into the independent variable y of the type
+ (3)   dy = B\,dx,   B = B(x, u^i, u^{i,\sigma}),
+ coupled with the identical transformation of the dependent variables. In practical applications
+ the functions u^i also depend on an additional parameter that plays the role of "time" of the
+ system of evolutionary PDEs
+       u^i_t = F^i(x, u^i, u^{i,\sigma}),   i = 1, \dots, N,
+ governing their evolution. Taking this additional variable into account, reciprocal transforma-
+ tions are often defined as
+       dy = A\,dt + B\,dx,
+ where the functions A, B are subject to the closure condition B_t = A_x, that is, dy is a
+ conservation law for the equation. Note that the coefficient A does not enter the transformation
+ law for \partial_x, and thus can be disregarded throughout this paper.
147
+ • A reciprocal differential substitution is said to be holonomic if there exists a differential
148
+ function P such that B = ∂xP.
149
+ • A general differential substitution of (x, u^i) into (y, w^j):
+ (4)   y = P(x, u^j, u^{j,\sigma}),   w^i = Q^i(x, u^j, u^{j,\sigma}),
+ yields a holonomic reciprocal differential substitution dy = \partial_x P\,dx, w^i = Q^i by
+ differentiation (in this sense the two classes of transformations coincide).
155
+ The above two categories of transformations, basically local and nonlocal differential substi-
156
+ tutions, are, on the one hand, too wide to be used in the context of the classification programs
157
+ for evolutionary PDEs and related geometric structures as, for instance, the one initiated by
158
+ Dubrovin and Zhang in [DZ01], and on the other hand too restrictive since we are limited by
159
+ fixing the parameter r ≥ 0 that controls the maximal order of jets.
160
+ For this reason, we introduce the space of differential polynomials A, and the following group
161
+ of transformations, which is a subclass of the reciprocal differential substitutions. Consider a
162
+ jet space J∞(1, N) (considered as an inductive limit of the jet spaces Jr(1, N), r → ∞) with
163
+ independent variable x and dependent variables ui, i = 1, . . . , N. Denote ui,σ the x-derivative
164
+ of ui taken σ times. We associate with this space the algebra of functions A := C∞(U)[[ui,σ, i =
165
+ 1, . . . , N, σ ≥ 1]], where C∞(U) is the space of smooth functions on a domain U ⊂ RN in the
166
+ coordinates u^1, \dots, u^N. There is a natural gradation on the algebra A given by
+ \deg_{\partial_x} u^{i,\sigma} = \sigma. Let A_d denote the \deg_{\partial_x}-degree d part of A, which is a
+ finite-dimensional module over C^\infty(U).
169
+ Definition 1.2. A Miura-type reciprocal differential substitution, or Miura-reciprocal trans-
+ formation for short, is a transformation of the type
+ (5)   dy = \Big( \sum_{k=0}^{\infty} \epsilon^k H_k(u^j, u^{j,1}, \dots, u^{j,k}) \Big) dx,
+       w^i = \sum_{k=0}^{\infty} \epsilon^k K^i_k(u^j, u^{j,1}, \dots, u^{j,k}),   i = 1, \dots, N,
+ with H_k, K^i_k \in A_k and
+ (6)   H_0 \neq 0,   \det \Big( \frac{\partial K^i_0(u^j)}{\partial u^k} \Big) \neq 0.
196
+ The formal dispersive parameter ǫ that we introduce here to control the deg∂x-degree is,
197
+ in principle, not strictly necessary but it is very convenient in particular computations and
198
+ applications.
199
+ The set of all Miura-reciprocal transformations is denoted by R. It is a group with respect
200
+ to the composition, and it has some distinguished subgroups:
201
+ • the subgroup RDS of Miura differential substitutions, that are Miura-type reciprocal
202
+ differential substitutions which are also holonomic differential substitutions of the fol-
203
+ lowing type:
204
+ there exists P = x + P_0, with P_0 = \sum_{k=0}^{\infty} \epsilon^k F_k and F_k \in A_k, such that
+ (7)   \partial_x P = \sum_{k=0}^{\infty} \epsilon^k H_k(u^j, u^{j,1}, \dots, u^{j,k});
214
+ • the subgroup of Miura transformations characterized by H0 = 1 and Hk = 0 for all
215
+ k ≥ 1. This subgroup is called the Miura group G ⊂ R [DZ01]; it bears its name from the
+ transformation relating the KdV and modified KdV equations introduced by Miura [Miu68]
+ (recalled below). Note that the Miura group is also a subgroup of the group of Miura
+ differential substitutions: G ⊂ RDS.
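+ For orientation we recall the classical example behind the name (a standard normalization,
+ stated here as a reminder rather than quoted from this paper):
+ 
+ % The original Miura transformation: with KdV  u_t = u_{xxx} + 6uu_x  and
+ % mKdV  v_t = v_{xxx} - 6v^2 v_x,  the differential substitution
+ \[
+   u = -\bigl(v^2 + v_x\bigr)
+ \]
+ % maps solutions of mKdV to solutions of KdV, since
+ \[
+   u_t - u_{xxx} - 6uu_x = -(2v + \partial_x)\bigl(v_t - v_{xxx} + 6v^2 v_x\bigr).
+ \]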
222
+ Definition 1.3. By analogy with the way the standard Miura group is typically presented, we
+ introduce the following two subgroups.
+ • We define Miura-reciprocal transformations of the 1st kind to be the Miura-reciprocal
+ transformations of the form
+ (8)   dy = H_0(u^j)\,dx,   w^i = K^i_0(u^j),   i = 1, \dots, N.
+ The group of all Miura-reciprocal transformations of the 1st kind is denoted by R_I. This group
+ contains as a subgroup the group of Miura transformations of the 1st kind, G_I ⊂ R_I,
+ characterized by H_0 = 1.
+ • We define Miura-reciprocal transformations of the 2nd kind to be the Miura-reciprocal
+ transformations of the form
+ (9)   dy = \Big( 1 + \sum_{k=1}^{\infty} \epsilon^k H_k(u^j, u^{j,1}, \dots, u^{j,k}) \Big) dx,
+       w^i = u^i + \sum_{k=1}^{\infty} \epsilon^k K^i_k(u^j, u^{j,1}, \dots, u^{j,k}),   i = 1, \dots, N.
+ The group of all Miura-reciprocal transformations of the 2nd kind is denoted by R_II. It
+ contains as a subgroup the group of Miura transformations of the 2nd kind, G_II ⊂ R_II,
+ characterized by H_k = 0 for all k ≥ 1.
256
+ Definition 1.4. A distinguished subgroup of R_I is the group of projective reciprocal transfor-
+ mations P. Such transformations are characterized by the requirements that K^i in Equation (8)
+ is a projective transformation (in an affine chart) and H_0 is the common denominator of the
+ projective transformation. More explicitly,
+ (10)   dy = (a^0_j u^j + a^0_0)\,dx,   w^i = \frac{a^i_j u^j + a^i_0}{a^0_j u^j + a^0_0},   i = 1, \dots, N.
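+ For instance, in the scalar case N = 1, formula (10) specializes as follows (a direct
+ instantiation, written here only for orientation):
+ 
+ % N = 1 instance of (10): a projective (Moebius) change of the dependent
+ % variable, with the common denominator absorbed into dy; invertibility of
+ % the map requires the 2x2 coefficient matrix to be nondegenerate.
+ \[
+   dy = (a\,u + b)\,dx, \qquad w = \frac{c\,u + d}{a\,u + b}, \qquad cb - ad \neq 0 .
+ \]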
272
+ The goal of this paper is to discuss some aspects of the actions of these groups on the natural
273
+ suitable geometric structures that emerge in the study of integrable systems of evolutionary
274
+ PDEs. In order to describe our results we have to recall some of these structures, which we do
275
+ in the rest of the introduction.
276
+ 1.2. Action of the transformation groups. The above group of Miura-reciprocal differential
+ substitutions acts on spaces of geometric entities that play important roles in the geometric
+ theory of integrability. In particular, it acts on:
279
+ • densities, that have the form
+ (11)   F = \int f(u^j, u^{j,\sigma})\,dx \in \mathcal{F} := A/\partial_x A,   with f \in A;
+ • variational vector fields, that include symmetries of partial differential equations, and
+ have the form
+ (12)   \varphi = \varphi^i(u^j, u^{j,\sigma})\,\delta_{u^i},   \varphi^i \in A;
+ • covector-valued densities, that include characteristic vectors of conserved quantities of
+ differential equations, and have the form
+ (13)   \psi = \psi_i(u^j, u^{j,\sigma})\,du^i \otimes dx,   \psi_i \in A;
+ • the Euler–Lagrange operator, which sends densities into covector-valued densities,
+ (14)   E(F) = \delta_{u^i}F\,du^i \otimes dx;
302
+ • variational multivectors of degree p, that include Hamiltonian operators of partial dif-
303
+ ferential equations as particular bivectors. They can be regarded as maps from (p − 1)-
304
+ covector-valued densities to variational vector fields.
305
+ In Section 2 we will prove our change of coordinate formulae for reciprocal differential substi-
306
+ tutions for these geometric objects. As an example, we re-derive in Section 2.1 the Ferapontov–
307
+ Pavlov formula for the reciprocal transformation of a Poisson bi-vector of the differential order
308
+ 1 [FP03], and this brings us to the realm of weakly non-local Poisson structures of localizable
309
+ shape.
310
+ 1.3. Weakly non-local Poisson bi-vectors of localizable shape. Let the dependent variables
+ u^i also depend on one external parameter, denoted by t. The most studied structures in the
+ geometric theory of integrability are the local Poisson structures needed for the representation
+ of equations of the form
+ (15)   u^i_t = f^i(u^j, u^{j,\sigma})
+ in Hamiltonian form (note that we don't allow possible explicit dependence of the f^i's on x),
+ that is, in the form
+ (16)   u^i_t = \sum_{s=0}^{d} P^{ij}_s \partial_x^s \frac{\delta}{\delta u^j} \int h(u^k, u^{k,\sigma})\,dx,
+ where H = \int h(u^k, u^{k,\sigma})\,dx is the Hamiltonian functional and P = \sum_{s=0}^{d} P^{ij}_s \partial_x^s,
+ P^{ij}_s \in A, defines a bi-vector which in the language of densities can be written as
+ (17)   \{u^i(x), u^j(y)\}_P = \sum_{s=0}^{d} P^{ij}_s \partial_x^s \delta(x - y)
+ (in this paper bi-vectors and, more generally, multivectors are assumed to be skew-symmetric
+ by default).
348
+ In addition to the language of densities, there is a very convenient formalism, the so-called
+ θ-formalism, to encode the variational multivectors [Get02], see also [IVV02]. Namely, extend
+ the space A to a space \hat{A} := A[[\theta_i^\sigma, i = 1, \dots, N, \sigma \geq 0]], where \theta_i^\sigma are formal odd
+ variables. We often denote \theta_i^0 by \theta_i, and we extend the \partial_x operator to \hat{A} as
+ \partial_x := \sum_{s=0}^{\infty} \big( u^{i,s+1}\partial_{u^{i,s}} + \theta_i^{s+1}\partial_{\theta_i^s} \big).
+ The \deg_{\partial_x}-gradation is extended to \hat{A} by \deg_{\partial_x} \theta_i^\sigma = \sigma, and there is a natural
+ θ-degree given by \deg_\theta u^{i,\sigma} = 0 and \deg_\theta \theta_i^\sigma = 1. Let \hat{A}^p denote the subspace of
+ \hat{A} of θ-degree p. We can consider it as a space of densities of variational p-vectors. Let
+ \hat{A}^p_d := \hat{A}_d \cap \hat{A}^p.
+ The space \hat{F} := \hat{A}/\partial_x\hat{A} can be considered as the space of variational multivectors. It
+ inherits under the projection \int : \hat{A} \to \hat{F} both gradations, \deg_{\partial_x} and \deg_\theta, and
+ \hat{F}^p_d denotes its subspace of p-vectors of differential degree \deg_{\partial_x} = d. The Schouten
+ bracket is defined as
+ (18)   [\![ \int P, \int Q ]\!] = \int \big( (-1)^{\deg_\theta P} \delta_{u^i}P\,\delta_{\theta_i}Q + \delta_{\theta_i}P\,\delta_{u^i}Q \big)
+ for P, Q \in \hat{A}, where \delta_{u^i} := \sum_{s=0}^{\infty} (-\partial_x)^s \partial_{u^{i,s}} and
+ \delta_{\theta_i} := \sum_{s=0}^{\infty} (-\partial_x)^s \partial_{\theta_i^s}. Various cohomological computations in terms of
+ this space and related formalism allow one to efficiently control the deformation theory of
+ Poisson bi-vectors and their pencils, see e.g. [LZ05; LZ11; DLZ06; LZ13a; CPS18; CKS18;
+ CPS16a; CPS16b; CCS17; CCS18].
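+ For later reference we also recall the standard criterion expressed through this bracket (a
+ well-known fact, stated here as a reminder rather than quoted from this paper):
+ 
+ % A bi-vector P defines a Poisson structure precisely when its Schouten
+ % square vanishes, and P - \lambda Q is a Poisson pencil (bi-Hamiltonian
+ % structure) when, in addition, the mixed bracket vanishes:
+ \[
+   [\![P, P]\!] = 0, \qquad [\![P, Q]\!] = 0, \qquad [\![Q, Q]\!] = 0 .
+ \]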
399
+ However, studying the action of the group of Miura-reciprocal transformations we cannot
+ restrict ourselves to the local Poisson bi-vectors. As we have seen, the reciprocal
+ transformations generate non-locality of some very particular shape, and in terms of the
+ operator P we have to extend its possible shape to
+ (19)   P = \sum_{s=0}^{d} P^{ij}_s \partial^s + u^{i,1}\partial_x^{-1} V^j + V^i \partial_x^{-1} u^{j,1},   P^{ij}_s, V^i \in A.
417
+ Hamiltonian operators of the form above with d = 1, P^{ij}_1 = g^{ij}(u) (\det g^{ij} \neq 0),
+ P^{ij}_0 = -g^{il}\Gamma^j_{lk}u^k_x, and V^i = V^i_j(u)\,u^j_x were studied by Ferapontov in [Fer95a].
+ They belong to the larger class of weakly non-local operators, that was introduced in [MN01].
+ Like in the local case the coefficients g^{ij} define a metric and the coefficients \Gamma^j_{lk} the
+ Christoffel symbols of the associated Levi-Civita connection, but unlike in the local case the
+ metric is no longer flat. It turns out that the Riemann tensor R and the tensor field V defining
+ the non-local tail satisfy the conditions
+       g_{is}V^s_j = g_{js}V^s_i,   \nabla_j V^k_i = \nabla_i V^k_j,
+       R^{ij}_{kl} = V^i_k\delta^j_l + V^j_l\delta^i_k - V^j_k\delta^i_l - V^i_l\delta^j_k.
+ These are a particular instance of Ferapontov's conditions for weakly non-local Hamiltonian
+ operators of hydrodynamic type [Fer95b]. An algorithm to compute such conditions for general
+ weakly non-local Hamiltonian operators has been developed in [CLV20] and implemented in
+ three different computer algebra systems in [Cas+22].
451
+ A natural question here is how to extend the θ-formalism briefly recalled above to accommo-
452
+ date this type of non-locality. There are two recipes in the literature given in [LZ11] (specific
453
+ for this case) and [LV20] (suitable for general weakly non-local operators). The identification of
454
+ the two approaches should indirectly follow from the uniqueness arguments in [LZ11], but we
455
+ wanted to establish an explicit identification. We do it by an explicit computation in Section 3.
456
+ Remark 1.5. It is important to comment on the action of the operator \partial_x^{-1}. It can be
+ defined on \partial_x A by \partial_x^{-1}(\partial_x(f)) = f + C for any f \in A, where C is some constant.
+ For a more general element g \in A, g \notin \partial_x A, we can represent \partial_x^{-1}(g), for
+ instance, as an element of a localization of A given by A((\tfrac{1}{u^{1,1}})), that is, we can
+ perturbatively represent it as a series C + \sum_{i=1}^{\infty} \frac{h_i}{(u^{1,1})^i} with h_i \in A
+ such that \partial_{u^{1,1}} h_i = 0 (this idea comes from [DLZ06]); here C is also a constant.
+ Both approaches that we compare assert that for the analysis of the weakly non-local Poisson
+ bi-vectors of localizable shape it is sufficient to formally apply \partial_x^{-1} to just one element
+ -u^{i,1}\theta_i \in \hat{A} and denote the result by \zeta, which has a different meaning in these two
+ approaches. The subsequent usage of \zeta in computations implies that the extra constant that
+ might occur by inverting \partial_x is uniformly set to C = 0.
478
+ 1.4. Localizability. Consider a dispersive weakly non-local Poisson structure of localizable
+ shape given by
+ (20)   P = \sum_{d=1}^{\infty} \epsilon^{d-1} \Big( \sum_{s=0}^{d} P^{ij}_{d,d-s}\partial^s + u^{i,1}\partial_x^{-1}V^j_d + V^i_d\partial_x^{-1}u^{j,1} \Big),   P^{ij}_{d,k}, V^i_k \in A_k.
+ The leading term (d = 1) of this structure is a Poisson structure of hydrodynamic type, and
+ thus the full Poisson structure can be thought of as a deformation of a Poisson structure of
+ hydrodynamic type. If \det P^{ij}_{1,0} \neq 0, Liu and Zhang prove in [LZ11] that there is always
+ an element in R that turns P into a constant local Poisson structure \eta^{ij}\partial_x. In the case
+ of a purely local structure the same result is established under the action of the group G in
+ [Get02] (see also [DMS05] and [DZ01]), and in the case \epsilon = 0 (that is, a purely degree 1
+ case) it is established under the action of the group R_I in [LZ11] for N = 1, 2 and in [FP03]
+ for N \geq 3.
510
+ Now consider a pencil P − λQ of dispersive weakly non-local Poisson structure of localizable
511
+ shape. Let us fix the leading term (P − λQ)|ǫ=0 of the pencil and assume it is semi-simple. In
512
+ the purely local case (that is, under the additional assumption that both P and Q are purely
513
+ local), it was suggested in [LZ05; DLZ06] (see also [Lor02] for the scalar case) and proved
514
+ in [LZ13a] (the N = 1 case) and [CPS18; CKS18] (any N ≥ 1) that the space of orbits of the
+ action of G_II on such pencils is naturally parametrized by N smooth functions of one variable,
+ called the central invariants.
520
+ In Section 4 we generalize these results in the following way. Let us fix the leading term
521
+ (P − λQ)|ǫ=0 of the pencil and assume that P|ǫ=0 and Q|ǫ=0 are simultaneously localizable
522
+ under the action of the group RI. We also still assume that (P − λQ)|ǫ=0 is semi-simple. In
523
+ this case, we prove that the set of orbits of the action of RII on such pencils is also naturally
524
+ parametrized by N smooth functions of one variable. Note that while the statement is literally
525
+ the same as in the purely local case, it is quite different as both the group and the space of
526
+ structures on which the group acts is much bigger. We show that it is still possible to read the
527
+ central invariants from the symbol of the pencil.
528
+ This result is proved by a direct application of various techniques and results proposed
529
+ in [LZ11; LZ13a; CPS18; CKS18]. From the comparison with the computations in the local
530
+ case, we obtain the following extra result: under the assumptions above, each orbit of RII
531
+ contains a purely local representative. In other words, we prove that if the leading term of a
532
+ semi-simple pencil P −λQ of dispersive weakly non-local Poisson structure of localizable shape
533
+ is localizable by the action of the group RI, then the whole pencil is localizable by the action
534
+ of the group R.
535
+ It is worth mentioning that this result also generalizes and puts in the right context a theorem
536
+ of Liu and Zhang [LZ11, Theorem 1.3] that states that if two local Poisson pencils with the
537
+ leading semi-simple hydrodynamic term are related by a reciprocal transformation, then their
538
+ central invariants are the same.
539
+ 1.5. Projective group and Doyle–Potëmin form. Finally, we consider the action of the
+ group P ⊂ R_I. It is quite a small group with transparent structure, and we expect that in
+ general the orbits of its action should have a rich geometric structure. In this paper we find a
+ surprising connection of this group to a conjecture of Mokhov on the possible form of the local
+ Poisson structures of differential degree \deg_{\partial_x} \geq 2.
544
+ It was independently proved by Doyle [Doy93] and Potëmin [Pot91; Pot97] that homogeneous
+ local Poisson structures of differential degree d = 2, 3, i.e. of the form
+ (21)   \sum_{s=0}^{d} P^{ij}_{d-s}\partial_x^s,   P^{ij}_k \in A_k,
+ can always be transformed by the action of the group G_I to an operator of the shape
+ (22)   \partial_x \circ \sum_{s=0}^{d-2} Q^{ij}_{d-2-s}\partial_x^s \circ \partial_x,   Q^{ij}_k \in A_k.
+ Mokhov made the following interesting conjecture:
567
+ Conjecture 1.6 (See [Mok98, Proposition 2.3 and text afterwards]). Let
+ P = \sum_{e=1}^{d+2} P^{ij}_e \partial_x^{d+2-e} be a local operator of homogeneous differential order
+ d + 2 (that is, \deg_{\partial_x} P^{ij}_e = e), d \geq 0. Assume that P defines a Poisson bracket.
+ Then there exists a local skew-symmetric operator Q^{ij} of homogeneous differential order d such
+ that P = \partial_x \circ Q^{ij} \circ \partial_x.
576
+ The form (22) is called the Doyle–Potëmin form of a local homogeneous bi-vector of differ-
+ ential degree \deg_{\partial_x} \geq 2. It was recently proved that, in the cases of homogeneous local
+ Poisson structures of degree d = 2 [VV] and d = 3 [FPV14], the form (22) is preserved by the
+ action of the group P. In Section 5, thanks to our change of coordinates formulae from
+ Subsection 1.2, we generalize the above results to local homogeneous bi-vectors (i.e., not
+ necessarily Poisson structures) of degree d ≥ 2 and prove that the group P preserves the set of
+ local bi-vectors in Doyle–Potëmin form. A nice example of application to a Hamiltonian operator
+ for the Dubrovin–Zhang hierarchy is pointed out.
585
+
586
588
+ 1.6. Acknowledgments. S. S. and R. V. were supported by the Netherlands Organization
589
+ for Scientific Research. P. L. and R. V. are supported by funds of INFN (Istituto Nazionale di
590
+ Fisica Nucleare) by IS-CSN4 Mathematical Methods of Nonlinear Physics. P. L. is supported
591
+ by funds of H2020-MSCA-RISE-2017 Project No. 778010 IPaDEGAN. P. L. and R. V. are also
592
+ thankful to GNFM (Gruppo Nazionale di Fisica Matematica) for supporting activities that
593
+ contributed to the research reported in this paper.
594
+ 2. Formulae for the action
+ The goal of this Section is to compute from scratch the effect of general reciprocal
+ differential substitutions on variational (multi)vector fields and related geometric objects. It
+ is clear that a reciprocal differential substitution given by dy = B\,dx (or y = P in the
+ holonomic case), w^i = Q^i, also yields a coordinate change of the y-derivative variables:
+ (23)   w^{i,\tau} = \partial_y^\tau Q^i = \Big( \frac{1}{B}\,\partial_x \Big)^{\tau} Q^i.
606
+ It is convenient to introduce the Fréchet derivative¹ of a differential function F \in A as
+ (24)   \ell_F(X) = (\ell_F)_i(X^i) = \sum_{\sigma=0}^{\infty} \frac{\partial F}{\partial u^{i,\sigma}}\,\partial_x^\sigma X^i,
+ where X = X^i \delta_{u^i} is a variational vector field. The formal adjoint of the above operator is
+ (25)   (\ell^*_F)_i = \sum_{\sigma=0}^{\infty} (-\partial_x)^\sigma \circ \frac{\partial F}{\partial u^{i,\sigma}},
+ acting on covector-valued densities.
625
+ A change of coordinates formula for Hamiltonian operators under the action of differential
626
+ substitutions was already given in [Mok87; Olv88]. We rephrase the arguments of the proof
627
+ in [Olv88] and obtain change of coordinates formulae for the geometric objects that we listed
628
+ in Subsection 1.2 which turn out to be valid in the more general case of reciprocal differential
629
+ substitutions.
630
+ We observe that also in [LZ11] there are formulae for coordinate change, but their validity
631
+ is limited to the action of Miura reciprocal transformations on operators of localizable shape,
632
+ while we do not have this limitation.
633
+ First of all, we provide a formula for the coordinate change of a variational vector field
+ under a differential substitution.
+ Proposition 2.1. Let X^i\delta_{u^i} = Y^i\delta_{w^i} be a variational vector field in the coordinate
+ systems (x, u^{i,\sigma}) and (y, w^{i,\sigma}), respectively, where the latter coordinate system is
+ related to the former by a holonomic reciprocal differential substitution y = P, w^i = Q^i. Then
+ the following change of coordinates formula holds:
+ (26)   Y^j = \frac{1}{\partial_x P}\, D^j_i(X^i),
+ where
+ (27)   D^j_i = \partial_x P\,(\ell_{Q^j})_i - \partial_x Q^j\,(\ell_P)_i.
648
+ Proof. The proof uses arguments that provide a change of coordinates formula for Euler–
+ Lagrange operators in [Olv93], Theorem 4.8 and Exercise 5.49. Let
+ (28)   u^i = f^i(x),  x \in \Omega,    w^i = g^i(y),  y \in \tilde{\Omega}
+ be functions that are put in correspondence by the transformation. We can consider a one-
+ parameter family of such functions defined by the variation field X^i\partial_{u^i}:
+ (29)   u^i_\epsilon = f^i(x, \epsilon) = f^i(x) + \epsilon X^i(x),
+ where X^i\partial_{u^i} has compact support in \Omega. Its transformed version
+ (30)   w^i_\epsilon = g^i(y, \epsilon) = g^i(y) + \epsilon Y^i(y) + O(\epsilon^2)
+ is determined by the formulae
+ (31)   y = P(x, \partial_x^\sigma(f^j(x) + \epsilon X^j(x))),    w^i_\epsilon = Q^i(x, \partial_x^\sigma(f^j(x) + \epsilon X^j(x))).
+ Since the variation has compact support on \Omega, each g^i(y, \epsilon) is defined on a common
+ compact domain \tilde{\Omega} = \{x \in \Omega \mid y = P(x, \partial_x^\sigma f^j(x))\}. The transformed
+ variation field is given by Y^i(y) = \partial_\epsilon g^i(y, \epsilon)\big|_{\epsilon=0}. As variation
+ fields do not depend on \epsilon we have
+ (32)   \partial_\epsilon y = 0 = \partial_x P\,\partial_\epsilon x + \sum_{\sigma=0}^{\infty} \partial_{u^{j,\sigma}}P\,\partial_x^\sigma X^j,
+ hence
+ (33)   \partial_\epsilon x\big|_{\epsilon=0} = -\frac{1}{\partial_x P} \sum_{\sigma=0}^{\infty} \partial_{u^{j,\sigma}}P\,\partial_x^\sigma X^j.
+ We have:
+ (34)   Y^j = \partial_\epsilon g^j(y, \epsilon)\big|_{\epsilon=0} = \sum_{\sigma=0}^{\infty} \partial_{u^{i,\sigma}}Q^j\,\partial_x^\sigma \partial_\epsilon f^i(x, \epsilon)\big|_{\epsilon=0} + \partial_x Q^j\,\partial_\epsilon x\big|_{\epsilon=0}
+        = \frac{1}{\partial_x P}\big( \partial_x P\,(\ell_{Q^j})_i - \partial_x Q^j\,(\ell_P)_i \big) X^i. \qquad \square
+ ¹ It should be the Gateaux derivative, but "Fréchet" is prevailing in the literature.
722
+ In the non-holonomic case, we have to regard the differential function P as the primitive of
+ a differential function B, P = \partial_x^{-1}B, and we obtain the following Corollary.
+ Corollary 2.2. In the non-holonomic case of the reciprocal differential substitution dy = B\,dx,
+ w^i = Q^i, the following change of coordinates formula holds for a variational vector field
+ X^i\delta_{u^i} = Y^i\delta_{w^i}:
+ (35)   Y^j = \frac{1}{B}\, D^j_i(X^i),
+ where
+ (36)   D^j_i = B\,(\ell_{Q^j})_i - \partial_x Q^j \circ \partial_x^{-1} \circ (\ell_B)_i.
+ Note that we used the property \ell_B \circ \partial^{-1} = \partial^{-1} \circ \ell_B, which is very
+ useful in computations.
+ Dualizing the computation above, we obtain the change of coordinates formula for the
+ Euler–Lagrange operator.
+ Corollary 2.3. Let the coordinate systems (x, u^{i,\sigma}) and (y, w^{i,\sigma}) be related by a
+ reciprocal differential substitution dy = B\,dx, w^i = Q^i, and let E^x_i, E^y_i be the Euler–
+ Lagrange operators with respect to the coordinates (x, u^{i,\sigma}) and (y, w^{i,\sigma}). Then the
+ change of coordinates formula is
+ (37)   E^y_i = (D^*)^k_i \circ E^x_k,
+ with D given by Equation (36).
+ In the holonomic case, the formula reduces to the known formula in [Olv93, Exercise 5.49]
+ (with D given by Equation (27)). In the particular case of a differential substitution of the
+ dependent variables only, we have \ell_B = 0 and the above formula reduces to the well-known
+ formula E^x_i = (\ell^*_{Q^k})_i \circ E^y_k.
760
+
761
763
+ Corollary 2.4. Consider a covector-valued density \Xi_i\,du^i \otimes dx = \Psi_i\,dw^i \otimes dy
+ in the coordinate systems (x, u^{i,\sigma}) and (y, w^{i,\sigma}) related by dy = B\,dx, w^i = Q^i.
+ Then we have the following change of coordinates formula:
+ (38)   \Xi_i = (D^*)^k_i(\Psi_k),
+ where D is as in Equation (36) (or as in Equation (27) in the holonomic case y = P).
770
+ Finally, we obtain the following:
771
+ Proposition 2.5. Consider a reciprocal differential substitution dy = B\,dx, w^i = Q^i, and let
+ P^{ij}_x, P^{ij}_y be the coordinate expressions of a (possibly non-local or non-Poisson) bi-vector
+ with respect to the coordinates (x, u^{i,\sigma}) and (y, w^{i,\sigma}). Then we have the change of
+ coordinates formula
+ (39)   P^{hk}_y = \frac{1}{B}\,(D)^h_i\, P^{ij}_x\, (D^*)^k_j,
+ where D is as in Equation (36).
788
+ Proof. The proposition has already been proved in [Mok87; Olv88] for the particular case
+ of Hamiltonian operators and differential substitutions. In our case, the proof follows from
+ the fact that P^{ij} maps covector-valued densities into variational vector fields. So, we can
+ use the change of coordinates formulae for these two geometric objects (independently of the
+ Hamiltonian property) and obtain the above result, which holds also in the case of (nonlocal)
+ reciprocal differential substitutions.
795
+
796
+ Remark 2.6. The same argument can be applied to multivector fields considered as maps from
+ multicovector-valued densities to variational vector fields. For instance, in the same set-up as
+ Proposition 2.5 let T^{ijk} and \tilde{T}^{ijk} be the coordinate expressions of a trivector. Then
+ (40)   \tilde{T}^{ijk} = \frac{1}{B}\,(D)^i_m\, T^{mnp}\big((D^*)^j_n, (D^*)^k_p\big).
805
+ 2.1. The Ferapontov–Pavlov formula. Let us apply a special case of Proposition 2.5 to a local
+ Poisson bi-vector of order \deg_{\partial_x} = 1 and a reciprocal transformation in R that only
+ changes the independent variable. This should reproduce the Ferapontov–Pavlov formula first
+ derived in [FP03, Section 3] (based on [Fer95a]). Consider the change of x given by
+ (41)   \partial_x = B\,\partial_y,   \partial_y^{-1} B^{-1} = \partial_x^{-1},
+ as an element of R_I, that is, we assume that B = B(u^j). Let a local Poisson bracket of
+ differential degree 1 be given by the operator
+ (42)   P^{ij} := g^{ij}\partial_x + \Gamma^{ij}_k u^k_x,   (P^*)^{ji} = -P^{ij}.
823
+ Convention 2.7. Throughout the computations in this Section it is important for us to distin-
+ guish between \partial_x u^k and \partial_y u^k, so we use the notation u^k_x and u^k_y rather
+ than u^{k,1}.
827
+ Proposition 2.8. The action of the reciprocal transformation (41) on the operator (42) pro-
+ duces a weakly non-local operator of localizable shape, whose local part is given explicitly as
+ (43)   g^{ij}B^2\partial_y + \Gamma^{ij}_k B^2 u^k_y
+        - \tfrac{1}{2}\, g^{i\ell}B^2 \Big( g_{\ell m}\frac{\partial B^{-2}}{\partial u^k} + g_{km}\frac{\partial B^{-2}}{\partial u^\ell} - g_{\ell k}\frac{\partial B^{-2}}{\partial u^m} \Big) g^{mj}B^2\, u^k_y,
+ and the non-local part is equal to
+ (44)   \Big( P^{i\ell}\Big(\frac{\partial B}{\partial u^\ell}\Big) - \tfrac{1}{2}\, u^i_y\, \frac{\partial B}{\partial u^k} g^{k\ell} \frac{\partial B}{\partial u^\ell} \Big) \partial_y^{-1} u^j_y
+        + u^i_y\, \partial_y^{-1} \Big( P^{jk}\Big(\frac{\partial B}{\partial u^k}\Big) - \tfrac{1}{2}\, \frac{\partial B}{\partial u^k} g^{k\ell} \frac{\partial B}{\partial u^\ell}\, u^j_y \Big).
877
+ Remark 2.9. Note that \Gamma^{ij}_k B^2 - \tfrac{1}{2} g^{i\ell}B^2 \big( g_{\ell m}\partial_{u^k}B^{-2} + g_{km}\partial_{u^\ell}B^{-2} - g_{\ell k}\partial_{u^m}B^{-2} \big) g^{mj}B^2
+ is exactly the covariant Christoffel symbol for the metric g^{ij}B^2, so we indeed reproduce the
+ Ferapontov–Pavlov formula in [FP03].
882
+
883
+ Proof of Proposition 2.8. By Proposition 2.5 the bi-vector P^{ij} is transformed under the
+ substitution (41) to
+ (45)   \frac{1}{B}\Big( B\delta^i_k - u^i_x\, \partial_x^{-1}\, \frac{\partial B}{\partial u^k} \Big) P^{k\ell} \Big( B\delta^j_\ell + \frac{\partial B}{\partial u^\ell}\, \partial_x^{-1}\, u^j_x \Big).
+ Expanding the brackets, we have the following four summands (we intentionally keep derivatives
+ in x instead of y as long as possible):
+ (46)   -\frac{1}{B}\, u^i_x \partial_x^{-1} \frac{\partial B}{\partial u^k} P^{k\ell} \frac{\partial B}{\partial u^\ell} \partial_x^{-1} u^j_x
+        = -\frac{1}{2B}\, u^i_x \partial_x^{-1} \Big( \partial_x\, \frac{\partial B}{\partial u^k} g^{k\ell} \frac{\partial B}{\partial u^\ell} + \frac{\partial B}{\partial u^k} g^{k\ell} \frac{\partial B}{\partial u^\ell}\, \partial_x \Big) \partial_x^{-1} u^j_x
+        = -\frac{1}{2B}\, u^i_x\, \frac{\partial B}{\partial u^k} g^{k\ell} \frac{\partial B}{\partial u^\ell}\, \partial_x^{-1} u^j_x - \frac{1}{2B}\, u^i_x \partial_x^{-1} \frac{\partial B}{\partial u^k} g^{k\ell} \frac{\partial B}{\partial u^\ell}\, u^j_x
+        = -\frac{1}{2}\, u^i_y\, \frac{\partial B}{\partial u^k} g^{k\ell} \frac{\partial B}{\partial u^\ell}\, \partial_y^{-1} u^j_y - \frac{1}{2}\, u^i_y \partial_y^{-1} \frac{\partial B}{\partial u^k} g^{k\ell} \frac{\partial B}{\partial u^\ell}\, u^j_y;
+ (47)   \frac{1}{B}\, B\delta^i_k\, P^{k\ell} \frac{\partial B}{\partial u^\ell} \partial_x^{-1} u^j_x
+        = P^{i\ell}\Big(\frac{\partial B}{\partial u^\ell}\Big) \partial_x^{-1} u^j_x + g^{i\ell} \frac{\partial B}{\partial u^\ell}\, u^j_x
+        = P^{i\ell}\Big(\frac{\partial B}{\partial u^\ell}\Big) \partial_y^{-1} u^j_y + g^{i\ell} \frac{\partial B}{\partial u^\ell}\, B u^j_y;
+ (48)   -\frac{1}{B}\, u^i_x \partial_x^{-1} \frac{\partial B}{\partial u^k} P^{k\ell} B \delta^j_\ell
+        = -\frac{1}{B}\, u^i_x \partial_x^{-1} (P^*)^{kj}\Big(\frac{\partial B}{\partial u^k}\Big) B - \frac{1}{B}\, u^i_x\, \frac{\partial B}{\partial u^k} g^{kj} B
+        = u^i_y\, \partial_y^{-1} P^{jk}\Big(\frac{\partial B}{\partial u^k}\Big) - u^i_y\, \frac{\partial B}{\partial u^k} g^{kj} B;
+ (49)   \frac{1}{B}\, B\delta^i_k\, P^{k\ell}\, B \delta^j_\ell = P^{ij} B.
+ Thus the non-local term is given by Equation (44), and the local term is given by
+ (50)   P^{ij}B + g^{i\ell} \frac{\partial B}{\partial u^\ell} B u^j_y - u^i_y\, \frac{\partial B}{\partial u^k} g^{kj} B
+        = g^{ij}B^2\partial_y + g^{ij}B\,\frac{\partial B}{\partial u^k}\,u^k_y + \Gamma^{ij}_k B^2 u^k_y - \frac{1}{2}\, g^{i\ell}B^4\frac{\partial B^{-2}}{\partial u^\ell}\, u^j_y + \frac{1}{2}\, u^i_y\, \frac{\partial B^{-2}}{\partial u^k} g^{kj}B^4,
+ where the latter expression is equal to (43). \qquad \square
1036
+ 2.2. Weakly non-local bi-vectors of localizable shape. The goal of this Section is to prove
+ that the space of weakly non-local bi-vectors of localizable shape is closed under the action of
+ reciprocal differential substitutions. We narrow the scope to the Miura-type substitutions R
+ as in Definition 1.2. Let us consider the effect of a reciprocal transformation of the form (41)
+ on a general weakly nonlocal bi-vector of localizable shape:
+ (51)   P^{ij} = \sum_{d=1}^{\infty} \epsilon^{d-1} \Big( \sum_{s=0}^{d} P^{ij}_{d,d-s}\partial_x^s + u^{i,1}\partial_x^{-1}V^j_d + V^i_d\partial_x^{-1}u^{j,1} \Big) = P^{ij}_{loc} + P^{ij}_{nonloc},
+ where P^{ij}_{d,d-s} \in A_{d-s} and V^i_d \in A_d. Note that both P^{ij}_{loc} and P^{ij}_{nonloc}
+ define skew-symmetric bi-vectors, that is, (P^*_{loc})^{ij} = -P^{ji}_{loc} and
+ (P^*_{nonloc})^{ij} = -P^{ji}_{nonloc}.
+ Proposition 2.10. Consider a Miura-reciprocal transformation in R given by dy = B\,dx,
+ w^i = Q^i. Under this transformation any weakly non-local bi-vector P^{ij} of localizable
+ shape (51) is transformed into a weakly non-local bi-vector of localizable shape.
+ Remark 2.11. In principle, this proposition follows from the arguments of [LZ11] and [FP03].
+ However, it can also be directly obtained using Proposition 2.5.
1079
+ 12
1080
+ P. LORENZONI, S. SHADRIN, AND R. VITOLO
1081
+ Proof. Repeating mutatis mutandis the proof of Proposition 2.8 one can check that the local
1082
+ part P ij
1083
+ loc produces a weakly non-local operator of localizable shape (the only thing that matters
1084
+ for that computation is skew-symmetry of the bi-vector defined by P ij
1085
+ loc).
1086
+ So let us focus
1087
+ on the non-local part P ij
1088
+ nonloc = ui
1089
+ x∂−1
1090
+ x V j + V i∂−1
1091
+ x uj
1092
+ x, where V i = �∞
1093
+ d=1 ǫd−1V i
1094
+ d and we use
1095
+ Convention 2.7 here and below in computations. We have:
1096
+ B−1
1097
+
1098
+ B ∂Qi
1099
+ ∂uk,s∂s
1100
+ x − Qi
1101
+ x∂−1
1102
+ x
1103
+ ∂B
1104
+ ∂uk,s∂s
1105
+ x
1106
+
1107
+
1108
+
1109
+ uk
1110
+ x∂−1
1111
+ x V l + V k∂−1
1112
+ x ul
1113
+ x
1114
+
1115
+ (52)
1116
+
1117
+ (−∂x)t ◦ ∂Qj
1118
+ ∂ul,tB + (−∂x)t ◦ ∂B
1119
+ ∂ul,t∂−1
1120
+ x Qj
1121
+ x
1122
+
1123
+ (we omit the summation over s and t for brevity). We compute (52) as follows. First, note that
1124
+ ∂Qi
1125
+ ∂uk,s∂s
1126
+ x ◦ uk
1127
+ x∂−1
1128
+ x V l(−∂x)t ◦ ∂Qj
1129
+ ∂ul,tB = Qi
1130
+ x∂−1
1131
+ x (ℓQj)l(V l)B + loc;
1132
+ (53)
1133
+ ∂Qi
1134
+ ∂uk,s∂s
1135
+ x ◦ V k∂−1
1136
+ x ul
1137
+ x(−∂x)t ◦ ∂Qj
1138
+ ∂ul,tB = (ℓQi)k(V k)∂−1
1139
+ x Qj
1140
+ xB + loc.
1141
+ Here loc are the terms where we collect some purely local operators. Furthermore,
1142
+ − 1
1143
+ B Qi
1144
+ x∂−1
1145
+ x
1146
+ ∂B
1147
+ ∂uk,s∂s
1148
+ x ◦ uk
1149
+ x∂−1
1150
+ x V l(−∂x)t ◦ ∂Qj
1151
+ ∂ul,tB = − 1
1152
+ B Qi
1153
+ x∂−1
1154
+ x Bx∂−1
1155
+ x (ℓQj)l(V l)B − 1
1156
+ B Qi
1157
+ x∂−1
1158
+ x Oj
1159
+ BuV QB;
1160
+ (54)
1161
+ − 1
1162
+ B Qi
1163
+ x∂−1
1164
+ x
1165
+ ∂B
1166
+ ∂uk,s∂s
1167
+ x ◦ V k∂−1
1168
+ x ul
1169
+ x(−∂x)t ◦ ∂Qj
1170
+ ∂ul,tB = − 1
1171
+ B Qi
1172
+ x∂−1
1173
+ x (ℓB)k(V k)∂−1
1174
+ x Qj
1175
+ xB − 1
1176
+ B Qi
1177
+ x∂−1
1178
+ x Oj
1179
+ BV uQB;
1180
+ ∂Qi
1181
+ ∂uk,s∂s
1182
+ x ◦ uk
1183
+ x∂−1
1184
+ x V l(−∂x)t ◦ ∂B
1185
+ ∂ul,t∂−1
1186
+ x Qj
1187
+ x = Qi
1188
+ x∂−1
1189
+ x (ℓB)l(V l)∂−1
1190
+ x Qj
1191
+ x + Oi
1192
+ QuV B∂−1
1193
+ x Qj
1194
+ x;
1195
+ ∂Qi
1196
+ ∂uk,s∂s
1197
+ x ◦ V k∂−1
1198
+ x ul
1199
+ x(−∂x)t ◦ ∂B
1200
+ ∂ul,t∂−1
1201
+ x Qj
1202
+ x = (ℓQi)l(V l)∂−1
1203
+ x Bx∂−1
1204
+ x Qj
1205
+ x + Oi
1206
+ QV uB∂−1
1207
+ x Qj
1208
+ x.
1209
+ Here Oj
1210
+ BuV Q, Oj
1211
+ BV uQ, Oi
1212
+ QuV B, and Oi
1213
+ QV uB are some scalar local operators, whose main property
1214
+ is that (Oj
1215
+ BuV Q)∗ = −Oj
1216
+ QV uB and (Oj
1217
+ BV uQ)∗ = −Oj
1218
+ QuV B.
1219
+ We omit their explicit formulas.
1220
+ Finally,
1221
+ − 1
1222
+ B Qi
1223
+ x∂−1
1224
+ x
1225
+ ∂B
1226
+ ∂uk,s∂s
1227
+ x ◦ uk
1228
+ x∂−1
1229
+ x V l(−∂x)t ◦ ∂B
1230
+ ∂ul,t∂−1
1231
+ x Qj
1232
+ x = − 1
1233
+ B Qi
1234
+ x∂−1
1235
+ x Bx∂−1
1236
+ x (ℓB)l(V l)∂−1
1237
+ x Qj
1238
+ x
1239
+ (55)
1240
+ − 1
1241
+ B Qi
1242
+ x∂−1
1243
+ x OBuV B∂−1
1244
+ x Qj
1245
+ x;
1246
+ − 1
1247
+ B Qi
1248
+ x∂−1
1249
+ x
1250
+ ∂B
1251
+ ∂uk,s∂s
1252
+ x ◦ V k∂−1
1253
+ x ul
1254
+ x(−∂x)t ◦ ∂B
1255
+ ∂ul,t∂−1
1256
+ x Qj
1257
+ x = − 1
1258
+ B Qi
1259
+ x∂−1
1260
+ x (ℓB)l(V l)∂−1
1261
+ x Bx∂−1
1262
+ x Qj
1263
+ x
1264
+ − 1
1265
+ B Qi
1266
+ x∂−1
1267
+ x OBV uB∂−1
1268
+ x Qj
1269
+ x,
1270
+ where OBuV B and OBV uB are scalar local operators such that O∗
1271
+ BuV B = −OBV uB. We omit
1272
+ their explicit formulas, but we use below that OBuV B +OBV uB = − ˜O∗∂x −∂x ◦ ˜O for some local
1273
+ operator ˜O.
1274
+ Now we collect the terms together. Firstly, we list all terms with Bx that emerged in (54)
1275
+ and (55):
1276
+ − 1
1277
+ B Qi
1278
+ x∂−1
1279
+ x Bx∂−1
1280
+ x (ℓQj)l(V l)B = −Qi
1281
+ x∂−1
1282
+ x (ℓQj)l(V l)B + 1
1283
+ B Qi
1284
+ x∂−1
1285
+ x (ℓQj)l(V l)B2
1286
+ (56)
1287
+ (ℓQi)l(V l)∂−1
1288
+ x Bx∂−1
1289
+ x Qj
1290
+ x = (ℓQi)l(V l)B∂−1
1291
+ x Qj
1292
+ x − (ℓQi)l(V l)∂−1
1293
+ x Qj
1294
+ xB
1295
+ − 1
1296
+ B Qi
1297
+ x∂−1
1298
+ x Bx∂−1
1299
+ x (ℓB)l(V l)∂−1
1300
+ x Qj
1301
+ x = −Qi
1302
+ x∂−1
1303
+ x (ℓB)l(V l)∂−1
1304
+ x Qj
1305
+ x + 1
1306
+ B Qi
1307
+ x∂−1
1308
+ x B(ℓB)l(V l)∂−1
1309
+ x Qj
1310
+ x
1311
+
1312
+ MIURA-RECIPROCAL TRANSFORMATIONS AND LOCALIZABLE POISSON PENCILS
1313
+ 13
1314
+ − 1
1315
+ B Qi
1316
+ x∂−1
1317
+ x (ℓB)l(V l)∂−1
1318
+ x Bx∂−1
1319
+ x Qj
1320
+ x = − 1
1321
+ B Qi
1322
+ x∂−1
1323
+ x (ℓB)l(V l)B∂−1
1324
+ x Qj
1325
+ x + 1
1326
+ B Qi
1327
+ x∂−1
1328
+ x (ℓB)l(V l)∂−1
1329
+ x Qj
1330
+ xB
1331
+ Note some cancellations: the non-local terms in (53) cancel with the corresponding summands
1332
+ in the first and the second line of (56), two non-local terms in the second and third line of (54)
1333
+ cancel with the two terms in the third and forth line of (56), are there are two terms in the
1334
+ latter lines that cancel each other. So, modulo the purely local terms, (52) is equal to the sum
1335
+ of the following four expressions:
1336
+ 1
1337
+ B Qi
1338
+ x∂−1
1339
+ x (ℓQj)l(V l)B2 + (ℓQi)l(V l)B∂−1
1340
+ x Qj
1341
+ x = wi
1342
+ y∂−1
1343
+ y (ℓQj)l(V l)B + (ℓQi)l(V l)B∂−1
1344
+ y wj
1345
+ y;
1346
+ (57)
1347
+ 1
1348
+ B Qi
1349
+ x∂−1
1350
+ x (Oj
1351
+ QV uB)∗B + Oi
1352
+ QV uB∂−1
1353
+ x Qj
1354
+ x = 1
1355
+ B Qi
1356
+ x∂−1
1357
+ x Oj
1358
+ QV uB(1)B + Oi
1359
+ QV uB(1)∂−1
1360
+ x Qj
1361
+ x + loc
1362
+ = wi
1363
+ y∂−1
1364
+ y Oj
1365
+ QV uB(1) + Oi
1366
+ QV uB(1)∂−1
1367
+ y wj
1368
+ y + loc;
1369
+ 1
1370
+ B Qi
1371
+ x∂−1
1372
+ x (Oj
1373
+ QuV B)∗B + Oi
1374
+ QuV B∂−1
1375
+ x Qj
1376
+ x = 1
1377
+ B Qi
1378
+ x∂−1
1379
+ x Oj
1380
+ QuV B(1)B + Oi
1381
+ QuV B(1)∂−1
1382
+ x Qj
1383
+ x + loc
1384
+ = wi
1385
+ y∂−1
1386
+ y Oj
1387
+ QuV B(1) + Oi
1388
+ QuV B(1)∂−1
1389
+ y wj
1390
+ y + loc;
1391
+ − 1
1392
+ B Qi
1393
+ x∂−1
1394
+ x OBuV B∂−1
1395
+ x Qj
1396
+ x − 1
1397
+ B Qi
1398
+ x∂−1
1399
+ x OBV uB∂−1
1400
+ x Qj
1401
+ x = 1
1402
+ B Qi
1403
+ x∂−1
1404
+ x ( ˜O∗∂x + ∂x ◦ ˜O)∂−1
1405
+ x Qj
1406
+ x
1407
+ = 1
1408
+ B Qi
1409
+ x∂−1
1410
+ x
1411
+ ˜O(1)Qj
1412
+ x + 1
1413
+ B Qi
1414
+ x ˜O(1)∂−1
1415
+ x Qj
1416
+ x + loc
1417
+ = wi
1418
+ y∂−1
1419
+ y
1420
+ 1
1421
+ B
1422
+ ˜O(1)Qj
1423
+ x + 1
1424
+ B Qi
1425
+ x ˜O(1)∂−1
1426
+ y wj
1427
+ y + loc,
1428
+ which is manifestly a weakly non-local operator of localizable shape.
1429
+
1430
+ 3. Schouten bracket for weakly non-local operators of localizable shape
+ The goal of this Section is to compare two ways to encode weakly non-local Poisson structures
+ of localizable shape: the one given in [LZ11] (by design only working for the localizable shape
+ case) and the one in [LV20] (which works for the general weakly non-local case, but which we
+ specialize to the localizable shape). In principle, the identification of these two approaches
+ follows from the uniqueness property of the bracket, cf. [LZ11, Theorem 2.4.1], but we want to
+ present an explicit computation for this identification.
+ 3.1. The two approaches. In both approaches the weakly non-local p-vectors of localizable
+ shape are encoded as
+ (58)   \int P = \int (P_L + \zeta P_N),
+ where P_L \in \hat{A}^p, P_N \in \hat{A}^{p-1}, and
+ (59)   \partial_x \zeta = -u^{i,1}\theta_i.
+ The difference between the two approaches is the meaning of \zeta. In the approach of [LZ11],
+ \zeta is a new dependent variable such that \deg_{\partial_x} \zeta = 0 and \deg_\theta \zeta = 1. The
+ new space of multivector densities is defined as S := \hat{A}[\zeta], equipped with the operator
+ (60)   \partial_x = -u^{i,1}\theta_i\partial_\zeta + \sum \big( u^{i,d+1}\partial_{u^{i,d}} + \theta^{d+1}_i\partial_{\theta^d_i} \big),
+ and the space of weakly non-local multivectors of localizable shape is defined as E := S/\partial_x S.
+ In the approach of [LV20], \zeta is not a new dependent variable, but rather an expression in
+ the existing dependent variables (still of differential degree \deg_{\partial_x} \zeta = 0 and
+ multivector degree \deg_\theta \zeta = 1), such that Equation (59) is satisfied for the standard
+ operator
+ (61)   \tilde{\partial}_x = \sum \big( u^{i,d+1}\partial_{u^{i,d}} + \theta^{d+1}_i\partial_{\theta^d_i} \big).
+ For instance, one can find such a function in \hat{A}((\tfrac{1}{u^{1,1}})), cf. [DLZ06]. To this
+ end, one looks for a unique solution \tilde{\partial}_x\zeta = -u^{i,1}\theta_i of the form
+ \zeta = \sum_{i=1}^{\infty} \frac{f_i}{(u^{1,1})^i}, with f_i \in \hat{A} such that \partial_{u^{1,1}}f_i = 0.
+ Once the objects are defined, we have two different formulae for the Schouten bracket in
+ these two approaches:
+ • The formula in the approach of [LV20] is
+ (62)   [\![ \int P, \int Q ]\!] = \int \big( (-1)^{\deg_\theta P}\, \tilde{\delta}_{u^i}P\,\tilde{\delta}_{\theta_i}Q + \tilde{\delta}_{\theta_i}P\,\tilde{\delta}_{u^i}Q \big).
+ Recall that \zeta is regarded as a function of (u^{i,\sigma}, \theta^\sigma_i) in the variational
+ derivatives (which are denoted by \tilde{\delta}_{u^i} and \tilde{\delta}_{\theta_i} for that reason).
+ • The formula in the approach of [LZ11] is
+ (63)   [\![ \int P, \int Q ]\!] = \int \big( (-1)^{\deg_\theta P}\delta_{u^i}P\,\delta_{\theta_i}Q + \delta_{\theta_i}P\,\delta_{u^i}Q + (-1)^{\deg_\theta P}\hat{E}(P)\,\partial_\zeta Q + \partial_\zeta P\,\hat{E}(Q) \big).
+ Here \zeta is regarded as an extra dependent variable, and the operator \hat{E} is defined as
+ (64)   \hat{E} = \sum_{s \geq 1,\, t \geq 0} \big( u^{i,s}(-\partial_x)^t\partial_{u^{i,s+t}} + \theta^s_i(-\partial_x)^t\partial_{\theta^{s+t}_i} \big) - 1 + \theta_i\delta_{\theta_i}.
1521
+ 3.2. Identification of the two approaches. We prove the following:
1522
+ Theorem 3.1. The identity map ˆ
1523
+ A[ζ] → ˆ
1524
+ A[ζ] induces the isomorphism of the Lie algebras of
1525
+ local multivector fields defined by the Schouten brackets in these two approaches.
1526
+ Proof. We represent any density P ∈ ˆ
1527
+ A[ζ] as P = PL + ζPN and consider ζ to be a nonlocal
1528
+ function. Note that
1529
+ ˜δuiP = δuiP + (−∂x)σ (∂ui,σζPN)
1530
+ (65)
1531
+ = δuiPL + (−∂x)σ (ζ∂ui,σPN) + (−∂x)σ (∂ui,σζPN) ,
1532
+ ˜δθiP = δθiP + (−∂x)σ �
1533
+ ∂θσ
1534
+ i ζPN
1535
+
1536
+ (66)
1537
+ = δθiPL − (−∂x)σ �
1538
+ ζ∂θσ
1539
+ i PN
1540
+
1541
+ + (−∂x)σ �
1542
+ ∂θσ
1543
+ i ζPN
1544
+
1545
+ ,
1546
+ where we used that
1547
+ δuiP =δuiPL + (−∂x)σ (ζ∂ui,σPN) ,
1548
+ (67)
1549
+ δθiP =δθiPL − (−∂x)σ �
1550
+ ζ∂θσ
1551
+ i PN
1552
+
1553
+ .
1554
+ (68)
1555
+ Using these formulas, we obtain
1556
+ ˜δuiP ˜δθiQ = (δuiP + (−∂x)σ (∂ui,σζPN))
1557
+
1558
+ δθiQ + (−∂x)σ �
1559
+ ∂θσ
1560
+ i ζQN
1561
+ ��
1562
+ (69)
1563
+ = δuiPδθiQ + δuiP(−∂x)σ �
1564
+ ∂θσ
1565
+ i ζQN
1566
+
1567
+ + (−∂x)σ (∂ui,σζPN) δθiQ
1568
+ + (−∂x)σ (∂ui,σζPN) (−∂x)σ �
1569
+ ∂θσ
1570
+ i ζQN
1571
+
1572
+ ;
1573
+ ˜δθiP ˜δuiQ =
1574
+
1575
+ δθiP + (−∂x)σ �
1576
+ ∂θσ
1577
+ i ζPN
1578
+ ��
1579
+ (δuiQ + (−∂x)σ (∂ui,σζQN))
1580
+ (70)
1581
+ = δθiPδuiQ + δθiP(−∂x)σ (∂ui,σζQN) + (−∂x)σ �
1582
+ ∂θσ
1583
+ i ζPN
1584
+
1585
+ δuiQ
1586
+ + (−∂x)σ �
1587
+ ∂θσ
1588
+ i ζPN
1589
+
1590
+ (−∂x)σ (∂ui,σζQN) .
1591
+
1592
+ MIURA-RECIPROCAL TRANSFORMATIONS AND LOCALIZABLE POISSON PENCILS
1593
+ 15
1594
+ If we want to treat ζ as a new dependent variable, we have ˆE(P)∂ζQ = ˆE(P)QN (and
1595
+ similarly for the other summand in the formula), so we have to prove that
1596
+
1597
+ (−1)degθ P ˆE(P)QN + PN ˆE(Q)
1598
+ (71)
1599
+ =
1600
+
1601
+ (−1)degθ P�
1602
+ δuiP(−∂x)σ �
1603
+ ∂θσ
1604
+ i ζQN
1605
+
1606
+ + (−∂x)σ (∂ui,σζPN) δθiQ
1607
+ + (−∂x)σ (∂ui,σζPN) (−∂x)σ �
1608
+ ∂θσ
1609
+ i ζQN
1610
+ � �
1611
+ +
1612
+
1613
+ δθiP(−∂x)σ (∂ui,σζQN) + (−∂x)σ �
1614
+ ∂θσ
1615
+ i ζPN
1616
+
1617
+ δuiQ
1618
+ + (−∂x)σ �
1619
+ ∂θσ
1620
+ i ζPN
1621
+
1622
+ (−∂x)σ (∂ui,σζQN)
1623
+
1624
+ .
1625
+ Let us use the following property of the operator ˆE:
1626
+ ∂x ˆE = −ui,1δui + θi∂xδθi + ui,1θiδζ;
1627
+ (72)
1628
+ So, we obtain
1629
+
1630
+ ˆE(P)QN =
1631
+
1632
+ ∂−1
1633
+ x
1634
+
1635
+ −ui,1δuiP + θi∂xδθiP + ui,1θiPN
1636
+
1637
+ QN
1638
+ (73)
1639
+ =
1640
+
1641
+
1642
+
1643
+ −ui,1δuiP + θi∂xδθiP + ui,1θiPN
1644
+
1645
+ ∂−1
1646
+ x (QN),
1647
+
1648
+ PN ˆE(Q) =
1649
+
1650
+ PN∂−1
1651
+ x
1652
+
1653
+ −ui,1δuiQ + θi∂xδθiQ + ui,1θiQN
1654
+
1655
+ (74)
1656
+ =
1657
+
1658
+ −∂−1
1659
+ x (PN)
1660
+
1661
+ −ui,1δuiQ + θi∂xδθiQ + ui,1θiQN
1662
+
1663
+ .
1664
+ Substituting Equations (73) and (74) into (71), we see that the statement of the theorem reduces
1665
+ to the following equality:
1666
+
1667
+ (−1)degθ P +1 �
1668
+ −ui,1δuiP + θi∂xδθiP + ui,1θiPN
1669
+
1670
+ ∂−1
1671
+ x (QN)
1672
+ (75)
1673
+ − ∂−1
1674
+ x (PN)
1675
+
1676
+ −ui,1δuiQ + θi∂xδθiQ + ui,1θiQN
1677
+
1678
+ =
1679
+
1680
+ (−1)degθ P�
1681
+ δuiP(−∂x)σ �
1682
+ ∂θσ
1683
+ i ζQN
1684
+
1685
+ + (−∂x)σ (∂ui,σζPN) δθiQ
1686
+ + (−∂x)σ (∂ui,σζPN) (���∂x)σ �
1687
+ ∂θσ
1688
+ i ζQN
1689
+ � �
1690
+ +
1691
+
1692
+ δθiP(−∂x)σ (∂ui,σζQN) + (−∂x)σ �
1693
+ ∂θσ
1694
+ i ζPN
1695
+
1696
+ δuiQ
1697
+ + (−∂x)σ �
1698
+ ∂θσ
1699
+ i ζPN
1700
+
1701
+ (−∂x)σ (∂ui,σζQN)
1702
+
1703
+ .
1704
+ In order to prove this equality, our strategy is move ∂−1
1705
+ x
1706
+ in ζ = ∂−1
1707
+ x (−ui,1θi) to the other
1708
+ factor (PN or QN) using integration by parts. We have:
1709
+ δuiP(−∂x)σ �
1710
+ ∂θσ
1711
+ i ζQN
1712
+
1713
+ = δuiPui,1∂−1
1714
+ x (QN),
1715
+ (76)
1716
+ (−∂x)σ �
1717
+ ∂uiσζPN
1718
+
1719
+ δθiQ = −∂x
1720
+
1721
+ θi∂−1
1722
+ x (PN)
1723
+
1724
+ δθiQ
1725
+ (77)
1726
+ (−∂x)σ �
1727
+ ∂uiσζPN
1728
+
1729
+ (−∂x)σ �
1730
+ ∂θσ
1731
+ i ζQN
1732
+
1733
+ = −∂x
1734
+
1735
+ θi∂−1
1736
+ x (PN)
1737
+
1738
+ ui,1∂−1
1739
+ x (QN)
1740
+ (78)
1741
+ (−∂x)σ �
1742
+ ∂θσ
1743
+ i ζPN
1744
+
1745
+ δuiQ = ui,1∂−1
1746
+ x (PN) δuiQ
1747
+ (79)
1748
+ δθiP(−∂x)σ �
1749
+ ∂uiσζQN
1750
+
1751
+ = −δθiP∂x
1752
+
1753
+ θi∂−1
1754
+ x (QN)
1755
+
1756
+ (80)
1757
+ (−∂x)σ �
1758
+ ∂θσ
1759
+ i ζPN
1760
+
1761
+ (−∂x)σ �
1762
+ ∂uiσζQN
1763
+
1764
+ = −
1765
+
1766
+ ui,1∂−1
1767
+ x (PN)
1768
+
1769
+ ∂x
1770
+
1771
+ θi∂−1
1772
+ x (QN)
1773
+
1774
+ (81)
1775
+
1776
+ 16
1777
+ P. LORENZONI, S. SHADRIN, AND R. VITOLO
1778
+ Substituting the above expressions into the equality (75) that we shall prove, we are led to the
1779
+ simplified equality:
1780
+
1781
+ (−1)degθ P +1 �
1782
+ θi∂xδθiP + ui,1θiPN
1783
+
1784
+ ∂−1
1785
+ x (QN) − ∂−1
1786
+ x (PN)
1787
+
1788
+ θi∂xδθiQ + ui,1θiQN
1789
+
1790
+ (82)
1791
+ =
1792
+
1793
+ (−1)degθ P +1�
1794
+ ∂x(θi∂−1
1795
+ x (PN))δθiQ + ∂x(θi∂−1
1796
+ x (PN))ui,1∂−1
1797
+ x (QN)
1798
+
1799
+
1800
+
1801
+ δθiP∂x(θi∂−1
1802
+ x (QN)) + ui,1∂−1
1803
+ x (PN)∂x(θi∂−1
1804
+ x (QN))
1805
+
1806
+ .
1807
+ Integrating by parts the summands containing ∂xδθiP, ∂xδθiQ we obtain the further simplifica-
1808
+ tion of the equality (75) (note that degθ PN = degθ P − 1):
1809
+
1810
+ (−1)degθ P +1ui,1θiPN∂−1
1811
+ x (QN) − ∂−1
1812
+ x (PN)ui,1θiQN
1813
+ (83)
1814
+ =
1815
+
1816
+ (−1)degθ P +1∂x(θi∂−1
1817
+ x (PN))ui,1∂−1
1818
+ x (QN) − ui,1∂−1
1819
+ x (PN)∂x(θi∂−1
1820
+ x (QN))
1821
+ Expanding the total derivatives on the right-hand side we easily see that the above equality is
1822
+ an identity. This completes the proof of the theorem.
1823
+
1824
+ 4. Pencils of weakly non-local bi-vectors of localizable shape
1825
+ In this Section we compute the bi-Hamiltonian cohomology for a semi-simple pencil of weakly
1826
+ non-local Poisson bi-vectors of localizable shape of differential order deg∂x = 1 satisfying the
1827
+ extra condition: the pencil of these bi-vectors should be localizable (or, equivalently, they
1828
+ should be simultaneously localizable) with respect to the Miura-reciprocal group. As a result
1829
+ of this computation and some further arguments we prove the following theorem:
1830
+ Theorem 4.1. Let P1 and P2 be two of commuting non-local Poisson bi-vectors of localizable
1831
+ shape.
1832
+ We assume that P1 and P2 have dispersive expansion given by Pa = �∞
1833
+ i=1 ǫi−1Pa,i,
1834
+ deg∂x Pa,i = i, a = 1, 2, i = 1, 2, . . . .
1835
+ If the leading terms of degree deg∂x = 1, P1,1 and P2,1, are simultaneously localizable under
1836
+ the action of the Miura-reciprocal group and form a semi-simple Poisson pencil, then the full
1837
+ dispersive brackets P1 and P2 are simultaneously localizable under the action of the Miura-
1838
+ reciprocal group.
1839
+ In order to prove this theorem, we have to make a few preliminary computations with bi-
1840
+ Hamiltonian cohomology, following the ideas in [LZ11] subsequent steps in [CPS18; CKS18].
1841
+ 4.1. Bi-Hamiltonian cohomology.
1842
+ 4.1.1. Setup for a deformation problem. Recall that following Liu and Zhang [LZ11] we denote
1843
+ S := ˆ
1844
+ A[ζ], with ∂x : S → S given by ∂x = −ui,1θi∂ζ + � ui,d+1∂ui,d + θd+1
1845
+ i
1846
+ ∂θd
1847
+ i , and E := S/∂xS.
1848
+ Let P1, P2 ∈ S2
1849
+ 1 such that
1850
+
1851
+ P1 and
1852
+
1853
+ P2 form a pencil of Poisson structures (possibly non-
1854
+ local, but then they are automatically weakly non-local of localizable shape, since it is the only
1855
+ type of non-locality accommodated in the space E), that is, we assume that
1856
+ � �
1857
+ P2 − λP1,
1858
+
1859
+ P2 − λP1
1860
+
1861
+ = 0
1862
+ (84)
1863
+ Recall that there is a group RI of the Miura-reciprocal transformations of the 1st kind acting
1864
+ on them, see Equation (8). We assume that the pencil
1865
+
1866
+ P2−λP1 is localizable under the action
1867
+ of RI. We also assume that the pencil formed by P1 and P2 is semi-simple, which together with
1868
+ the assumption of localizability implies that the we can choose the coordinates x, u1, . . . , uN
1869
+
1870
+ MIURA-RECIPROCAL TRANSFORMATIONS AND LOCALIZABLE POISSON PENCILS
1871
+ 17
1872
+ such that the densities P1 and P2 of the bivectors
1873
+
1874
+ P1 and
1875
+
1876
+ P2 take the form
1877
+ P1 =
1878
+
1879
+ N
1880
+
1881
+ i=1
1882
+ f iθiθ1
1883
+ i
1884
+
1885
+ + Γij
1886
+ 1,kuk,1θiθj;
1887
+ (85)
1888
+ P2 =
1889
+
1890
+ N
1891
+
1892
+ i=1
1893
+ uif iθiθ1
1894
+ i
1895
+
1896
+ + Γij
1897
+ 2,kuk,1θiθj.
1898
+ (86)
1899
+ We are interested to classify the equivalence classes of the higher order dispersive deforma-
1900
+ tions of the Poisson pencil
1901
+
1902
+ P2−λP1 in E with respect to the Miura-reciprocal transformations
1903
+ of the 2nd kind, RII. Let di := adPi : E → E, i = 1, 2. Then the deformation problem is con-
1904
+ trolled by the bi-Hamiltonian cohomology BHp
1905
+ d(E, d1, d2) of cohomological degree p = 2 and
1906
+ p = 3 and of differential degrees d ≥ 2 and d ≥ 4, respectively. It is a rather standard argument,
1907
+ see e. g. [LZ11, Proposition 3.3.5]. The only extra bit that one needs in our case, that is, the
1908
+ space E and the group RII of Miura-reciprocal transformations of the 2nd kind, in comparison
1909
+ with the usual local case, that is, the space ˆF and the group GII of Miura transformations of
1910
+ the 2nd kind, is the identification of the action of the Lie algebra of RII on weakly non-local
1911
+ bi-vectors (or, more generally, multivectors) of localizable shape with the adjoint action of E1
1912
+ on E2 (resp., E). This is established in [LZ11, Theorems 2.5.7 and 2.6.5]
1913
+ 4.1.2. Bi-Hamiltonian cohomology computation. We prove the following
1914
+ Theorem 4.2. We have:
1915
+ BH2
1916
+ d(E, d1, d2) ∼=
1917
+
1918
+ 0,
1919
+ d = 2 and d ≥ 4;
1920
+ �N
1921
+ i=1 C∞(R, ui),
1922
+ d = 3.
1923
+ (87)
1924
+ BH3
1925
+ d(E, d1, d2) ∼= 0,
1926
+ d ≥ 4.
1927
+ (88)
1928
+ Proof. For the proof we use that for d ≥ 2 we have [LZ13b, Lemma 4.4]:
1929
+ BHp
1930
+ d(E, d1, d2) ∼= Hp
1931
+ d(E[λ], d2 − λd1).
1932
+ (89)
1933
+ In order to compute Hp
1934
+ d(E[λ], d2−λd1), we recall the definition of Di := DPi : S → S from [LZ11]:
1935
+ DPi := ˆE(Pi)∂ζ +
1936
+
1937
+
1938
+ s=0
1939
+ ∂s
1940
+ x
1941
+
1942
+ δujPi
1943
+
1944
+ ∂θs
1945
+ j + ∂s
1946
+ x
1947
+
1948
+ δθjPi
1949
+
1950
+ ∂uj,s,
1951
+ i = 1, 2.
1952
+ (90)
1953
+ Note that [∂x, Di] = 0 (by direct computation). We prove that it is a homological vector field
1954
+ (which is not true in general, for a non-local bi-vector Pi):
1955
+ Lemma 4.3. For a purely local bivector
1956
+
1957
+ P the operator DP does not depend on the choice of
1958
+ a purely local density P. Moreover, for purely local densities of the bivectors P, Q ∈ ˆ
1959
+ A2 and for
1960
+ any T ∈ S we have:
1961
+
1962
+ DP(T) =
1963
+ � �
1964
+ P,
1965
+
1966
+ T
1967
+
1968
+ (91)
1969
+ and
1970
+ [DP, DQ] = D[P,Q],
1971
+ (92)
1972
+ where [P, Q] = δθiPδuiQ + δuiPδθiQ.
1973
+ In particular, for a purely local density P of a Poisson bivector
1974
+
1975
+ P we have D2
1976
+ P = 0 on S.
1977
+ Remark 4.4. The statements of Lemma 4.3 do not hold for not purely local densities.
1978
+
1979
+ 18
1980
+ P. LORENZONI, S. SHADRIN, AND R. VITOLO
1981
+ Proof of Lemma 4.3. Firstly, we check the DP does not depend on the choice of a local density
1982
+ P. To this end, we remind the definitions and basic properties of ˆE and ∂x. We have:
1983
+ ∂x = −ui,1θi∂ζ +
1984
+
1985
+ ui,d+1∂ui,d + θd+1
1986
+ i
1987
+ ∂θd
1988
+ i ;
1989
+ (93)
1990
+ ˆE =
1991
+
1992
+ s≥1
1993
+ t≥0
1994
+
1995
+ ui,s(−∂x)t∂ui,s+t + θs
1996
+ i (−∂x)t∂θs+t
1997
+ i
1998
+
1999
+ − 1 + θiδθi;
2000
+ (94)
2001
+ ∂x ˆE = −ui,1δui + θi∂xδθi + ui,1θiδζ;
2002
+ (95)
2003
+ ˆE∂x = −ui,1θi∂ζ;
2004
+ (96)
2005
+ δui∂x = ∂xθi∂ζ,
2006
+ i = 1, . . . , N;
2007
+ (97)
2008
+ δθi∂x = −ui,1∂ζ,
2009
+ i = 1, . . . , N.
2010
+ (98)
2011
+ With the last three equations we immediately see that for any local X ∈ ˆ
2012
+ A
2013
+ ˆE(∂xX)∂ζ +
2014
+
2015
+
2016
+ s=0
2017
+
2018
+ ∂s
2019
+ x
2020
+
2021
+ δuj∂xX
2022
+
2023
+ ∂θs
2024
+ j + ∂s
2025
+ x
2026
+
2027
+ δθj∂xX
2028
+
2029
+ ∂uj,s
2030
+
2031
+ =
2032
+ (99)
2033
+ − ui,1θi∂ζX∂ζ +
2034
+
2035
+
2036
+ s=0
2037
+
2038
+ ∂s
2039
+ x
2040
+
2041
+ ∂x(θi∂ζX)
2042
+
2043
+ ∂θs
2044
+ j + ∂s
2045
+ x
2046
+
2047
+ − ui,1∂ζX
2048
+
2049
+ ∂uj,s
2050
+
2051
+ = 0,
2052
+ since ∂ζ = 0, which implies the first assertion of the lemma.
2053
+ Now, Equation (91) is obvious from the definition of the Schouten bracket. So we focus on
2054
+ Equation (92). Let us compute the coefficient of ∂ζ on the left hand side. Using the vanishing
2055
+ of ∂ζ derivatives, we have:
2056
+
2057
+
2058
+ s=0
2059
+
2060
+ ∂s
2061
+ x
2062
+
2063
+ δujP
2064
+
2065
+ ∂θs
2066
+ j + ∂s
2067
+ x
2068
+
2069
+ δθjP
2070
+
2071
+ ∂uj,s
2072
+
2073
+ ˆE(Q) =
2074
+ (100)
2075
+ ∂−1
2076
+ x
2077
+
2078
+
2079
+ s=0
2080
+
2081
+ ∂s
2082
+ x
2083
+
2084
+ δujP
2085
+
2086
+ ∂θs
2087
+ j + ∂s
2088
+ x
2089
+
2090
+ δθjP
2091
+
2092
+ ∂uj,s
2093
+
2094
+ (−ui,1δui + θi∂xδθi)(Q) =
2095
+ ∂−1
2096
+ x
2097
+
2098
+ δujP∂x(δθjQ) − ∂x(δθjP)δujQ
2099
+
2100
+ + ∂−1
2101
+ x (−ui,1)
2102
+
2103
+
2104
+ s=0
2105
+
2106
+ ∂s
2107
+ x
2108
+
2109
+ δujP
2110
+
2111
+ ∂θs
2112
+ j + ∂s
2113
+ x
2114
+
2115
+ δθjP
2116
+
2117
+ ∂uj,s
2118
+
2119
+ δuiQ
2120
+ + ∂−1
2121
+ x (−θi∂x)
2122
+
2123
+
2124
+ s=0
2125
+
2126
+ ∂s
2127
+ x
2128
+
2129
+ δujP
2130
+
2131
+ ∂θs
2132
+ j + ∂s
2133
+ x
2134
+
2135
+ δθjP
2136
+
2137
+ ∂uj,s
2138
+
2139
+ δθiQ
2140
+ Adding to the latter expression the same one with interchanged P and Q and using that for
2141
+ purely local densities
2142
+
2143
+
2144
+ s=0
2145
+
2146
+ ∂s
2147
+ x
2148
+
2149
+ δujP
2150
+
2151
+ ∂θs
2152
+ j + ∂s
2153
+ x
2154
+
2155
+ δθjP
2156
+
2157
+ ∂uj,s
2158
+
2159
+ δuiQ +
2160
+
2161
+
2162
+ s=0
2163
+
2164
+ ∂s
2165
+ x
2166
+
2167
+ δujQ
2168
+
2169
+ ∂θs
2170
+ j + ∂s
2171
+ x
2172
+
2173
+ δθjQ
2174
+
2175
+ ∂uj,s
2176
+
2177
+ δuiP
2178
+ (101)
2179
+ = δui
2180
+
2181
+
2182
+ s=0
2183
+
2184
+ ∂s
2185
+ x
2186
+
2187
+ δujP
2188
+
2189
+ ∂θs
2190
+ jQ + ∂s
2191
+ x
2192
+
2193
+ δθjP
2194
+
2195
+ ∂uj,sQ
2196
+
2197
+ = δui
2198
+
2199
+
2200
+ s=0
2201
+
2202
+ δujPδθjQ + δθjPδujQ
2203
+
2204
+ and
2205
+
2206
+
2207
+ s=0
2208
+
2209
+ ∂s
2210
+ x
2211
+
2212
+ δujP
2213
+
2214
+ ∂θs
2215
+ j + ∂s
2216
+ x
2217
+
2218
+ δθjP
2219
+
2220
+ ∂uj,s
2221
+
2222
+ δθiQ +
2223
+
2224
+
2225
+ s=0
2226
+
2227
+ ∂s
2228
+ x
2229
+
2230
+ δujQ
2231
+
2232
+ ∂θs
2233
+ j + ∂s
2234
+ x
2235
+
2236
+ δθjQ
2237
+
2238
+ ∂uj,s
2239
+
2240
+ δθiP
2241
+ (102)
2242
+ = −δθi
2243
+
2244
+
2245
+ s=0
2246
+
2247
+ ∂s
2248
+ x
2249
+
2250
+ δujP
2251
+
2252
+ ∂θs
2253
+ jQ + ∂s
2254
+ x
2255
+
2256
+ δθjP
2257
+
2258
+ ∂uj,sQ
2259
+
2260
+ = −δθi
2261
+
2262
+
2263
+ s=0
2264
+
2265
+ δujPδθjQ + δθjPδujQ
2266
+
2267
+ ,
2268
+
2269
+ MIURA-RECIPROCAL TRANSFORMATIONS AND LOCALIZABLE POISSON PENCILS
2270
+ 19
2271
+ we obtain that the coefficient of ∂ζ on the left hand side of Equation (92) is equal to
2272
+ ∂−1
2273
+ x (−ui,1δui + θi∂xδθi)[P, Q] = ˆE
2274
+
2275
+ [P, Q]
2276
+
2277
+ ,
2278
+ (103)
2279
+ which is the coefficient of ∂ζ on the right hand side of Equation (92). The coefficients of all
2280
+ other components of the vector fields on the left hand side of Equation (92) are computed in a
2281
+ very similar way.
2282
+
2283
+ Lemma 4.3 implies that D2−λD1 is a differential on S[λ], and we have a short exact sequence
2284
+ 0
2285
+ � S[λ]
2286
+ R
2287
+ ∂x
2288
+
2289
+ D2−λD1
2290
+
2291
+ S[λ]
2292
+
2293
+
2294
+ D2−λD1
2295
+
2296
+ E[λ]
2297
+
2298
+ d2−λd1
2299
+
2300
+ 0
2301
+ (104)
2302
+ and it implies a long exact sequence in the cohomology which reads
2303
+ Hp
2304
+ d−1(S[λ]/R, D2 − λD1)
2305
+ � Hp
2306
+ d(S[λ], D2 − λD1)
2307
+ � Hp
2308
+ d(E[λ], d2 − λd1)
2309
+
2310
+ Hp+1
2311
+ d
2312
+ (S[λ]/R, D2 − λD1)
2313
+ � Hp+1
2314
+ d+1(S[λ], D2 − λD1)
2315
+ (105)
2316
+ Lemma 4.5. We have
2317
+ Hp
2318
+ d(S[λ], D2 − λD1) ∼=
2319
+
2320
+
2321
+
2322
+
2323
+
2324
+ 0
2325
+ p ≤ d and (p, d) ̸= (3, 3), (0, 0)
2326
+ R[λ]
2327
+ p = 0, d = 0;
2328
+ �N
2329
+ i=1 C∞(R, ui)
2330
+ p = 3, d = 3.
2331
+ (106)
2332
+ Also, H3
2333
+ 2(S[λ], D2 − λD1) ∼= 0.
2334
+ Proof. This lemma can be derived from [CKS18, Theorems 2.12 and 2.13]. Indeed, Lemma 4.3
2335
+ in particular implies that we have a bicomplex (S[λ], Dloc, Dζ) with the differentials given by
2336
+ Dζ := ( ˆE(P2)−λ ˆE(P1))∂ζ and Dloc := D2 −λD1 −Dζ. We start a spectral sequence associated
2337
+ with this bicomplex. Obviously, it converges on the second page. The computation of the first
2338
+ page splits as
2339
+ Hp
2340
+ d(S[λ], Dloc) ∼= Hp
2341
+ d(A[λ], Dloc) ⊕ Hp
2342
+ d(A[λ]ζ, Dloc)
2343
+ (107)
2344
+ ∼= Hp
2345
+ d(A[λ], Dloc) ⊕ Hp−1
2346
+ d
2347
+ (A[λ], Dloc),
2348
+ which implies all desired vanishings (for p ≤ d the only non-trivial cohomology groups are
2349
+ H0
2350
+ 0(A[λ], Dloc) ∼= R[λ] and H3
2351
+ 3(A[λ], Dloc) ∼= �N
2352
+ i=1 C∞(R, ui) [CKS18, Theorems 2.12 and
2353
+ 2.13]).
2354
+ Since both Hi
2355
+ i(S[λ], Dloc) = 0 for i = 1, 2, 4, and the induced differential on the first page
2356
+ has the (p, d)-degree (1, 1), we conclude that
2357
+ H0
2358
+ 0(S[λ], D2 − λD1) ∼= H0
2359
+ 0(S[λ], Dloc) ∼= R[λ];
2360
+ (108)
2361
+ H3
2362
+ 3(S[λ], D2 − λD1) ∼= H3
2363
+ 3(S[λ], Dloc) ∼=
2364
+ N
2365
+
2366
+ i=1
2367
+ C∞(R, ui).
2368
+ (109)
2369
+
2370
+ Remark 4.6. Almost the same statement holds for the cohomology of S[λ]/R, the only difference
2371
+ is H0
2372
+ 0(S[λ]/R, D2 − λD1) ∼= 0.
2373
+ Now we can complete the computation of the cohomology Hp
2374
+ d(E[λ], d2 − λd1) for p < d and
2375
+ p = 2, d = 2 using the long exact sequence (105). The relevant pieces of this long exact sequence
2376
+
2377
+ 20
2378
+ P. LORENZONI, S. SHADRIN, AND R. VITOLO
2379
+ are
2380
+ 0 = Hp
2381
+ d(S[λ], D2 − λD1)
2382
+ � Hp
2383
+ d(E[λ], dloc
2384
+ 2 − λdloc
2385
+ 1 )
2386
+
2387
+ Hp+1
2388
+ d
2389
+ (S[λ]/R, D2 − λD1) = 0
2390
+ ,
2391
+ (110)
2392
+ for p < d and (p + 1, d) ̸= (3, 3), which implies the vanishing for p < d, (p, d) ̸= (2, 3), and
2393
+ p = 2, d = 2. Moreover, we have
2394
+ 0 = H2
2395
+ 3(S[λ], D2 − λD1)
2396
+ � H2
2397
+ 3(E[λ], dloc
2398
+ 2 − λdloc
2399
+ 1 )
2400
+
2401
+ H3
2402
+ 3(S[λ]/R, D2 − λD1) ∼= �N
2403
+ i=1 C∞(R, ui)
2404
+ � H3
2405
+ 4(S[λ], D2 − λD1) = 0
2406
+ ,
2407
+ (111)
2408
+ which gives the answer for (p, d) = (2, 3). Now, the special cases of these computations for
2409
+ p = 2, d ≥ 2 and p = 3, d ≥ 4 imply all statements of Theorem 4.2.
2410
+
2411
+ An immediate corollary of Theorem 4.2 is the following:
2412
+ Corollary 4.7. Let
2413
+
2414
+ P2 − λP1 be a semi-simple pencil of local Poisson bivectors of differen-
2415
+ tial order 1. We consider the higher order dispersive extensions of
2416
+
2417
+ P2 − λP1 in the realm
2418
+ of weakly non-local Poisson pencils of localizable shape, that is, we consider Poisson pencils
2419
+ � �∞
2420
+ d=1 ǫd−1(P2,d −λP1,d) ∈ E such that deg∂x(P2,d −λP1,d) = d and
2421
+
2422
+ P2,1 −λP1,1 =
2423
+
2424
+ P2 −λP1.
2425
+ The space of orbits of the action of the group RII (the group of Miura-reciprocal transfor-
2426
+ mation of the 2nd kind) onto the set of these dispersive extensions is isomorphic to the space
2427
+ �N
2428
+ i=1 C∞(R, ui).
2429
+ This result is strikingly similar to the corresponding statement in the local case, cf. [CPS18,
2430
+ Theorem 1], see also [LZ05; LZ13a; DLZ06]. However, in the local case both the space ˆF where
2431
+ the deformations of
2432
+
2433
+ P2 − λP1 are allowed as well as the group GII acting on them are much
2434
+ smaller than in Corollary 4.7. Our next goal is to compare these two situations.
2435
+ 4.2. Comparison with the purely local deformations. Within this section it is important
2436
+ to have a notation that distinguishes between the operator ∂x as given by Equation (93) on the
2437
+ space S = ˆ
2438
+ A[ζ] and its purely local version ˜∂x := ∂x + ui,1θi∂ζ defined both on S and on A.
2439
+ Note that on S the operator ˜∂x commutes with multiplication by ζ.
2440
+ Let T nl denote the space of dispersive weakly non-local Poisson pencils of localizable shape
2441
+ � �∞
2442
+ d=1 ǫd−1(P2,d − λP1,d) ∈ E with the fixed leading term
2443
+
2444
+ P2,1 − λP1,1 =
2445
+
2446
+ P2 − λP1 that
2447
+ is purely local and semi-simple. Let T loc denote the space of dispersive local Poisson pencils
2448
+ � �∞
2449
+ d=1 ǫd−1(P2,d − λP1,d) ∈ ˆF with the same fixed leading term
2450
+
2451
+ P2,1 − λP1,1 =
2452
+
2453
+ P2 − λP1.
2454
+ The group RII acts on T nl and the group GII acts on T loc. Moreover, there is a natural
2455
+ embedding I : T loc → T nl that is GII-equivariant (GII acts on T nl as a subgroup of RII). The
2456
+ map I induces a map of the sets of orbits ι: T loc/GII → T nl/RII.
2457
+ Proposition 4.8. The map ι is injective.
2458
+ Proof. This proposition immediately follows from [LZ11, Theorem 1.3] and [CPS18, Theorem
2459
+ 2].
2460
+ By the latter result in the local case, we have an isomorphism of sets ˜c: T loc/GII →
2461
+ �N
2462
+ i=1 C∞(R, ui) (these are the so-called central invariants in the local case).
2463
+ On the other
2464
+ hand, [LZ11, Theorem 1.3] states that for any x, y ∈ T loc/GII such that ι(x) = ι(y) we have
2465
+ ˜c(x) = ˜c(y). Hence, x = y, and ι is surjective.
2466
+
2467
+ Corollary 4.7 implies that there is a RII invariant map C : T nl → �N
2468
+ i=1 C∞(R, ui) that
2469
+ descends to a bijection c: T nl/RII → �N
2470
+ i=1 C∞(R, ui). We have the following
2471
+ Proposition 4.9. The composition c ◦ ι: T loc/GII → �N
2472
+ i=1 C∞(R, ui) is surjective.
2473
+
2474
+ MIURA-RECIPROCAL TRANSFORMATIONS AND LOCALIZABLE POISSON PENCILS
2475
+ 21
2476
+ Proof. Basically, we want to show that any cohomology class in H2
2477
+ 3(E) has a representative with
2478
+ a purely local density. Let
2479
+
2480
+ a2 + a1ζ represent a class in H2
2481
+ 3(E), a2 ∈ ˆ
2482
+ A2
2483
+ 3[λ] and a1 ∈ ˆ
2484
+ A1
2485
+ 3[λ].
2486
+ This means that
2487
+ (D2 − λD1)(a2 + a1ζ) ∈ ∂x(S3
2488
+ 3[λ]),
2489
+ (112)
2490
+ or, in other words, that there exist b3 ∈ ˆ
2491
+ A3
2492
+ 3[λ] and b2 ∈ ˆ
2493
+ A2
2494
+ 3[λ] such that
2495
+ Dloc(a2) − ( ˆE(P2) − λ ˆE(P1))(a1) + Dloc(a1)ζ = ˜∂x(b3) + ui,1θib2 + ˜∂x(b2)ζ.
2496
+ (113)
2497
+ Since H1
2498
+ 3( ˆ
2499
+ A[λ], Dloc) = 0 [CKS18, Theorem 2.13], there exist e0 ∈ ˆ
2500
+ A0
2501
+ 2[λ] and f 1 ∈ ˆ
2502
+ A1
2503
+ 2[λ] such
2504
+ that Dloce0 = a1 + ˜∂x(f 1). Then,
2505
+ (D2 − λD1)(e0ζ) = a1ζ + ˜∂x(f 1)ζ − ( ˆE(P2) − λ ˆE(P1))(e0)
2506
+ (114)
2507
+ = a1ζ − ( ˆE(P2) − λ ˆE(P1))(e0) − ui,1θif 1 + ∂x(f 1ζ),
2508
+ which implies that
2509
+ (d2 − λd1)
2510
+
2511
+ e0ζ =
2512
+
2513
+ a1ζ − ( ˆE(P2) − λ ˆE(P1))(e0) − ui,1θif 1
2514
+ (115)
2515
+ Thus, the cocycle
2516
+
2517
+ a2 + a1ζ is cohomologous to
2518
+
2519
+ a2 + ( ˆE(P2) − λ ˆE(P1))(e0) + ui,1θif 1, which
2520
+ gives a pure local deformation for
2521
+
2522
+ P2 − λP1.
2523
+
2524
+ Taking into account that that c is a bijection, an immediate corollary of Proposition 4.9 is
2525
+ the following:
2526
+ Corollary 4.10. The map ι is surjective (and hence a bijection). In particular, every orbit of
2527
+ the action of RII on T nl contains a purely local representative.
2528
+ It is just a different way to state Theorem 4.1, so this corollary also completes the proof of
2529
+ Theorem 4.1
2530
+ 4.3. Roots of the characteristic polynomial of the symbol. In the purely local case the
2531
+ central invariants, besides a purely cohomological definition, can be computed directly from a
2532
+ representative of a deformation (see [DLZ06] for details). More precisely, one has to compute
2533
+ the eigenvalues of the symbol of a representative of a deformation, which behave as scalars with
2534
+ respect to the Miura group action. In this section we extend this viewpoint to the invariants
2535
+ of the Miura-reciprocal group.
2536
+ First, we recall the construction from [DLZ06]. Let
2537
+ � �∞
2538
+ d=1 ǫd−1(P2,d −λP1,d) ∈ T loc, and the
2539
+ densities are expanded as �d
2540
+ s=0(P2,d,s − λP1,d,s)ijθiθd−s
2541
+ j
2542
+ , d ≥ 1, such that
2543
+ d
2544
+
2545
+ s=0
2546
+ (P2,d,s − λP1,d,s)ij∂d−s
2547
+ x
2548
+ = −
2549
+ d
2550
+
2551
+ s=0
2552
+ (−∂x)d−s ◦ (P2,d,s − λP1,d,s)ji
2553
+ (116)
2554
+ Consider the symbol of the densities of the bi-vector
2555
+ � �∞
2556
+ d=1 ǫd−1(P2,d −λP1,d), that is, the sum
2557
+ �∞
2558
+ d=1 ǫd−1(P2,d,0 − λP1,d,0)ij = �∞
2559
+ d=1(−ǫ)d−1(P2,d,0 − λP1,d,0)ji. The construction of the Miura
2560
+ group invariants from the eigenvalues of the symbol is based on the following lemma:
2561
+ Lemma 4.11. Under the group of Miura transformations G the symbol transforms linearly as
2562
+ a pencil of bi-linear forms:
2563
+
2564
+
2565
+ d=1
2566
+ ǫd−1(P2,d,0 − λP1,d,0)ij �→
2567
+
2568
+
2569
+ d=0
2570
+ ǫd ∂wi
2571
+ d
2572
+ ∂uk,d
2573
+
2574
+
2575
+ d=1
2576
+ ǫd−1(P2,d,0 − λP1,d,0)kℓ
2577
+
2578
+
2579
+ d=1
2580
+ (−ǫ)d ∂wj
2581
+ d
2582
+ ∂uℓ,d
2583
+ (117)
2584
+ (here wi = �∞
2585
+ d=0 ǫdwi
2586
+ d, wi
2587
+ d ∈ Ad, i = 1, . . . , N, are the new coordinates). Hence, the eigenvalues
2588
+ of this pencil behave as scalar with respect to the action of the Miura group.
2589
+
2590
+ 22
2591
+ P. LORENZONI, S. SHADRIN, AND R. VITOLO
2592
+ There are N roots λi, i = 1, . . . , N, of the λ-polynomial
2593
+ (118)
2594
+ det
2595
+ � ∞
2596
+
2597
+ d=1
2598
+ ǫd−1(P2,d,0 − λP1,d,0)
2599
+
2600
+ which are the formal power series in ǫ with the coefficients given by smooth functions in
2601
+ u1, . . . , uN, with the leading terms in ǫ given by mi = ui + O(ǫ). These eigenvalues are fur-
2602
+ ther used to derive the closed formulas for the central invariants of a pencil
2603
+ � �∞
2604
+ d=1 ǫd−1(P2,d −
2605
+ λP1,d) ∈ T loc.
2606
+ In the weakly non-local case of localizable shape, the densities of
2607
+ � �∞
2608
+ d=1 ǫd−1(P2,d −λP1,d) ∈
2609
+ T nl can be uniquely expanded as �d
2610
+ s=0(P2,d,s − λP1,d,s)ijθiθd−s
2611
+ j
2612
+ + (Q2,d − λQ1,d)iθiζ, d ≥ 1, such
2613
+ that
2614
+ d
2615
+
2616
+ s=0
2617
+ (P2,d,s − λP1,d,s)ij∂d−s
2618
+ x
2619
+ = −
2620
+ d
2621
+
2622
+ s=0
2623
+ (−∂x)d−s ◦ (P2,d,s − λP1,d,s)ji.
2624
+ (119)
2625
+ (this expansion we call the “normal form” below).
2626
+ Proposition 4.12. Let
2627
+ � �∞
2628
+ d=1 ǫd−1(P2,d − λP1,d) ∈ T nl and let
2629
+ λi = ri + ǫ2λi
2630
+ 2 + ǫ4λi
2631
+ 4 + · · ·
2632
+ be the λ-roots of the characteristic polynomial (118). The quantities
2633
+ ci
2634
+ 2k = λi
2635
+ 2k
2636
+ (f i)k ,
2637
+ k = 0, 1, 2, ...
2638
+ (where f i are the diagonal entries of the first metric in canonical coordinates) are invariant
2639
+ under the action of R.
2640
+ Proof. Taking into account the above lemma we focus on pure reciprocal transformations. The
2641
+ action of reciprocal transformations of 1st kind on the coefficients of the symbols can be easily
2642
+ obtained using the same arguments used in the proof of Proposition (2.8). Indeed
2643
+ ˜P ij
2644
+ λ
2645
+ =
2646
+ B−1
2647
+
2648
+ Bδi
2649
+ k − ui
2650
+ x∂−1
2651
+ x
2652
+ ∂B
2653
+ ∂uk
2654
+
2655
+ P kℓ
2656
+ λ
2657
+
2658
+ Bδj
2659
+ ℓ + ∂B
2660
+ ∂uℓ∂−1
2661
+ x uj
2662
+ x
2663
+
2664
+ =
2665
+ BP ij
2666
+ λ + P il
2667
+ λ
2668
+ ∂B
2669
+ ∂uℓ∂−1
2670
+ x uj
2671
+ x − 1
2672
+ B ui
2673
+ x∂−1
2674
+ x
2675
+ ∂B
2676
+ ∂uk P kj
2677
+ λ − ui
2678
+ x∂−1
2679
+ x
2680
+ ∂B
2681
+ ∂uk P kℓ
2682
+ λ
2683
+ ∂B
2684
+ ∂uℓ ∂−1
2685
+ x uj
2686
+ x.
2687
+ The second, the third and the fourth terms cannot contribute to the symbol of ˜P ij
2688
+ λ , while in
2689
+ the first term the only contributions come from
2690
+ B
2691
+
2692
+
2693
+ d=1
2694
+ ǫd−1(P2,d,0−λP1,d,0)∂d
2695
+ x = B
2696
+
2697
+
2698
+ d=1
2699
+ ǫd−1(P2,d,0−λP1,d,0)Bd∂d
2700
+ y = B2
2701
+
2702
+
2703
+ d=1
2704
+ (Bǫ)d−1(P2,d,0−λP1,d,0)∂d
2705
+ y
2706
+ that implies
2707
+
2708
+
2709
+ d=1
2710
+ ǫd−1(P2,d,0 − λP1,d,0)ij → B2
2711
+
2712
+
2713
+ d=1
2714
+ (Bǫ)d−1(P2,d,0 − λP1,d,0)ij.
2715
+ This means that
2716
+ λi → ri + (Bǫ)2λi
2717
+ 2 + (Bǫ)4λi
2718
+ 4 + · · ·
2719
+ or, equivalently, that
2720
+ λi
2721
+ 2k → B2kλi
2722
+ 2k.
2723
+ The result then follows from the transformation rule for the contravariant metric (see (43)):
2724
+ f i → B2f i. In the case of reciprocal transformation of 2nd kind we observe that they do not
2725
+ affect the symbol of the pencil. Indeed a bivector transforms according to the following rule
2726
+ ˜P ij := B−1
2727
+
2728
+ Bδi
2729
+ k − ui
2730
+ x∂−1
2731
+ x
2732
+ ∂B
2733
+ ∂uk
2734
+ σ
2735
+ ∂σ
2736
+ x
2737
+
2738
+ P kℓ
2739
+
2740
+ Bδj
2741
+ ℓ + (−∂x)τ ∂B
2742
+ ∂uℓ
2743
+ τ
2744
+ ∂−1
2745
+ x uj
2746
+ x
2747
+
2748
+ .
2749
+ (120)
2750
+
2751
+ MIURA-RECIPROCAL TRANSFORMATIONS AND LOCALIZABLE POISSON PENCILS
2752
+ 23
2753
+ where
2754
+ B = 1 + H = 1 +
2755
+
2756
+
2757
+ k=1
2758
+ ǫkHk(uj, uj
2759
+ x, . . . , uj
2760
+ σ),
2761
+ Hk ∈ Ak.
2762
+ Thus we have
2763
+ ˜P ij := P ij − 1
2764
+ B ui
2765
+ x∂−1
2766
+ x
2767
+ ∂H
2768
+ ∂uk
2769
+ σ
2770
+ ∂σ
2771
+ xP kj +
2772
+
2773
+ δi
2774
+ k − 1
2775
+ B ui
2776
+ x∂−1
2777
+ x
2778
+ ∂H
2779
+ ∂uk
2780
+ σ
2781
+ ∂σ
2782
+ x
2783
+
2784
+ P kℓ
2785
+
2786
+ Hδj
2787
+ ℓ + (−∂x)τ ∂H
2788
+ ∂uℓ
2789
+ τ
2790
+ ∂−1
2791
+ x uj
2792
+ x
2793
+
2794
+ .
2795
+ (121)
2796
+ Since the symbol of the bivector contains only the subset of the coefficients which depend only
2797
+ on the u’s but not on their x-derivatives the second term and the third terms above cannot
2798
+ contribute to it. This implies that the symbol of each bivector defining the pencil is unaffected
2799
+ by these transformations. For Miura reciprocal transformations (5) the transformation rule for
2800
+ the symbol of the pencil is obtained combining the Lemma 4.11 with the above rule. It turns
2801
+ out that the symbol of the pencil transforms in the following way
2802
+
2803
+
2804
+ d=1
2805
+ ǫd−1(P2,d,0 − λP1,d,0)ij �→ B2
2806
+
2807
+
2808
+ d=0
2809
+ ǫd ∂wi
2810
+ d
2811
+ ∂uk,d
2812
+
2813
+
2814
+ d=1
2815
+ (Bǫ)d−1(P2,d,0 − λP1,d,0)kℓ
2816
+
2817
+
2818
+ d=1
2819
+ (−ǫ)d ∂wj
2820
+ d
2821
+ ∂uℓ,d.
2822
+ (122)
2823
+
2824
+ 5. Projective-reciprocal invariance of the Doyle–Pot¨emin form
2825
+ In this Section we make a first step towards the study of the projective-reciprocal group
2826
+ action. Consider a local operator of homogeneous differential order d + 2, d ≥ 2 of the form
2827
+ P ij = ∂x ◦ Qij ◦ ∂x. We call this presentation of an operator the Doyle–Pot¨emin form (see
2828
+ Subsection 1.5).
2829
+ We prove the following theorem:
2830
+ Theorem 5.1. The projective group preserves the Doyle–Pot¨emin form of an operator. More
2831
+ precisely, the image of a homogeneous skew-symmetric operator of the form ∂x ◦ Qij ◦ ∂x,
2832
+ deg∂x Qij = d ≥ 0 under the action of an element of P is a homogeneous skew-symmetric
2833
+ operator of the form ∂x ◦ ˜Qij ◦ ∂x, deg∂x ˜Qij = d ≥ 0
2834
+ Proof. Consider an element of the group P given by
2835
+ dy = A0dx,
2836
+ (123)
2837
+ wi = Ai/A0,
2838
+ i = 1, . . . , N,
2839
+ where Ai := ai
2840
+ juj + ai
2841
+ 0, i = 0, 1, . . . , N. Since the functions Ai and A0 do not depend on the
2842
+ higher jet variables, Theorem 2.5 implies that the operator P ij = ∂x◦Qij ◦∂x in the coordinates
2843
+ y, w1, . . . , wN is represented as
2844
+ ˜P ij = 1
2845
+ A0
2846
+
2847
+ A0∂uk
2848
+ � Ai
2849
+ A0
2850
+
2851
+ − ∂x
2852
+ � Ai
2853
+ A0
2854
+
2855
+ ∂−1
2856
+ x
2857
+ ◦ ∂ukA0
2858
+
2859
+ ∂x ◦ Qkl ◦ ∂x◦
2860
+ (124)
2861
+
2862
+ ∂ul
2863
+ �Aj
2864
+ A0
2865
+
2866
+ A0 + ∂ulA0∂−1
2867
+ x
2868
+ ◦ ∂x
2869
+ �Aj
2870
+ A0
2871
+ ��
2872
+ Now we see that
2873
+ ∂x ◦
2874
+
2875
+ ∂ul
2876
+ �Aj
2877
+ A0
2878
+
2879
+ A0 + ∂ulA0∂−1
2880
+ x
2881
+ ◦ ∂x
2882
+ �Aj
2883
+ A0
2884
+ ��
2885
+ = ∂x ◦
2886
+
2887
+ aj
2888
+ l − a0
2889
+ l
2890
+ �Aj
2891
+ A0
2892
+
2893
+ + a0
2894
+ l ∂−1
2895
+ x
2896
+ ◦ ∂x
2897
+ �Aj
2898
+ A0
2899
+ ��
2900
+ (125)
2901
+ =
2902
+
2903
+ aj
2904
+ l − a0
2905
+ l
2906
+ �Aj
2907
+ A0
2908
+ ��
2909
+ ∂x = (A0)2∂ulwj∂y
2910
+
2911
+ 24
2912
+ P. LORENZONI, S. SHADRIN, AND R. VITOLO
2913
+ and analogously
2914
+ 1
2915
+ A0
2916
+
2917
+ A0∂uk
2918
+ � Ai
2919
+ A0
2920
+
2921
+ − ∂x
2922
+ � Ai
2923
+ A0
2924
+
2925
+ ∂−1
2926
+ x
2927
+ ◦ ∂ukA0
2928
+
2929
+ ∂x = 1
2930
+ A0
2931
+
2932
+ ai
2933
+ k −
2934
+ � Ai
2935
+ A0
2936
+
2937
+ a0
2938
+ k − ∂x
2939
+ � Ai
2940
+ A0
2941
+
2942
+ ∂−1
2943
+ x
2944
+ ◦ a0
2945
+ k
2946
+
2947
+ ∂x
2948
+ (126)
2949
+ = 1
2950
+ A0∂x ◦
2951
+
2952
+ ai
2953
+ k −
2954
+ � Ai
2955
+ A0
2956
+
2957
+ a0
2958
+ k
2959
+
2960
+ = ∂y ◦ 1
2961
+ A0∂ukwi(A0)2.
2962
+ Thus we see that ˜P ij takes the form ∂y ◦ ˜Qij ◦ ∂y, where the operator ˜Qij is equal to
2963
+ ˜Qij = 1
2964
+ A0∂ukwi(A0)2Qkl(A0)2∂ulwj
2965
+ (127)
2966
+ after the substitution wi(u1, . . . , uN) = Ai/A0 and ∂y = (A0)−1∂x, which makes it manifestly
2967
+ skew-symmetric and homogeneous of the same degree.
2968
+
2969
+ Remark 5.2. Note that we don’t use the Poisson property in the proof (and we don’t have it in
2970
+ the statement of the theorem). This allows us to apply the projective-reciprocal transformation
2971
+ to any homogeneous skew-symmetric operators of the Doyle–Pot¨emin form, and the action
2972
+ would preserve the form.
2973
+ Remark 5.3. Interesting examples of skew-symmetric operators in the Doyle–Pot¨emin form
2974
+ are coming from the theory of Dubrovin–Zhang hierarchies [DZ01]. Is it proved in [BPS12b;
2975
+ BPS12a] that Dubrovin–Zhang hierarchies posses a Poisson bracket given by an operator of the
2976
+ shape �∞
2977
+ p=0 ǫ2pP ij
2978
+ 2p+1, where P ij
2979
+ 1 = ηij∂x for some constant inner product ηij, and for p ≥ 1 the
2980
+ operators P ij
2981
+ 2p+1 are homogeneous skew-symmetric operators of the shape �2p+1
2982
+ e=0 P ij
2983
+ 2p+1,e∂2p+1−e
2984
+ x
2985
+ ,
2986
+ where deg∂x P ij
2987
+ 2p+1,e = e, such that P ij
2988
+ 2p+1,0 = 0. Using that the operators P ij
2989
+ 2p+1, p ≥ 1, are
2990
+ skew-symmetric, it is easy to show that each of them is of the Doyle–Pot¨emin form.
2991
+ References
2992
+ [Abe09]
2993
+ S. Abenda. “Reciprocal transformations and local Hamiltonian structures of hydro-
2994
+ dynamic-type systems”. In: J. Phys. A 42.9 (2009), pp. 095208, 20. url: https://doi.org/10.1088/1751-8113/42/9/095208.
2995
+ [AG07]
2996
+ S. Abenda and T. Grava. “Reciprocal transformations and flat metrics on Hurwitz
2997
+ spaces”. In: J. Phys. A 40.35 (2007), pp. 10769–10790. url: https://doi.org/10.1088/1751-8113/40/35/004.
2998
+ [AL13]
2999
+ A. Arsie and P. Lorenzoni. “Reciprocal F-manifolds”. In: J. Geom. Phys. 70 (2013),
3000
+ pp. 185–204. url: https://doi.org/10.1016/j.geomphys.2013.03.029.
3001
+ [BPS12a]
3002
+ A. Buryak, H. Posthuma, and S. Shadrin. “On deformations of quasi-Miura trans-
3003
+ formations and the Dubrovin-Zhang bracket”. In: J. Geom. Phys. 62.7 (2012),
3004
+ pp. 1639–1651. url: https://doi.org/10.1016/j.geomphys.2012.03.006.
3005
+ [BPS12b]
3006
+ A. Buryak, H. Posthuma, and S. Shadrin. “A polynomial bracket for the Dubrovin-
3007
+ Zhang hierarchies”. In: J. Differential Geom. 92.1 (2012), pp. 153–185. url: http://projecteuclid.org/euclid.jdg/1352211225.
3008
+ [BS09]
3009
+ M. B�laszak and A. Sergyeyev. “A coordinate-free construction of conservation laws
3010
+ and reciprocal transformations for a class of integrable hydrodynamic-type systems”.
3011
+ In: Rep. Math. Phys. 64.1-2 (2009), pp. 341–354. url: https://doi.org/10.1016/S0034-4877(09)90038-6.
3012
+ [Cas+22]
3013
+ M. Casati, P. Lorenzoni, D. Valeri, and R. Vitolo. “Weakly nonlocal Poisson brack-
3014
+ ets: tools, examples, computations”. In: Comput. Phys. Commun. 274 (2022), Paper
3015
+ No. 108284, 18. url: https://doi.org/10.1016/j.cpc.2022.108284.
3016
+ [CCS17]
3017
+ G. Carlet, M. Casati, and S. Shadrin. “Poisson cohomology of scalar multidimen-
3018
+ sional Dubrovin-Novikov brackets”. In: J. Geom. Phys. 114 (2017), pp. 404–419.
3019
+ url: https://doi.org/10.1016/j.geomphys.2016.12.008.
3020
+ [CCS18]
3021
+ G. Carlet, M. Casati, and S. Shadrin. “Normal forms of dispersive scalar Pois-
3022
+ son brackets with two independent variables”. In: Lett. Math. Phys. 108.10 (2018),
3023
+ pp. 2229–2253. url: https://doi.org/10.1007/s11005-018-1076-x.
3024
+
3025
+ REFERENCES
3026
+ 25
3027
+ [CKS18]
3028
+ G. Carlet, R. Kramer, and S. Shadrin. “Central invariants revisited”. In: J. ´Ec.
3029
+ polytech. Math. 5 (2018), pp. 149–175. url: https://doi.org/10.5802/jep.66.
3030
+ [CLV20]
3031
+ M. Casati, P. Lorenzoni, and R. Vitolo. “Three computational approaches to weakly
3032
+ nonlocal Poisson brackets”. In: Stud. Appl. Math. 144.4 (2020), pp. 412–448. url:
3033
+ https://doi.org/10.1111/sapm.12302.
3034
+ [CPS16a]
3035
+ G. Carlet, H. Posthuma, and S. Shadrin. “Bihamiltonian cohomology of KdV brack-
3036
+ ets”. In: Comm. Math. Phys. 341.3 (2016), pp. 805–819. url: https://doi.org/10.1007/s00220-015-2540-4.
3037
+ [CPS16b]
3038
+ G. Carlet, H. Posthuma, and S. Shadrin. “The bi-Hamiltonian cohomology of a
3039
+ scalar Poisson pencil”. In: Bull. Lond. Math. Soc. 48.4 (2016), pp. 617–627. url:
3040
+ https://doi.org/10.1112/blms/bdw017.
3041
+ [CPS18]
3042
+ G. Carlet, H. Posthuma, and S. Shadrin. “Deformations of semisimple Poisson pen-
3043
+ cils of hydrodynamic type are unobstructed”. In: J. Differential Geom. 108.1 (2018),
3044
+ pp. 63–89. url: https://doi.org/10.4310/jdg/1513998030.
3045
+ [DLZ06]
3046
+ B. Dubrovin, S.-Q. Liu, and Y. Zhang. “On Hamiltonian perturbations of hyperbolic
3047
+ systems of conservation laws. I. Quasi-triviality of bi-Hamiltonian perturbations”.
3048
+ In: Comm. Pure Appl. Math. 59.4 (2006), pp. 559–615. url: https://doi.org/10.1002/cpa.20111.
3049
+ [DMS05]
3050
+ L. Degiovanni, F. Magri, and V. Sciacca. “On deformation of Poisson manifolds of
3051
+ hydrodynamic type”. In: Comm. Math. Phys. 253.1 (2005), pp. 1–24. url: https://doi.org/10.1007/s00220-004-1190-8.
3052
+ [Doy93]
3053
+ P. W. Doyle. “Differential geometric Poisson bivectors in one space variable”. In: J.
3054
+ Math. Phys. 34.4 (1993), pp. 1314–1338. url: https://doi.org/10.1063/1.530213.
3055
+ [DZ01]
3056
+ B. Dubrovin and Y. Zhang. Normal forms of hierarchies of integrable PDEs, Frobe-
3057
+ nius manifolds and Gromov - Witten invariants. 2001. url: https://arxiv.org/abs/math/0108160.
3058
+ [Fer89]
3059
+ E. V. Ferapontov. “Reciprocal transformations and their invariants”. In: Differ-
3060
+ entsial ′nye Uravneniya 25.7 (1989), pp. 1256–1265, 1286.
3061
+ [Fer91]
3062
+ E. V. Ferapontov. “Autotransformations with respect to the solution, and hydro-
3063
+ dynamic symmetries”. In: Differentsial ′nye Uravneniya 27.7 (1991), pp. 1250–1263,
3064
+ 1287.
3065
+ [Fer95a]
3066
+ E. V. Ferapontov. “Conformally flat metrics, systems of hydrodynamic type and
3067
+ nonlocal Hamiltonian operators”. In: Uspekhi Mat. Nauk 50.4(304) (1995), pp. 175–
3068
+ 176. url: https://doi.org/10.1070/RM1995v050n04ABEH002582.
3069
+ [Fer95b]
3070
+ E. V. Ferapontov. “Nonlocal Hamiltonian operators of hydrodynamic type: differ-
3071
+ ential geometry and applications”. In: Topics in topology and mathematical physics.
3072
+ Vol. 170. Amer. Math. Soc. Transl. Ser. 2. Amer. Math. Soc., Providence, RI, 1995,
3073
+ pp. 33–58. url: https://doi.org/10.1090/trans2/170/03.
3074
+ [FP03]
3075
+ E. V. Ferapontov and M. V. Pavlov. “Reciprocal transformations of Hamiltonian op-
3076
+ erators of hydrodynamic type: nonlocal Hamiltonian formalism for linearly degener-
3077
+ ate systems”. In: J. Math. Phys. 44.3 (2003), pp. 1150–1172. url: https://doi.org/10.1063/1.1542921.
3078
+ [FPV14]
3079
+ E. V. Ferapontov, M. V. Pavlov, and R. F. Vitolo. “Projective-geometric aspects
3080
+ of homogeneous third-order Hamiltonian operators”. In: J. Geom. Phys. 85 (2014),
3081
+ pp. 16–28. url: https://doi.org/10.1016/j.geomphys.2014.05.027.
3082
+ [Get02]
3083
+ E. Getzler. “A Darboux theorem for Hamiltonian operators in the formal calculus of
3084
+ variations”. In: Duke Math. J. 111.3 (2002), pp. 535–560. url: https://doi.org/10.1215/S0012-7094-02-11136-3.
3085
+ [Ibr85]
3086
+ N. H. Ibragimov. Transformation groups applied to mathematical physics. Mathe-
3087
+ matics and its Applications (Soviet Series). Translated from the Russian. D. Reidel
3088
+ Publishing Co., Dordrecht, 1985, pp. xv+394. url: https://doi.org/10.1007/978-94-009-5243-0.
3089
+ [IVV02]
3090
+ S. Igonin, A. Verbovetsky, and R. Vitolo. On the formalism of local variational differ-
3091
+ ential operators. Memorandum 1641. University of Twente, Department of Applied
3092
+ Mathematics, 2002, pp. 1–34. url: https://research.utwente.nl/en/publications/on-the-formalism-of-local-variational-differential-operators.
3093
+ [Lor02]
3094
+ P. Lorenzoni. “Deformations of bi-Hamiltonian structures of hydrodynamic type”.
3095
+ In: J. Geom. Phys. 44.2-3 (2002), pp. 331–375. url: https://doi.org/10.1016/S0393-0440(02)00080-3.
3096
+ [LV20]
3097
+ P. Lorenzoni and R. Vitolo. “Weakly nonlocal Poisson brackets, Schouten brackets
3098
+ and supermanifolds”. In: J. Geom. Phys. 149 (2020), pp. 103573, 8. url: https://doi.org/10.1016/j.geomphys.2019.103573.
3099
+
3100
+ 26
3101
+ REFERENCES
3102
+ [LZ05]
3103
+ S.-Q. Liu and Y. Zhang. “Deformations of semisimple bihamiltonian structures of
3104
+ hydrodynamic type”. In: J. Geom. Phys. 54.4 (2005), pp. 427–453. url: https://doi.org/10.1016/j.geomphys.2004.11.003.
3105
+ [LZ11]
3106
+ S.-Q. Liu and Y. Zhang. “Jacobi structures of evolutionary partial differential equa-
3107
+ tions”. In: Adv. Math. 227.1 (2011), pp. 73–130. url: https://doi.org/10.1016/j.aim.2011.01.015.
3108
+ [LZ13a]
3109
+ S.-Q. Liu and Y. Zhang. “Bihamiltonian cohomologies and integrable hierarchies I: A
3110
+ special case”. In: Comm. Math. Phys. 324.3 (2013), pp. 897–935. url: https://doi.org/10.1007/s00220-013-1822-y.
3111
+ [LZ13b]
3112
+ S.-Q. Liu and Y. Zhang. “Bihamiltonian cohomologies and integrable hierarchies I: A
3113
+ special case”. In: Comm. Math. Phys. 324.3 (2013), pp. 897–935. url: https://doi.org/10.1007/s00220-013-1822-y.
3114
+ [Miu68]
3115
+ R. M. Miura. “Korteweg-de Vries equation and generalizations. I. A remarkable
3116
+ explicit nonlinear transformation”. In: J. Mathematical Phys. 9 (1968), pp. 1202–
3117
+ 1204. url: https://doi.org/10.1063/1.1664700.
3118
+ [MN01]
3119
+ A. Y. Maltsev and S. P. Novikov. “On the local systems Hamiltonian in the weakly
3120
+ non-local Poisson brackets”. In: Phys. D 156.1-2 (2001), pp. 53–80. url: https://doi.org/10.1016/S0167-2789(01)00280-9.
3121
+ [Mok87]
3122
+ O. I. Mokhov. “Hamiltonian differential operators and contact geometry”. In: Funk-
3123
+ tsional. Anal. i Prilozhen. 21.3 (1987), pp. 53–60, 96.
3124
+ [Mok98]
3125
+ O. I. Mokhov. “Symplectic and Poisson structures on loop spaces of smooth mani-
3126
+ folds, and integrable systems”. In: Uspekhi Mat. Nauk 53.3(321) (1998), pp. 85–192.
3127
+ url: https://doi.org/10.1070/rm1998v053n03ABEH000019.
3128
+ [Olv88]
3129
+ P. J. Olver. “Darboux’s theorem for Hamiltonian differential operators”. In: J. Dif-
3130
+ ferential Equations 71.1 (1988), pp. 10–33. url: https://doi.org/10.1016/0022-0396(88)90036-8.
3131
+ [Olv93]
3132
+ P. J. Olver. Applications of Lie groups to differential equations. Second. Vol. 107.
3133
+ Graduate Texts in Mathematics. Springer-Verlag, New York, 1993, pp. xxviii+513.
3134
+ url: https://doi.org/10.1007/978-1-4612-4350-2.
3135
+ [Pot91]
3136
+ G. V. Pot¨emin. “Some questions of differential geometry and algebraic geometry in
3137
+ the theory of solitons”. PhD thesis. Moscow State University, 1991.
3138
+ [Pot97]
3139
+ G. V. Pot¨emin. “On third-order differential-geometric Poisson brackets”. In: Uspekhi
3140
+ Mat. Nauk 52.3(315) (1997), pp. 173–174. url: https://doi.org/10.1070/RM1997v052n03ABEH001817.
3141
+ [Rog68]
3142
+ C. Rogers. “Reciprocal relations in non-steady one-dimensional gasdynamics”. In:
3143
+ Z. Angew. Math. Phys. 19 (1968), pp. 58–63.
3144
+ [Rog69]
3145
+ C. Rogers. “Invariant transformations in non-steady gasdynamics and magneto-
3146
+ gasdynamics”. In: Z. Angew. Math. Phys. 20 (1969), pp. 370–382.
3147
+ [VV]
3148
+ P. Vergallo and R. Vitolo. “Projective geometry of homogeneous second order Hamil-
3149
+ tonian operators”. preprint. url: https://arxiv.org/abs/2203.04237.
3150
+ [XZ06]
3151
+ T. Xue and Y. Zhang. “Bihamiltonian systems of hydrodynamic type and reciprocal
3152
+ transformations”. In: Lett. Math. Phys. 75.1 (2006), pp. 79–92. url: https://doi.org/10.1007/s11005-005-0031-9.
3153
+ (P. Lorenzoni) Department of Mathematics and Applications, University of Milano Bicocca,
3154
+ Via Roberto Cozzi 55, 20125 Milano, Italy and INFN sezione di Milano-Bicocca
3155
+ Email address: paolo.lorenzoni@unimib.it
3156
+ (S. Shadrin) Korteweg–de Vries Institut for Mathematics, University of Amsterdam, Postbus
3157
+ 94248, 1090 GE Amsterdam, The Netherlands
3158
+ Email address: s.shadrin@uva.nl
3159
+ (R. Vitolo) Department of Mathematics and Physics “E. De Giorgi”, University of Salento,
3160
+ via per Arnesano, 73100 Lecce, Italy and INFN sezione di Lecce
3161
+ Email address: raffaele.vitolo@unisalento.it
3162
+
ItE3T4oBgHgl3EQfXAqD/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
LNFAT4oBgHgl3EQfwR78/content/tmp_files/2301.08681v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
LNFAT4oBgHgl3EQfwR78/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
M9E1T4oBgHgl3EQftQXp/content/tmp_files/2301.03376v1.pdf.txt ADDED
@@ -0,0 +1,1463 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Occupant-Oriented Demand Response
2
+ with Room-Individual Building Control
3
+ Moritz Frahma, Thomas Dengizb, Philipp Zwickela, Heiko Maaßa, J¨org Matthesa, Veit Hagenmeyera
4
+ aKarlsruhe Institute of Technology, Institute for Automation and Applied Informatics, Eggenstein-Leopoldshafen, Germany
5
+ bKarlsruhe Institute of Technology, Institute for Industrial Production, Karlsruhe, Germany
6
+ Abstract
7
+ In future energy systems with high shares of renewable energy sources, the electricity demand of buildings has to react to the
8
+ fluctuating electricity generation in view of stability. As buildings consume one-third of global energy and almost half of this
9
+ energy accounts for Heating, Ventilation, and Air Conditioning (HVAC) systems, HVAC are suitable for shifting their electricity
10
+ consumption in time. To this end, intelligent control strategies are necessary as the conventional control of HVAC is not optimized
11
+ for the actual demand of occupants and the current situation in the electricity grid. In this paper, we present the novel multi-zone
12
+ controller Price Storage Control (PSC) that not only considers room-individual Occupants’ Thermal Satisfaction (OTS), but also the
13
+ available energy storage, and energy prices. The main feature of PSC is that it does not need a building model or forecasts of future
14
+ demands to derive the control actions for multiple rooms in a building. For comparison, we use an ideal, error-free Model Predictive
15
+ Control (MPC) and a conventional hysteresis-based two-point control as upper and lower benchmarks, respectively. We evaluate
16
+ the three controllers in a multi-zone environment for cooling a building in summer and consider two different scenarios that differ
17
+ in how much the permitted temperatures vary. The results show that PSC strongly outperforms the conventional control approach in
18
+ both scenarios with regard to the electricity costs and OTS. It leads to 50 % costs reduction and 15 % comfort improvements while
19
+ the ideal MPC achieves costs reductions of 58 % and comfort improvements of 29 %. Considering that PSC does not need any
20
+ building model or forecast, as opposed to MPC, the results support the suitability of our developed control strategy for controlling
21
+ HVAC systems in future energy systems.
22
+ Keywords: multi-zone, thermal building model, RC model, model predictive control, price storage control, rule-based control,
23
+ occupant behavior, demand response, smart grid
24
+ 1. Introduction
25
+ Buildings consume one-third of global final energy [1]. Al-
26
+ most half of this energy is used by Heating, Ventilation, and
27
+ Air Conditioning (HVAC) systems to heat or cool buildings [2].
28
+ Especially the cooling demand is expected to increase signifi-
29
+ cantly in many parts of the world, as the climate warms on av-
30
+ erage [3]. In buildings, the energy consumption results from
31
+ Occupant Behavior (OB) and Occupants’ Thermal Satisfaction
32
+ (OTS) as they interact with the building’s energy systems and
33
+ require comfortable thermal conditions [4]. The energy demand
34
+ of buildings can be covered with renewable energies in order to
35
+ reduce greenhouse gas emissions [5].
36
+ Flexible electrical loads are pivotal for future energy systems
37
+ in view of stability to cope with the increasing share of intermit-
38
+ tent renewable energy sources like solar and wind energy. For
39
+ exploiting flexible electric loads in buildings, the HVAC oper-
40
+ ation can be integrated into Demand Response (DR) programs.
41
+ DR refers to the change of electricity demand in response to in-
42
+ ternal or external factors like the price of electricity [6]. In the
43
+ building sector, electrical HVAC systems, like heat pumps or
44
+ air conditioners, are suitable for DR. They can exploit existing
45
+ infrastructure like the building mass or hot water tanks to shift
46
+ their electricity demand in time [7]. Thus, they can significantly
47
+ contribute to better utilization of renewable energy sources and
48
+ simultaneously help to stabilize the electricity grid. In order
49
+ to use HVAC systems for DR, optimized control strategies are
50
+ necessary.
51
+ In addition to DR, designing the HVAC operation tailored to
52
+ the actual occupants’ needs could significantly reduce energy
53
+ use. For example in office spaces, often, not all rooms are oc-
54
+ cupied. The average occupancy rates of offices are rarely over
55
+ 60 % [8]. However, the HVAC control in offices usually does
56
+ not consider the actual occupancy of offices. This leads to un-
57
+ necessary energy use in unoccupied periods. 56 % of the en-
58
+ ergy consumed by buildings is used during unoccupied hours
59
+ and 44 % in occupied hours [9].
60
+ For the optimization of HVAC to consider DR and individual
61
+ OTS, advanced control strategies are required instead of stan-
62
+ dard thermostats [10], for example Model Predictive Control
63
+ (MPC) [11] or heuristic control strategies [12]. MPC finds the
64
+ optimal input trajectory for the HVAC system’s control outputs
65
+ over a future time horizon by solving an optimization problem
66
+ under consideration of future system dynamics, forecasts, and
67
+ constraints. Therefore, it requires a dynamic thermal building
68
+ model and forecasts of OB and weather [13]. The development
69
+ of models and forecasts can make MPC less practicable and
70
+ more expensive for real-world applications [11].
71
+ Preprint submitted to Applied Energy
72
+ January 10, 2023
73
+ arXiv:2301.03376v1 [eess.SY] 9 Jan 2023
74
+ Nomenclature
+ Acronyms:
+ BEV — Battery Electric Vehicle
+ DR — Demand Response
+ FMU — Functional Mock-up Unit
+ HVAC — Heating, Ventilation, and Air Conditioning
+ KPIs — Key Performance Indicators
+ MPC — Model Predictive Control
+ OB — Occupant Behavior
+ OTS — Occupants' Thermal Satisfaction
+ PI — Proportional Integral
+ PMV — Predicted Mean Vote
+ PPD — Predicted Percentage of Dissatisfied
+ PSC — Price Storage Control
+ PV — Photovoltaic
+ RBC — Rule-based Control
+ RC — Resistor Capacitor
+ Parameters:
+ χ_dis — discomfort factor
+ χ_mod — heat pump modulation degree
+ χ_p — price factor
+ χ_s — storage factor
+ Δt — time step in s
+ ε_h — coefficient of performance of the heat pump
+ ξ_j — allowed deviation buffer in K
+ C_ij — heat capacity of the room air in J K⁻¹
+ C_mj — heat capacity of the heat accumulating medium in J K⁻¹
+ g_sj — solar heat gain factor in m²
+ P_max — maximum electrical power of the heat pump in W
+ R_aj — resistance between T_ij and T_a in K W⁻¹
+ R_ij — resistance between T_ij and T_mj in K W⁻¹
+ S_j — state of thermal charge
+ y_max,j — maximal comfort temperature in °C
+ y_min,j — minimal comfort temperature in °C
+ y_rj — reference comfort temperature in °C
+ Variables:
+ Q̇_hj — heat flow of the heat pump in W
+ q̇_s — solar radiation in W m⁻²
+ F̂ — empirical distribution function
+ P_el — electrical power of the heat pump in W
+ t — time in s
+ T_a — ambient temperature in °C
+ T_ij — room air temperature in °C
+ T_mj — heat accumulating medium temperature in °C
158
+ In contrast, heuristic control strategies are model- and forecast-free algorithms. They iteratively adjust the power consumption of HVAC systems in order to achieve certain goals. To do this, they use rule-based control mechanisms and heuristic algorithms that adapt the HVAC system's heat flows to internal and external signals. Their core advantage is that they do not require a building model to solve an optimization problem [12]. Thus, they are applicable to any building without significant adjustments.
167
+ 1.1. Related Work
+ Different control approaches for HVAC systems are available in the literature. Tab. 1 compares the studies most relevant to the present paper. The most significant difference between control strategies is whether or not they require a model to operate.
+ Most studies in the literature use a model-based approach, as such approaches can find the optimal solution of an optimization problem [7]. Especially MPC is popular in the field of DR. Most authors use MPC for controlling HVAC systems, e.g. Maddalena et al. [14], Hu et al. [15], Pedersen et al. [16], Blum et al. [17], Mork et al. [20], and Zwickel et al. [22]. While model-based approaches generally yield adequate results, they suffer from long execution times and require modeling the thermal behavior of a building, which is a complex task.
+ Fewer studies use model-free control strategies. Compared to model-based strategies, the controller design process is significantly simplified, as no building-specific model is required. Model-free control algorithms can be found in the studies of Dengiz et al. [12], Rodríguez et al. [18], Nolting et al. [19], and Michailidis et al. [21]. These approaches are rule-based control mechanisms that are in a few cases also combined with a heuristic approach for optimizing an objective function.
+ In all studies, the objective is to reduce energy costs while satisfying OTS. Blum et al. [17] additionally consider the provision of ancillary services. Another essential requirement for most of the optimized control approaches is the availability of forecasts. However, most of the model-free approaches do not rely on any forecast.
+ Our literature review emphasizes the use of control algorithms for multiple zones (see Tab. 1). There are also control approaches in the literature that consider only buildings with one thermal zone (one uniform temperature in the whole building).
203
+ Table 1: Comparison of relevant papers studying approaches for demand response of HVAC systems. Compared properties (columns): model-free control; forecast-free control; multi-zone control; coupling of multiple buildings possible; comparison with a lower benchmark; comparison with an upper benchmark; use of measured data. Compared studies (rows): Maddalena et al., 2022 [14]; Hu et al., 2014 [15]; Pedersen et al., 2018 [16]; Blum et al., 2016 [17]; Dengiz et al., 2019 [12]; Rodríguez et al., 2018 [18]; Nolting et al., 2019 [19]; Mork et al., 2022 [20]; Michailidis et al., 2018 [21]; Zwickel et al., 2022 [22]; present work. (The per-cell check marks of the table did not survive the text extraction.)
315
+ However, the consideration of multiple zones is closer to the real thermal behavior of buildings, and it also increases the complexity of the optimization problem. Another essential feature of control algorithms for DR is their capability of coupling multiple buildings in a coordinated way. While most of the listed studies use a central controller for this, Dengiz et al. [12] define a hybrid control architecture. Zwickel et al. [22] compare central and decentral control approaches for multiple buildings.
+ To evaluate the performance of the developed control approach, all studies except for two use a conventional control approach, like a simple rule-based control, a hysteresis-based two-point controller, or a Proportional Integral (PI) controller, as a lower benchmark. The studies using MPC for controlling the heating or cooling device also define their results as an upper benchmark for the optimization problem, as an MPC approach is usually solved by finding the globally optimal solution. Most studies use simulated synthetic data for defining the building model and setting up the simulation. Only Maddalena et al. [14] and Michailidis et al. [21] also use measured data for evaluating the OTS.
335
+ 1.2. Contribution of this Paper
+ The main contribution of the present paper is the introduction of a novel heuristic multi-zone control approach, called Price Storage Control (PSC). It combines external factors (e.g. the electricity price) and internal factors (the temperatures of the different zones in the building) to determine when and how much electricity should be consumed for the generation of heat flows. The approach is model-free and does not need any forecasts. To the best of our knowledge, our study is the only one that introduces a novel control approach for buildings with multiple zones that needs neither a model nor forecasts and that allows for a coordinated coupling of multiple buildings. This is because of its capability to use any external factor for deriving the HVAC control output. Our study is the first that evaluates an introduced model-free and forecast-free control algorithm using a lower and an upper benchmark that are derived from the use of measured data (see Tab. 1).
+ To evaluate the PSC control performance in terms of OTS and energy costs, we compare three different control strategies on a multi-zone thermal building model. In the evaluation, we use two scenarios with different degrees of variable room usage. In the base scenario, the temperature range is scheduled between a comfort and a standby mode. The second scenario additionally allows room-individual temperature ranges, based on the use case of each room. For comparison, we use an ideal, error-free MPC and a hysteresis-based two-point controller as upper and lower benchmarks.
+ 1.3. Structure of this Paper
+ We develop and implement three different control strategies and an evaluation environment in the present work. We present the models in Sec. 2 and the controllers in Sec. 3, and evaluate them in Sec. 4. Finally, we draw conclusions from the evaluation results in Sec. 5.
367
+ 2. Models
+ In this section, we present the models that we apply for the evaluation (see Sec. 4) of the different control strategies (see Sec. 3). The model-based control strategy, the MPC, also uses the models internally to predict future system dynamics (see Sec. 3.2). The modeling section is separated into three parts: the model for the thermal dynamics of the building in Sec. 2.1, for the heat pump in Sec. 2.2, and for OTS in Sec. 2.3.
+ 2.1. Multi-Zone Thermal Building Model
+ In this section, we develop a multi-zone thermal building model to evaluate room-individual control strategies in Sec. 4. The model applies the Resistor Capacitor (RC) analogy to describe the heat flows between temperature nodes by resistors R and the thermal dynamics by capacitors C, as exemplarily shown in Eq. (1).
+ $$C \frac{dT(t)}{dt} = \dot{Q}_{in}(t) - \dot{Q}_{out}(t), \qquad \dot{Q}_{xy}(t) = \frac{T_x(t) - T_y(t)}{R} \tag{1}$$
390
+ We illustrate our thermal building model structure in Fig. 1. Applying this structure to each room j (j = 1 . . . n) results in a decentral multi-zone model [23].
+ [Figure 1: Thermal building model for each room j (j = 1 . . . n), obtained from [23] (modified from [24]): an RC network in which T_ij and T_mj are coupled via R_ij and T_ij and T_a via R_aj, with capacities C_ij and C_mj and inputs g_sj q̇_s and Q̇_hj.]
+ Mathematically, each room is thermally defined by the two differential equations Eq. (2) and Eq. (3):
+ $$C_{ij} \frac{dT_{ij}}{dt} = \frac{T_{mj} - T_{ij}}{R_{ij}} + \frac{T_a - T_{ij}}{R_{aj}} + g_{sj}\,\dot{q}_s + \dot{Q}_{hj}, \tag{2}$$
+ $$C_{mj} \frac{dT_{mj}}{dt} = \frac{T_{ij} - T_{mj}}{R_{ij}}. \tag{3}$$
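+ To make the room dynamics concrete, the following minimal Python sketch integrates Eqs. (2) and (3) for a single room with a forward-Euler step. The function name and the default parameter values are illustrative assumptions, not values taken from the paper.
+ def rc_room_step(T_i, T_m, T_a, q_s, Q_h, dt,
+                  C_i=1.0e6, C_m=1.0e7, R_i=1.0e-3, R_a=5.0e-3, g_s=2.0):
+     """Advance the room air (T_i) and thermal mass (T_m) temperatures of
+     one room by dt seconds, following Eqs. (2)-(3); Q_h < 0 means cooling.
+     The default capacities and resistances are placeholder values."""
+     dT_i = ((T_m - T_i) / R_i + (T_a - T_i) / R_a + g_s * q_s + Q_h) / C_i
+     dT_m = ((T_i - T_m) / R_i) / C_m
+     return T_i + dt * dT_i, T_m + dt * dT_m
+ Applying this step to each room j independently reproduces the decentral multi-zone structure.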
434
+ 2.2. Heat Pump Model
+ The modeled air-source heat pump has a maximum electrical power P_max and an energy efficiency ratio ε_h, which both depend on the ambient temperature. We use the model AERO SLM 3-11 HGL from the Austrian heat pump manufacturer iDM Energiesysteme GmbH [25] with a supply temperature of the cooling system of 18 °C. To calculate the efficiency and the maximum cooling power at every time slot, we use the data from the manufacturer's technical fact sheet and linear interpolation.
+ The heat pump can modulate its power consumption P_el and thus the heat flow Q̇_h with χ_mod between 20 % and 100 %. This leads to the following relation between the heat pump's electrical power P_el and the thermal building model's heat pump heat flows:
+ $$\sum_{j=1}^{n} \left|\dot{Q}_{hj}\right| = \left|\dot{Q}_h\right| = \varepsilon_h \cdot P_{el} = \varepsilon_h \cdot (\chi_{mod} \cdot P_{max}), \tag{4}$$
+ $$\chi_{mod} \in \{0\} \cup [0.2, 1], \tag{5}$$
+ $$P_{el} = \chi_{mod} \cdot P_{max}. \tag{6}$$
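+ As a small illustration of Eqs. (4)-(6), the following Python sketch maps a modulation degree to the electrical power and the total heat flow magnitude; the function name is a hypothetical helper, and eps_h and P_max would come from the interpolated fact-sheet data.
+ def heat_pump_output(chi_mod, P_max, eps_h):
+     """Electrical power (Eq. (6)) and total heat flow magnitude (Eq. (4))
+     for a modulation degree chi_mod in {0} U [0.2, 1] (Eq. (5))."""
+     assert chi_mod == 0.0 or 0.2 <= chi_mod <= 1.0
+     P_el = chi_mod * P_max
+     Q_h = eps_h * P_el
+     return P_el, Q_h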
461
+ 2.3. Occupants' Thermal Satisfaction (OTS) Model
+ In this section, we define the temperature ranges [y_min, y_max] based on international standards for Occupants' Thermal Satisfaction (OTS) modeling. The three most frequently cited OTS standards are ASHRAE Standard 55 [26], ISO 7730:2005 [27], and EN 16798-1:2019 [28]. These standards are fundamentally based on the Predicted Mean Vote (PMV) standard scale, which was first introduced by Fanger's model [29].
+ The PMV is a static model derived from a large group of people exposed to a given combination of thermal environmental and personal parameters. These parameters include metabolic activity, clothing, air temperature, radiant temperature, air velocity, and relative humidity. In a survey, occupants express their thermal sensations on a scale from -3 (too cold) to +3 (too warm), where 0 is optimal. Fanger also developed an equation that relates the PMV to the Predicted Percentage of Dissatisfied (PPD). The standard OTS guidelines aim for a PMV from -0.5 to +0.5 (OTS level II, see Tab. 2). The OTS level can also be defined within closer or wider PMV boundaries, e.g. ±0.2 for level I or ±0.7 for level III. Wider temperature limits result in a lower energy consumption of HVAC systems.
+ Based on these OTS levels in Tab. 2, we calculate the corresponding lower y_min and upper y_max temperature limits that are required for Eq. (21). For the calculation of the temperature limits, we use the CBE Thermal Comfort Tool [30] with the EN 16798 standard and summer clothing. In this tool, we set the mean radiant temperature equal to the air temperature T_i. This implies the assumption that the operative temperature is close to the air temperature. For more information about the operative temperature, we refer to our previous work [31]. The resulting temperature limits for the different levels of OTS are presented in Tab. 2.
+ Table 2: OTS categories, obtained from the CBE Thermal Comfort Tool [30] with EN 16798 and summer clothing
+ | OTS level | PMV  | PPD    | y_min in °C | y_max in °C |
+ | I         | ±0.2 | < 6 %  | 25.6        | 26.6        |
+ | II        | ±0.5 | < 10 % | 24.8        | 27.4        |
+ | III       | ±0.7 | < 15 % | 24.2        | 27.9        |
515
+ Based on the temperature limits, we calculate the reference comfort temperature y_rj in Eq. (7). This reference temperature is required for the controller design of the PSC in Sec. 3.1.
+ $$y_{rj} = \frac{y_{min,j} + y_{max,j}}{2} \tag{7}$$
522
+ 3. Control Strategies
+ This section describes the development of three different control strategies: PSC in Sec. 3.1, MPC in Sec. 3.2, and the hysteresis-based two-point control in Sec. 3.3. The objective is to minimize the electricity costs given a time-variable electricity price and to maximize the OTS. While we develop the PSC as a novel control methodology for occupant-oriented demand response with room-individual building control, the MPC and the hysteresis-based two-point controller are used as upper and lower benchmarks, respectively. The MPC was implemented in Python with a prediction horizon of 16 hours and solved using Gurobi. The PSC and the two-point controller are implemented in Python as well.
+ In general, the three control strategies are applicable to cooling or heating. For both cases, we use the generic term heat flows. A heat flow is the rate of net heat energy transfer between the hot and cold sides and can be positive or negative for heating or cooling, respectively.
542
+ 3.1. Price Storage Control (PSC)
+ The PSC is a heuristic control algorithm for modulating HVAC or heat pump heat flows Q̇_hj in a multi-zone building. It essentially consists of four steps, which it executes in every time slot:
+ 1. Determine the price factor χ_p(t) based on [12].
+ 2. Determine the storage factor χ_s(t).
+ 3. Calculate the modulation degree χ_mod using the price factor χ_p(t) and the storage factor χ_s(t).
+ 4. Distribute the generated heat flow to the different rooms of the multi-zone building.
553
+ Price Factor
+ To obtain the price factor χ_p, the algorithm calculates, at the beginning of each day, the empirical distribution function F̂(p) of the future electricity prices p(t) for the next 24 hours. We assume an electricity tariff with predetermined prices for the next 24 hours (for more information, see Sec. 4.1.1). At every time slot of the day, F̂(p) is evaluated at the current price p(t). The calculation of the empirical distribution function F̂(p) is illustrated in Fig. 2, exemplarily for one day. F̂(p) quantifies the share of electricity prices of the current day that have a lower or equal value compared to the price p of the current time slot. PSC sets the price factor at time slot t as in Eq. (8). A low price results in a high price factor (due to a low value of F̂(p)) and vice versa.
+ $$\chi_p(t) = 1 - \hat{F}(p(t)) \tag{8}$$
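+ A minimal sketch of this step, assuming the 24 hourly day-ahead prices of the current day are available as an array (the use of numpy and the function name are our implementation choices, not prescribed by the paper):
+ import numpy as np
+ 
+ def price_factor(prices_24h, p_now):
+     """chi_p from Eq. (8): one minus the empirical distribution function
+     of today's prices, evaluated at the current price p_now."""
+     F_hat = np.mean(np.asarray(prices_24h) <= p_now)  # share of prices <= p_now
+     return 1.0 - F_hat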
569
+ Storage Factor
+ For the calculation of the storage factor χ_s(t), the state of thermal charge S_j(t) from Eq. (9) is needed for each room. The state of thermal charge S_j(t) quantifies the "stored" temperature for each room individually and results in values between 0 and 1. Although the PSC method is applicable to heating or cooling heat flows, we explain the method in the following exemplarily for the cooling case.
+ $$S_j(t) = \frac{y_{rj} + \xi_j - T_{ij}(t - \Delta t)}{\xi_j} \tag{9}$$
+ If the temperature of room j from the last time slot T_ij(t − Δt) is lower than the reference temperature y_rj, the state of thermal charge S_j(t) is set to 1. This means that the thermal storage of this room is full and there is no necessity for applying heat flows to the room.¹ If the temperature of the room is higher than the reference comfort temperature y_rj plus an allowed deviation buffer ξ_j for sufficiently high OTS, the state of thermal charge S_j(t) is set to 0. In the cooling case, this corresponds to an empty thermal storage, as the temperature in the room is too high.
+ For every room temperature between the reference temperature and the upper OTS limit (y_rj + ξ_j), the algorithm uses Eq. (9) to calculate the state of thermal charge S_j(t) of room j (j = 1 . . . n). The reference temperature y_rj of every room is calculated as in Eq. (7). This value depends on the investigated scenarios (see Sec. 4).²
+ After having determined the state of thermal charge S_j(t) for all n rooms, the algorithm calculates the storage factor χ_s(t) using Eq. (10). If the temperatures in the different rooms are close to the lower limit, their corresponding states of thermal charge will be high, resulting in a low storage factor χ_s(t), and vice versa.
+ $$\chi_s(t) = 1 - \frac{\sum_{j=1}^{n} S_j(t)}{n} \tag{10}$$
+ ¹ As we consider cooling in the present work, it has to be noted that a full thermal storage in this case means that the temperature in the room is low and the room thus already holds enough "cooling energy".
+ ² For this internal parameter of the algorithm, a buffer value of ξ_j = 2 K yields adequate results in the present work.
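+ The per-room state of thermal charge and the resulting storage factor could be computed as follows for the cooling case; clipping S_j to [0, 1] implements the two boundary rules described above (a sketch with assumed array inputs and a hypothetical function name):
+ import numpy as np
+ 
+ def storage_factor(T_prev, y_r, xi):
+     """chi_s from Eqs. (9)-(10): T_prev holds the room temperatures of the
+     last time slot, y_r the reference temperatures, xi the buffers in K."""
+     S = np.clip((y_r + xi - T_prev) / xi, 0.0, 1.0)  # state of charge per room
+     return 1.0 - float(S.mean())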
611
+ Modulation Degree of the HVAC System
+ The third step of the algorithm is the calculation of the heat pump's modulation degree and thus the heat flow and the electrical power using Eq. (11). The modulation degree χ_mod(t) results from the multiplication of the price factor χ_p and the storage factor χ_s. Because both factors take values between 0 and 1, the modulation degree χ_mod(t) likewise varies between 0 and 1. We choose a multiplication of the two factors instead of a weighted sum, as this leads to better results in our case studies. Based on the modulation degree, Eq. (4) and Eq. (6) yield the generated heat flows and the electrical power.
+ $$\chi_{mod}(t) = \chi_p(t) \cdot \chi_s(t) \tag{11}$$
+ Two factors influence the heat pump power output. A high electricity price leads to a low price factor, which leads to low values of the modulation degree. This results in a low electricity consumption at that time. On the contrary, a low price leads to a high price factor, which incentivizes the heat pump to cool down the rooms. This is desired, as we want to generate heat flows when the electricity prices are low.
+ Besides the price factor, the storage factor impacts the generated heat flows and thus the consumed electricity. If the temperatures in the rooms are generally low, the storage factor has low values due to the high values of the states of thermal charge S_j(t). A low storage factor leads to a low power consumption and vice versa. This is also a desired property of the control algorithm. If the room temperatures are already low, there is no urgent need for cooling, whereas high room temperatures tend to lead to a higher generation of heat flow with the PSC algorithm.
640
+ Distribution of Heat Flows
+ In the final step, the algorithm distributes the generated heat flows to the different rooms j (j = 1 . . . n). To do this, the thermal discomfort dc_j(t) caused in each room by possibly too-high temperatures is determined. If the temperature of a room from the previous time slot T_ij(t − Δt) is higher than the upper temperature limit y_max,j, Eq. (12) quantifies the caused discomfort of room j and Eq. (13) the total caused discomfort d_c,total(t).
+ $$d_{cj}(t) = T_{ij}(t - \Delta t) - y_{max,j} \tag{12}$$
+ $$d_{c,total}(t) = \sum_{j=1}^{n} d_{cj}(t) \tag{13}$$
+ Based on the total caused discomfort d_c,total(t), the PSC algorithm distributes the generated heat flow Q̇_h at time t to each room j as Q̇_hj using Eq. (14). This mechanism ensures that especially rooms with high temperatures get more heat flow (cooling) than rooms with less need for cooling. If the heat pump generates heat flows although no room has violated its temperature boundaries in the last time slot, it distributes the generated heat flows equally to every room.
+ $$\dot{Q}_{hj}(t) = \frac{d_{cj}(t)}{\sum_{j=1}^{n} d_{cj}(t)} \cdot \dot{Q}_h(t) \tag{14}$$
+ Overall, PSC executes the four mentioned steps for every time slot of the day while updating the empirical distribution function of the prices at the beginning of each day.
+ [Figure 2: Empirical distribution function F̂(p) of the electricity prices p(t).]
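+ Putting the four steps together, one PSC time slot could look like the following sketch; the helper names refer to the sketches above and are hypothetical, and rounding sub-minimal modulation degrees up to 0.2 is our assumption, since the paper does not state how the disjoint set of Eq. (5) is handled.
+ import numpy as np
+ 
+ def psc_step(prices_24h, p_now, T_prev, y_r, y_max, xi, P_max, eps_h):
+     """One PSC time slot: modulation degree via Eq. (11), then the
+     heat-flow split of Eqs. (12)-(14). Returns per-room cooling flows."""
+     chi_mod = price_factor(prices_24h, p_now) * storage_factor(T_prev, y_r, xi)
+     if 0.0 < chi_mod < 0.2:                    # respect Eq. (5), see note above
+         chi_mod = 0.2
+     _, Q_h = heat_pump_output(chi_mod, P_max, eps_h)
+     dc = np.maximum(T_prev - y_max, 0.0)       # per-room discomfort, Eq. (12)
+     if dc.sum() > 0.0:
+         return dc / dc.sum() * Q_h             # proportional split, Eq. (14)
+     return np.full_like(T_prev, Q_h / len(T_prev))  # equal split otherwise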
680
+ 3.2. Model Predictive Control (MPC)
+ In contrast to PSC, MPC requires a model, which is obtained by restructuring the thermal building model from Sec. 2 into state-space notation. The resulting equations for n rooms are
+ $$\dot{x}(t) = f(x(t), u(t), z(t)) = A x(t) + B u(t) + E z(t), \qquad y(t) = g(x(t)) = C x(t), \tag{15}$$
+ where x describes the temperature states of the buildings' air temperatures (T_ij) and thermal masses (T_mj), u the control inputs (Q̇_hj), z the measurable disturbances (q̇_s, T_a), and y the outputs (T_ij), with
+ $$x = (T_{i1}\ T_{m1}\ T_{i2}\ T_{m2}\ \ldots\ T_{in}\ T_{mn})^T, \quad u = (\dot{Q}_{h1}\ \dot{Q}_{h2}\ \ldots\ \dot{Q}_{hn})^T, \quad z = (\dot{q}_s\ T_a)^T, \quad y = (T_{i1}\ T_{i2}\ \ldots\ T_{in})^T. \tag{16}$$
+ Given the heat pump model (see Eq. (4) and Eq. (5)) and the control model (Eq. (15)), we formulate the optimization problem with a prediction horizon of N time steps, which must be solved at each sampling instant t, based on [22], as
+ $$\min \sum_{k=t}^{t+N-1} l\big(k, \chi_{dis}(k|t), P_{el}(k|t)\big) \tag{17a}$$
+ subject to, for all k ∈ [0, N − 1]:
+ $$x(k{+}1|t) = A_d x(k|t) + B_d u(k|t) + E_d z(k|t), \tag{17b}$$
+ $$y(k|t) = C_d x(k|t), \tag{17c}$$
+ $$x(0|t) = x(t), \tag{17d}$$
+ $$P_{el}(k|t) = \chi_{mod}(k|t) \cdot P_{max}(k), \tag{17e}$$
+ $$\chi_{mod}(k|t) \in \{0\} \cup [0.2, 1], \tag{17f}$$
+ $$u(k|t) \in U, \quad y(k|t) \in Y, \tag{17g}$$
+ where l(k, ·, ·) is the stage cost, (17b) and (17c) are the discrete-time control model, (17d) is the initial condition, and (17e) and (17f) are the heat pump model. x(t) is typically measured at time t, and U as well as Y are input and output constraint sets (see (17g)). The k-step-ahead predictions of the states, inputs, disturbances, outputs, discomfort factor, heat pump modulation degree, and electric power, based on the current initial condition, are denoted by x(k|t), u(k|t), z(k|t), y(k|t), χ_dis(k|t), χ_mod(k|t), and P_el(k|t), respectively.
736
+ We consider the following stage cost
+ $$l\big(k, \chi_{dis}(k), P_{el}(k)\big) = \lambda \left( \sum_{j=1}^{n} \chi_{dis,j}(k) \right) + (1 - \lambda) \left( P'_{el}(k)\, p'(k) \right), \tag{18}$$
+ where λ ∈ [0, 1] is a user-defined weighting coefficient and p(k), k ∈ t : t + N − 1, is a time-dependent price signal (the future electricity prices). P'_el(k|t) and p'(k) are the min–max normalizations of P_el(k|t) and p(k),³ respectively, calculated as
+ $$P'_{el}(k) = \frac{P_{el}(k) - \min\{P_{el}\}}{\max\{P_{el}\} - \min\{P_{el}\}}, \qquad p'(k) = \frac{p(k) - \min\{p\}}{\max\{p\} - \min\{p\}}. \tag{19}$$
+ ³ In the present study, we apply min{P_el} = 0 W, max{P_el} = 3500 W, min{p} = 6.9 Cent/kWh, and max{p} = 58.1 Cent/kWh.
+ Furthermore, the control inputs u are limited to cooling, with the maximum total power constrained by the heat pump model as formulated in Eq. (4), which leads to
+ $$U = \left\{ u \in \mathbb{R}^n \,\middle|\, (\forall j \in [1, n]: u_j \leq 0) \,\wedge\, \sum_{j=1}^{n} |u_j| = \varepsilon_h \cdot P_{el} \right\}. \tag{20}$$
+ In addition, the control outputs should meet predefined time-variant temperature ranges [y_min,j, y_max,j] for each room j. This leads to the following soft constraints:
+ $$Y = \left\{ y \in \mathbb{R}^n \,\middle|\, (\forall j \in [1, n]: y_{min,j} - \chi_{dis,j} \leq y_j \leq y_{max,j} + \chi_{dis,j}) \,\wedge\, \chi_{dis,j} \geq 0 \right\}. \tag{21}$$
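+ For illustration, the optimization problem (17) could be prototyped as below. We use cvxpy as a stand-in for the paper's Gurobi setup, relax the non-convex modulation set of (17f) to [0, 1] (a faithful formulation needs an additional binary variable per step), and assume the discrete-time matrices and all forecast arrays over the horizon are given; the function name is hypothetical.
+ import cvxpy as cp
+ 
+ def mpc_step(Ad, Bd, Ed, Cd, x0, z, p, Pmax, eps_h, y_min, y_max, lam=0.5, N=64):
+     """Sketch of (17a)-(17g) with chi_mod relaxed to [0, 1]."""
+     nx, nu = Ad.shape[0], Bd.shape[1]
+     x = cp.Variable((nx, N + 1))
+     u = cp.Variable((nu, N), nonpos=True)   # cooling only, Eq. (20)
+     chi = cp.Variable(N)                    # relaxed modulation degree
+     d = cp.Variable((nu, N), nonneg=True)   # discomfort slacks, Eq. (21)
+     P_el = cp.multiply(chi, Pmax)           # Eq. (17e)
+     p_n = (p - p.min()) / (p.max() - p.min())   # p' from Eq. (19)
+     P_n = P_el / Pmax.max()                     # P'_el with min{P_el} = 0
+     stage = lam * cp.sum(d, axis=0) + (1 - lam) * cp.multiply(P_n, p_n)  # Eq. (18)
+     cons = [x[:, 0] == x0, chi >= 0, chi <= 1]
+     for k in range(N):
+         cons += [
+             x[:, k + 1] == Ad @ x[:, k] + Bd @ u[:, k] + Ed @ z[:, k],  # (17b)
+             cp.sum(-u[:, k]) == eps_h[k] * P_el[k],                     # Eq. (4)
+             Cd @ x[:, k] >= y_min[:, k] - d[:, k],                      # Eq. (21)
+             Cd @ x[:, k] <= y_max[:, k] + d[:, k],
+         ]
+     cp.Problem(cp.Minimize(cp.sum(stage)), cons).solve()
+     return u.value, chi.value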
791
+ 3.3. Hysteresis-based Two-point Controller
+ The hysteresis-based two-point controller serves as the lower benchmark for the evaluation. This is a conventional control strategy for cooling (or heating) devices that cools down a room until a lower temperature limit is reached. Afterward, the device switches off and waits until the temperature in the room has reached an upper limit, which triggers the control system to start cooling down again. We use an adaptive hysteresis that uses the upper and lower temperature limits [y_min,j(t), y_max,j(t)], depending on the scenario. These predefined temperature limits for OTS are described in the evaluation scenarios in Sec. 4.1.2.
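+ The two-point logic itself fits in a few lines; this hypothetical helper keeps the previous switching state inside the hysteresis band:
+ def hysteresis_cooling(T_i, cooling_on, y_min, y_max):
+     """Switch cooling on above y_max, off below y_min, else keep the state."""
+     if T_i >= y_max:
+         return True
+     if T_i <= y_min:
+         return False
+     return cooling_on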
802
+ 4. Evaluation
+ In this section, we compare the control algorithms and describe the used evaluation environment. First, we introduce the used data, scenarios, and metrics in Sec. 4.1. Then, we present the results in Sec. 4.2, discuss them in Sec. 4.3, and show limitations in Sec. 4.4.
+ 4.1. Data, Scenarios, and Metrics
+ 4.1.1. Data
+ We evaluate the control strategies using weather data from the summer of 2022, obtained from a weather station on an experimental building [32]. This building is located in the KIT Energy Lab 2.0 (Karlsruhe, Germany) and is presented in Fig. 3a. It has a design similar to a single-family home and is used as an office space. For the evaluation, we use the measurements of the weather station (the solar radiation q̇_s and the ambient temperature T_a) over a period of 13 weeks (05/30/2022 – 08/22/2022). Due to measurement gaps, we had to sort out three weeks (the 8th, 10th, and 13th week) and could only use the remaining ten weeks. We use the ten weeks of data in time steps of Δt = 15 min. For the variable electricity tariff, we use the data of the day-ahead market in Germany from the ENTSO-E Transparency Platform [33]. The price differs for every hour of the day, and we assume that these prices are directly forwarded to the customers.
+ In Sec. 4.2, we evaluate the control algorithms from Sec. 3 in an evaluation environment that is illustrated in Fig. 3. While the evaluation environment is inspired by the building design in Fig. 3a, the floor plan in Fig. 3b is generated artificially. The floor plan illustrates the different temperature demands of the rooms in Tab. 3 and Tab. 4. In the present work, we explicitly focus on the evaluation of control strategies instead of model parameter estimation from measurements. For more information about parameter identification, we refer to our previous work [23]. For the results in Sec. 4.2, the model parameters of the model in Sec. 2 are based on parameters from the literature [24] and scaled to consider different room sizes. The used model and parameters can be obtained from GitHub (as a Modelica file .mo and as a Functional Mock-up Unit (FMU) .fmu): https://github.com/Occupant-Oriented-Demand-Response/Conrol-Results.
842
+ 4.1.2. Scenarios
+ For the evaluation of the control algorithms, we consider two scenarios, namely the (a) base scenario and the (b) multi-zone adaptive scenario. The scenarios differ in the variability of their temperature ranges [y_min,j(t), y_max,j(t)]. The base scenario applies the same ranges to all rooms, while the second allows individual occupancy profiles. The temperature ranges are presented in Tab. 3 and Tab. 4 and applied for each day. Although this is an office scenario, we do not differentiate between different days, e.g. between weekdays and weekends, to simplify the evaluation.
+ (a) Base scenario: We use two different control modes in the base scenario (see Tab. 3): a comfort mode and a standby mode (inspired by Peng et al. [34]). We apply the comfort mode during working hours from 8AM to 5PM and the standby mode otherwise.
857
+ Table 3: (a) Base scenario, room air temperatures in °C
+ | Period     |         | Room 1 | Room 2 | Room 3 | Room 4 | Room 5 |
+ | 8AM to 5PM | y_min,j | 24.8   | 24.8   | 24.8   | 24.8   | 24.8   |
+ |            | y_max,j | 27.4   | 27.4   | 27.4   | 27.4   | 27.4   |
+ | else       | y_min,j | 16.0   | 16.0   | 16.0   | 16.0   | 16.0   |
+ |            | y_max,j | 30.0   | 30.0   | 30.0   | 30.0   | 30.0   |
+ All rooms j (j = 1 . . . 5) apply the same modes during the entire evaluation in the base scenario. As a result, the temperature ranges [y_min,j(t), y_max,j(t)] in Tab. 3 are equal across the different rooms. In contrast, the second scenario uses different modes in different rooms, depending on the use case of each room.
899
+ [Figure 3: Evaluation environment. (a) Experimental building with a design similar to a single-family home; (b) artificially generated floor plan over a ground floor and an upper floor, comprising the main office (room 1), second office (room 2), printer and storage room (room 3), kitchen (room 4), and bathroom (room 5), as well as hallways and a technical room.]
920
+ (b) Multi-zone adaptive scenario: The temperature ranges [y_min,j(t), y_max,j(t)] can differ across the rooms j (j = 1 . . . 5) (see Tab. 4). In addition to the comfort and standby modes, we also use an eco mode that shifts the reference temperature by 2 K (+2 K / −2 K for cooling/heating) compared to the comfort mode [34]. This eco mode saves energy compared to the comfort mode and also enables fast re-cooling / re-heating compared to the standby mode. In addition, the eco mode can save energy in rooms that are used less frequently than office rooms, e.g. bathrooms or kitchens.
930
+ Table 4: (b) Multi-zone adaptive scenario, room air temperatures in °C
+ | Period       |         | Room 1 | Room 2 | Room 3 | Room 4 | Room 5 |
+ | 8AM to 12AM  | y_min,j | 24.8   | 24.8   | 22.8   | 22.8   | 22.8   |
+ |              | y_max,j | 27.4   | 27.4   | 29.4   | 29.4   | 29.4   |
+ | 12AM to 1PM  | y_min,j | 22.8   | 22.8   | 22.8   | 24.8   | 22.8   |
+ |              | y_max,j | 29.4   | 29.4   | 29.4   | 27.4   | 29.4   |
+ | 1PM to 5PM   | y_min,j | 24.8   | 22.8   | 22.8   | 22.8   | 22.8   |
+ |              | y_max,j | 27.4   | 29.4   | 29.4   | 29.4   | 29.4   |
+ | else         | y_min,j | 16.0   | 16.0   | 16.0   | 16.0   | 16.0   |
+ |              | y_max,j | 30.0   | 30.0   | 30.0   | 30.0   | 30.0   |
990
+ In the multi-zone adaptive scenario (see Tab. 4), we let the control operate with a high focus on OTS in occupied rooms and on energy saving in unoccupied ones. Therefore, we use the comfort mode in the offices (rooms 1 and 2) during working hours and in the kitchen (room 4) during the lunch break from 12AM to 1PM. In this scenario, the first office (room 1) is used over the entire working day except the lunch break, and the second office (room 2) only from 8AM to 12AM (part-time job). The bathroom (room 5) and the storage room (room 3) are operated in eco mode during working hours (8AM to 5PM).
1000
+ 4.1.3. Metrics
+ We use two Key Performance Indicators (KPIs) to evaluate (i) how accurately a controller meets the desired OTS and (ii) how much energy the control strategy consumes in doing so. Mathematically, we define the KPIs as the weekly costs c_m,week in Eq. (22) and the mean weekly discomfort d_m,week in Eq. (23):
+ $$c_{m,week} = \sum_{k=1}^{M} \left( p(k) \int_{k} P_{el}(k)\, dt_k \right), \tag{22}$$
+ $$d_{m,week} = \frac{1}{M} \left| \sum_{k=1}^{M} \sum_{j=1}^{n} d_{cj}(k) \right|. \tag{23}$$
+ The KPIs consider the energy costs and the OTS during each time step k for all M = 672 time steps of each week. The energy costs c_m,week depend on the dynamic energy tariff p(k) and the consumed electric power P_el(k). The discomfort d_m,week evaluates the deviation d_cj(k) of the actual room temperature from the allowed OTS range. This permitted temperature range is time-variant, depending on room-individual usage/attendance profiles, as introduced in the scenarios in Sec. 4.1.2.
+ Both KPIs are competing, which means that when one is improved, the other usually deteriorates. The objective is to minimize both KPIs simultaneously, i.e., to achieve low costs and low discomfort.
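+ A direct translation of the two KPIs into Python, assuming per-step arrays of prices in EUR/kWh, electrical power in W, and per-room discomfort in K at the 15-minute step of this study (Δt = 900 s); the function name is hypothetical:
+ import numpy as np
+ 
+ def weekly_kpis(p, P_el, dc, dt=900.0):
+     """Weekly energy costs (Eq. (22)) and mean weekly discomfort (Eq. (23)).
+     p: (M,) prices in EUR/kWh; P_el: (M,) power in W; dc: (M, n) in K."""
+     cost = float(np.sum(p * P_el * dt / 3.6e6))   # W*s -> kWh via 3.6e6
+     discomfort = float(np.sum(dc)) / len(p)       # (1/M) * sum over k and j
+     return cost, discomfort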
1041
+ 4.2. Results
+ We present the results of the three different control algorithms, MPC (ideal and error-free), PSC, and the hysteresis-based two-point controller (see Sec. 3), in Fig. 4 and Fig. A.5. The overall results for both scenarios over the entire evaluation period of ten weeks can be obtained from Fig. 4. Fig. A.5 illustrates the dynamic response of the thermal building model to the three applied control strategies, exemplarily for the base scenario during one week.
+ Control Results for One Week
+ Fig. A.5 illustrates the dynamic behavior of the three controllers, MPC, PSC, and the hysteresis-based two-point controller, on the multi-zone thermal building model. The x-axis uses the time in days for one week in June/July 2022. On the y-axes, we present the air temperatures T_ij in °C for the five rooms (j = 1 . . . 5) and the three control strategies. The blue area shows the permitted temperature ranges for the air temperatures [y_min,j(t), y_max,j(t)]. The bottom y-axes present the controlled variable P_el, the disturbance variables T_a and q̇_s, and the dynamic electricity price function p.
+ The control characteristics of the three controllers are distinguishable, although, for all controllers, the controlled variable P_el of the heat pump operates noticeably on only three of the seven days (06-29, 06-30, and 07-03). In general, the temperature trajectories controlled by MPC and PSC are more similar to each other than to those controlled by the hysteresis-based two-point controller. The MPC cools most frequently but at a lower power P_el. In a few cases, the MPC exceeds the upper temperature limits, e.g. on 06-30. In return, the MPC meets the temperature ranges more adequately on the next day (07-01), when the temperatures under PSC and the hysteresis-based two-point controller are too low.
1075
+ Overall Results for Ten Weeks
+ We perform evaluations for the three controllers in two scenarios over ten different weeks and summarize the results in Fig. 4. On the y-axis in Fig. 4, we visualize the two KPIs, the mean weekly costs ("costs") and the mean discomfort ("discomfort") from Eq. (22) and Eq. (23). The results are shown for the two scenarios, the (a) base scenario and the (b) multi-zone adaptive scenario, where (a) and (b) are based on the temperature ranges in Tab. 3 and Tab. 4, respectively.
+ When evaluating the three control strategies in Fig. 4, the MPC and PSC show superior results in terms of costs and discomfort compared to the hysteresis-based two-point controller. In both scenarios, the MPC and PSC have lower discomfort and approximately half the costs of the hysteresis-based two-point controller (e.g. in (a) from 2.18 down to 1.08 or 1.18).
+ The performance of the MPC depends more on the evaluated scenario, (a) vs. (b), than that of the other two controllers. In the base scenario, the MPC and PSC have a similar overall performance (costs of 1.18 vs. 1.08 and discomfort of 0.52 vs. 0.59). In contrast, in the multi-zone adaptive scenario, the MPC outperforms the PSC with 38.5 % lower costs (from 1.09 down to 0.67) and also lower discomfort (from 0.19 down to 0.13).
+ In summary, we obtain the highest overall performance regarding costs and discomfort with the MPC and PSC, while the hysteresis-based two-point controller shows the lowest performance. The performance difference between MPC and PSC varies depending on the evaluation scenario. In the (a) base scenario, the MPC and PSC have similar control results, while in the (b) multi-zone adaptive scenario, the MPC outperforms the PSC with 38.5 % lower costs and also lower discomfort.
1105
+ 4.3. Discussion
+ The results in Sec. 4.2 evaluate the performance of the PSC by comparison with the upper and lower benchmarks, the ideal, error-free MPC and the hysteresis-based two-point controller, respectively. Overall, the control performance of the PSC is significantly superior to the hysteresis-based two-point controller and close to the ideal MPC. In the following, we discuss the differences in control performance.
+ The three controllers differ in their complexity and in how much knowledge about future system behavior they require. The hysteresis-based two-point controller uses only the minimal and maximal temperatures [y_min,j(t), y_max,j(t)] without any forecasts or models. When a maximal temperature is reached, it cools over a defined period. The PSC, on the other hand, tries to meet a reference temperature in the middle of the minimal and maximal ranges. The PSC requires knowledge about the temperature ranges, but also about the energy tariff and the heat pump modulation. Exploiting this knowledge reduces the energy costs of the PSC compared to the hysteresis-based two-point controller, because the PSC can apply cooling during periods of low energy prices.
+ The MPC uses the largest amount of available information, which increases its performance accordingly. It uses not only the temperature ranges, energy tariffs, and heat pump modulation; in addition, the MPC needs a thermal building model and weather forecasts. With that internal control model and the forecasts, the MPC can predict future system behavior in advance and schedule the cooling load optimally. As a result, MPC outperforms the PSC when high variations in the temperature ranges [y_min,j(t), y_max,j(t)] occur, as in the (b) multi-zone adaptive scenario. The MPC exploits the knowledge about the thermal storage inside the building, which enables finding an optimal cooling trajectory.
+ The PSC is suitable for tracking a reference temperature when fewer variations in the temperature ranges occur. In the (a) base scenario, the PSC and MPC show similarly high performance. It should be noted that in the base scenario, a major part of the discomfort results from too-cold temperatures instead of too-warm ones. None of the three controllers was allowed to heat the building. Especially in the morning periods, the low ambient temperatures make it challenging for the controllers to maintain warm enough thermal conditions in the building. The MPC can only mitigate temperatures that are too low by allowing temperatures that are too warm at other times (see Fig. A.5, 06-30 and 07-01). Overall, the MPC cannot play out its advantage as an optimal controller in the base scenario.
+ In summary, the controllers perform as expected: higher complexity and the use of more information improve the control quality. While the PSC clearly outperforms the hysteresis-based two-point controller, the differences between PSC and ideal MPC are much smaller. On the one hand, the MPC has a superior performance in one of the two scenarios. On the other hand, the MPC is significantly more complex to design, requiring a thermal model for each room and a forecast, both of which we assumed to be error-free for our case study. Compared to the conventional hysteresis-based two-point controller, PSC leads to a reduction of the two combined criteria, costs and discomfort, of 44 % (50 % cost reduction and 15 % comfort improvement), while the MPC achieves combined improvements of 53 % (58 % cost reduction and 29 % comfort improvement).
1169
+ [Figure 4: Control results of the three controllers, evaluated in the two scenarios (a) and (b), in terms of mean weekly costs [EUR] and mean discomfort [K]. (a) Base scenario: costs 1.18 / 1.08 / 2.18 and discomfort 0.52 / 0.59 / 0.7 for MPC / PSC / Hysteresis. (b) Multi-zone adaptive scenario: costs 0.67 / 1.09 / 2.19 and discomfort 0.13 / 0.19 / 0.22.]
+ 4.4. Limitations
+ The evaluation of the control strategies in this work is based on simulation results, which can neglect several effects of a real application. The control strategies are performed on a multi-zone thermal building model instead of a real building. The model parameters are based on literature values instead of being identified from measurements. The model and the weather forecasts of the MPC are assumed to be error-free.
+ The evaluation is limited to a cooling scenario of a single building. Weather data is used for ten weeks during summer in Karlsruhe, Germany; the cooling demand in Germany is lower than in other regions of the world. A heating scenario is not investigated. The evaluated scenarios consider no Photovoltaic (PV), battery, Battery Electric Vehicle (BEV), or thermal water storage in the optimization.
1219
+ 5. Conclusion
+ In this study, we investigate how a novel multi-zone Price Storage Control (PSC) can provide Demand Response (DR) while considering room-individual Occupants' Thermal Satisfaction (OTS), without using a thermal building model or weather forecasts. To this end, we develop three different control strategies, a multi-zone evaluation environment, and two different scenarios to compare the controllers. We compare the PSC with an ideal, error-free Model Predictive Control (MPC) and a hysteresis-based two-point controller as upper and lower benchmarks, respectively.
+ The ideal MPC and the PSC achieve a higher control performance than the hysteresis-based two-point controller in terms of energy costs and mean discomfort. The PSC leads to a reduction of both criteria combined of 44 %, while the MPC achieves improvements of 53 %. Considering that the PSC requires neither models nor forecasts, this control strategy seems especially beneficial for real-world control applications. Our developed control approach is easy to implement and can be used for every building without large-scale adjustments. Furthermore, it can include other external signals in its decision-making, like the load of the electricity grid or a generation signal of renewable energy sources. Thus, it can contribute to balancing electricity demand and supply and lead to a better utilization of renewable energy sources in future energy systems.
+ In future work, we want to apply the developed control strategy to a real-world application. For the MPC real-world application, we need to perform parameter identification and design a state estimator. For a more realistic scenario, we plan to include more relevant components in the optimization, e.g. thermal water storage, Photovoltaic (PV) self-production and -consumption, and batteries. Finally, we plan to evaluate the controllers in a heating scenario.
1252
+ Data Availability
+ We added the following supplementary materials to an open-source online repository on GitHub:
+ • results of the three control strategies for all individual weeks in both scenarios,
+ • used input data for the electricity price and weather data,
+ • commented Python code of the three control strategies.
+ https://github.com/Occupant-Oriented-Demand-Response/Conrol-Results
1261
+ Acknowledgment
+ This work was conducted within the project FlexKälte, funded by the German Federal Ministry for Economic Affairs and Climate Action (BMWK). The authors would like to thank their colleagues from the Energy Lab 2.0 and the Institute for Automation and Applied Informatics (IAI) for all the fruitful discussions and collaborations.
1268
+ Appendix A. Control Results for One Week
+ The control results for one week, as discussed in Sec. 4.2, are presented in Fig. A.5.
1273
+ References
+ [1] IEA, Buildings – a source of enormous untapped efficiency potential (2022). URL https://www.iea.org/topics/buildings (accessed: 15.12.2022).
+ [2] L. Yang, H. Yan, J. C. Lam, Thermal comfort and building energy consumption implications – a review, Applied Energy 115 (2014) 164–173. doi:10.1016/j.apenergy.2013.10.062.
+ [3] R. Mutschler, M. Rüdisüli, P. Heer, S. Eggimann, Benchmarking cooling and heating energy demands considering climate change, population growth and cooling device uptake, Applied Energy 288 (2021) 116636. doi:10.1016/j.apenergy.2021.116636.
+ [4] T. Hong, D. Yan, S. D'Oca, C.-f. Chen, Ten questions concerning occupant behavior in buildings: The big picture, Building and Environment 114 (2017) 518–530. doi:10.1016/j.buildenv.2016.12.006.
+ [5] F. M. Vieira, P. S. Moura, A. T. de Almeida, Energy storage system for self-consumption of photovoltaic energy in residential zero energy buildings, Renewable Energy 103 (2017) 308–320. doi:10.1016/j.renene.2016.11.048.
+ [6] M. H. Albadi, E. F. El-Saadany, Demand response in electricity markets: An overview, in: 2007 IEEE Power Engineering Society General Meeting, 2007, pp. 1–5. doi:10.1109/PES.2007.385728.
+ [7] T. Dengiz, Optimization approaches for exploiting the load flexibility of electric heating devices in smart grids, Ph.D. thesis, Karlsruher Institut für Technologie (KIT) (2021). doi:10.5445/IR/1000131495.
+ [8] A. Mahdavi, A. Mohammadi, E. Kabir, L. Lambeva, Occupants' operation of lighting and shading systems in office buildings, Journal of Building Performance Simulation 1 (1) (2008) 57–65. doi:10.1080/19401490801906502.
+ [9] O. Masoso, L. Grobler, The dark side of occupants' behaviour on building energy use, Energy and Buildings 42 (2) (2010) 173–177. doi:10.1016/j.enbuild.2009.08.009.
+ [10] P. H. Shaikh, N. B. M. Nor, P. Nallagownden, I. Elamvazuthi, T. Ibrahim, A review on optimized control systems for building energy and comfort management of smart sustainable buildings, Renewable and Sustainable Energy Reviews 34 (2014) 409–429. doi:10.1016/j.rser.2014.03.027.
+ [11] J. Drgoňa, J. Arroyo, I. Cupeiro Figueroa, D. Blum, K. Arendt, D. Kim, E. P. Ollé, J. Oravec, M. Wetter, D. L. Vrabie, L. Helsen, All you need to know about model predictive control for buildings, Annual Reviews in Control 50 (2020) 190–232. doi:10.1016/j.arcontrol.2020.09.001.
+ [12] T. Dengiz, P. Jochem, W. Fichtner, Demand response with heuristic control strategies for modulating heat pumps, Applied Energy 238 (2019) 1346–1360. doi:10.1016/j.apenergy.2018.12.008.
+ [13] M. Frahm, P. Zwickel, J. Wachter, F. Langner, P. Strauch, J. Matthes, V. Hagenmeyer, Occupant-oriented economic model predictive control for demand response in buildings, in: 13th ACM International Conference on Future Energy Systems, e-Energy'22, Association for Computing Machinery, 2022. doi:10.1145/3538637.3538864.
+ [14] E. T. Maddalena, S. A. Müller, R. M. dos Santos, C. Salzmann, C. N. Jones, Experimental data-driven model predictive control of a hospital HVAC system during regular use, Energy and Buildings 271 (2022) 112316. doi:10.1016/j.enbuild.2022.112316.
+ [15] J. Hu, P. Karava, A state-space modeling approach and multi-level optimization algorithm for predictive control of multi-zone buildings with mixed-mode cooling, Building and Environment 80 (2014) 259–273. doi:10.1016/j.buildenv.2014.05.003.
+ [16] T. H. Pedersen, S. Petersen, Investigating the performance of scenario-based model predictive control of space heating in residential buildings, Journal of Building Performance Simulation 11 (4) (2018) 485–498. doi:10.1080/19401493.2017.1397196.
+ [17] D. H. Blum, N. Xu, L. K. Norford, A novel multi-market optimization problem for commercial heating, ventilation, and air-conditioning systems providing ancillary services using multi-zone inverse comprehensive room transfer functions, Science and Technology for the Built Environment 22 (6) (2016) 783–797. doi:10.1080/23744731.2016.1197718.
+ [18] L. Romero Rodríguez, J. Sánchez Ramos, S. Álvarez Domínguez, U. Eicker, Contributions of heat pumps to demand response: A case study of a plus-energy dwelling, Applied Energy 214 (2018) 191–204. doi:10.1016/j.apenergy.2018.01.086.
+ [19] L. Nolting, A. Praktiknjo, Techno-economic analysis of flexible heat pump controls, Applied Energy 238 (2019) 1417–1433. doi:10.1016/j.apenergy.2019.01.177.
+ [20] M. Mork, A. Xhonneux, D. Müller, Nonlinear distributed model predictive control for multi-zone building energy systems, Energy and Buildings 264 (2022) 112066. doi:10.1016/j.enbuild.2022.112066.
+ [21] I. T. Michailidis, T. Schild, R. Sangi, P. Michailidis, C. Korkas, J. Fütterer, D. Müller, E. B. Kosmatopoulos, Energy-efficient HVAC management using cooperative, self-trained, control agents: A real-life German building case study, Applied Energy 211 (2018) 113–125. doi:10.1016/j.apenergy.2017.11.046.
+ [22] P. Zwickel, M. Frahm, J. Galenzowski, K.-H. Häfele, H. Maaß, S. Waczowicz, V. Hagenmeyer, Demand response in smart districts: Model predictive control of building cooling, in: 2022 IEEE PES Innovative Smart Grid Technologies Europe (ISGT-Europe), 2022.
+ [23] M. Frahm, S. Meisenbacher, E. Klumpp, R. Mikut, J. Matthes, V. Hagenmeyer, Multi-zone grey-box thermal building identification with real occupants, in: 9th ACM International Conference on Systems for Energy-Efficient Buildings, Cities, and Transportation, BuildSys'22, Association for Computing Machinery, 2022. doi:10.1145/3563357.3567403.
+ [24] H. Madsen, J. Holst, Estimation of continuous-time models for the heat dynamics of a building, Energy and Buildings 22 (1) (1995) 67–79. doi:10.1016/0378-7788(94)00904-X.
+ [25] iDM Energiesysteme GmbH, AERO SLM Luftwärmepumpe – iDM Energiesysteme GmbH (2020). URL https://www.idm-energie.at/aero-slm-luftwaermepumpe/
+ [26] ANSI/ASHRAE, Standard 55, Thermal environmental conditions for human occupancy (2017).
+ [27] ISO, 7730 – Ergonomics of the thermal environment – Analytical determination and interpretation of thermal comfort using calculation of the PMV and PPD indices and local thermal comfort criteria (2005).
+ [28] CEN, EN 16798-1 – Energy performance of buildings – Ventilation for buildings. Part 1: Indoor environmental input parameters for design and assessment of energy performance of buildings addressing indoor air quality, thermal environment, lighting and acoustics (2019).
+ [29] P. O. Fanger, Thermal Comfort: Analysis and Applications in Environmental Engineering, McGraw-Hill, New York, 1970.
+ [30] F. Tartarini, S. Schiavon, T. Cheung, T. Hoyt, CBE Thermal Comfort Tool: Online tool for thermal comfort calculations and visualizations, SoftwareX 12 (2020) 100563. doi:10.1016/j.softx.2020.100563.
+ [31] M. Frahm, F. Langner, P. Zwickel, J. Matthes, R. Mikut, V. Hagenmeyer, How to derive and implement a minimalistic RC model from thermodynamics for the control of thermal parameters for assuring thermal comfort in buildings, in: Open Source Modelling and Simulation of Energy Systems (OSMSES), IEEE, 2022. doi:10.1109/OSMSES54027.2022.9769134.
+ [32] V. Hagenmeyer et al., Information and communication technology in Energy Lab 2.0, Energy Technology 4 (1) (2016). doi:10.1002/ente.201500304.
+ [33] ENTSO-E Transparency Platform (21.07.2022). URL https://transparency.entsoe.eu/
+ [34] Y. Peng, A. Rysanek, Z. Nagy, A. Schlüter, Using machine learning techniques for occupancy-prediction-based cooling control in office buildings, Applied Energy 211 (2018) 1343–1358. doi:10.1016/j.apenergy.2017.12.002.
1415
+ [Figure A.5: Control results for the three controllers (Hysteresis, PSC, MPC) in the (a) base scenario over one week (2022-06-27 to 2022-07-03): room air temperatures T_i1 to T_i5 in °C, heat pump power P_el in kW (0 to 3.5), ambient temperature T_a in °C, solar radiation q̇_s in kW/m², and electricity price p in cent/kWh.]
M9E1T4oBgHgl3EQftQXp/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
ONE0T4oBgHgl3EQfjgHF/content/tmp_files/2301.02461v1.pdf.txt ADDED
@@ -0,0 +1,1388 @@
+ Preprint version of: Ghorbani, Fatemeh, Mahsa Farshi Taghavi, and Mehdi Delrobaei. "Towards an intelligent assistive system based on augmented reality and serious games." Entertainment Computing 40 (2022): 100458. https://doi.org/10.1016/j.entcom.2021.100458.
+
+ Towards an Intelligent Assistive System Based on Augmented Reality and Serious Games
+ Fatemeh Ghorbani1, Mahsa Farshi Taghavi1, Mehdi Delrobaei1,2
+ 1 Faculty of Electrical Engineering, K. N. Toosi University of Technology, Tehran 1631714191, Iran.
+ 2 Department of Electrical and Computer Engineering, Western University, London, ON N6A 5B9, Canada.
+ E-mail addresses: fatemeghorbani@email.kntu.ac.ir, m.farshitaghavi@email.kntu.ac.ir, delrobaei@kntu.ac.ir, https://orcid.org/my-orcid?orcid=0000-0002-4188-6958
+
+ Abstract - Age-related cognitive impairment is generally characterized by gradual memory loss and decision-making difficulties. The aim of this study is to investigate multi-level support and suggest relevant helping means for the elderly with mild cognitive impairment, as well as their caregivers, as the primary end-users. This work reports preliminary results on an intelligent assistive system, achieved through the integration of Internet of Things, augmented reality, and adaptive fuzzy decision-making methods. The proposed system operates in different modes, including automated and semi-automated modes. The former helps the user complete their daily life activities by showing augmented reality messages or making automatic changes, while the latter allows manual changes after the real-time assessment of the user's cognitive state based on the augmented reality serious game score. We have also evaluated the accuracy of the serious game score with 37 elderly participants and compared it with the users' paper-based cognitive test results. We noted that there is an acceptable correlation between the paper-based test and the users' serious game scores. Moreover, we observed that the system response in the semi-automated mode causes less data loss compared with the automated mode, as the number of active devices decreases.
+
+ Keywords: Augmented reality, Fuzzy decision-making, intelligent assistive technology, Internet of things, serious game.
+
+ 1. Introduction
+ Recent projections of the world population indicate a clear trend towards an older population: the proportion of the global population aged 65 years or over is expected to increase from 9.3 percent in 2020 to 16.0 percent by 2050 [1]. Moreover, age-related cognitive impairment is a degenerative neurological disorder characterized by progressive loss of memory affecting the elderly [2]. Loss of memory and lacking the capacity to make decisions are the two main difficulties experienced by the elderly and patients with cognitive impairments. Nearly 5.8 million patients are currently suffering from Alzheimer's disease (AD), the most common cause of cognitive impairments, in the United States alone [3]. They generally have problems remembering recent information and completing everyday tasks. Therefore, they should always be reminded of the required tasks, which improves their confidence and quality of life [4].
+ In response to the increasing number of elderly people with mild cognitive impairment (MCI) and a lack of treatment that slows or stops its progression, the pervasive deployment of intelligent assistive (IA) systems could have a disruptive effect on the daily life of the elderly and their caregivers [5]. IA systems can (i) reduce the burden on public finances through the postponement or removal of institutional care, (ii) lessen the psychological burden on formal caregivers and families, (iii) compensate for the lack of human caregivers while improving and optimizing the quality of care, and (iv) empower older adults with MCI and thereby enhance their confidence [6]. For example, in [7], the authors proposed the MED-AR system, based on intelligent augmented reality (AR), for helping older adults with the medication task. The system was designed to present a research methodology for tracking and distributing prescribed medicines for older adults in a home health care scenario.
+ On the other hand, serious games have had positive impacts on reconstructing a functional environment where the user can potentially encounter real-world scenarios [8–10]. Moreover, researchers have shown that they are a valid evaluation tool for activities of daily living [11] as well as cognitive screening [12]. For instance, researchers have evaluated activities of daily living through a serious game platform by integrating game playing with a serious purpose. The aim of that study was to assess functional independence in older people through five tasks using the Smart Aging game [13]. In another study, the authors developed a serious game based on virtual reality to estimate cognitive disabilities during navigational tasks. Users had to follow arrows to get to special places and find the way back home on their own [14]. Even though the number of studies in this area has been significantly expanding, few researchers have addressed the development of IA systems applicable to age-related MCI care, based on the fact that users' daily cognitive abilities can change.
+ The aim of our study is to suggest an IA system for elderly people with MCI and improve their ability to complete everyday tasks on their own. In our previous study [15], we implemented a task prompting system based on AR messages without any adaptive decision-making engine to evaluate the user's general cognitive condition. To increase the system's intelligence for making correct reminders, we take advantage of an AR-based serious game assessment tool.
+ The suggested AR-based serious game in this work is potentially capable of providing entertainment and mental state examination simultaneously. Our target population and primary end-users are individuals experiencing MCI who can understand the technology they are using and its expected assistance and risks. This preliminary study is focused on assessing the proposed system's usability by older adults (with and without MCI) and considering their feedback.
+ In more detail, based upon the above suggestions, this paper proposes a system with the following features:
+ • To implement the Internet of Things (IoT) system, the system benefits from the Message Queuing Telemetry Transport (MQTT) connectivity protocol for communication. All data values, such as indoor positioning data, embedded sensor readings, and cognitive serious game scores, can be published to the server in the system's automatic mode. Family members or caregivers can also subscribe to each particular topic and override the message for the user to send reminders if needed.
+ • In the automatic mode, once an event happens in a specific indoor location, all the relevant fuzzy rules are checked on the server. Thus, following the decision-making algorithm, data values are updated. Both the user and the caregiver receive appropriate notifications.
+ • To monitor the user's real-time location, the user wears an indoor localization tag. The localization tag sends the positioning information to the monitor module, and the monitor module receives all the published data.
+ • According to the AR serious game score, the operating mode of the IA system changes to the manual mode through real-time assessment of the user's cognitive state. This approach saves the devices' energy and enhances accuracy and performance.
+
+ 2. Materials and methods
+ 2.1. System architecture
+ As the daily routine of an elderly person with MCI is contingent on the surrounding objects in the environment, environmental conditions such as temperature, humidity, and CO2 level are essential for designing a smart home environment. In our work, such data are published on the cloud layer by placing different sensors and actuators in several locations such as the kitchen, bedroom, and washing room.
+ Furthermore, Wireless Sensor Networks (WSNs) have been deployed in the local fog layer to collect data about the user's location. Three anchors are placed in the home environment to estimate the user's real-time location.
+ All the communication in our IA system is done over MQTT with data serialized in JavaScript Object Notation (JSON) format. To avoid unnecessary transmission of data, the operating mode of the IA system can be changed to the semi-automated mode based on the decision-making algorithm results.
+ We base our model on a localization tag worn by the user for the positioning system. The application for interacting with the end-user and assessing their cognitive functions is developed for the Android operating system and implemented on a smartphone.
+ To send all the data to the cloud, we use the MQTT protocol as the messaging protocol. MQTT is a commonly used application layer protocol for transmitting data between the devices in an IoT architecture due to its simplicity and scalability [16]. We have utilized the HiveMQ server as the MQTT message broker and Wemos D1 Mini modules for each sensor, actuator, and the data coordinator in the positioning system to collect the user's indoor data. Fig. 1 shows the general architecture of the proposed IA system working in its automated mode.
+ We have also designed a service-based application (based on a C# cloud service) for caregivers to present the data related to the events detected by the sensors and to control the actuators embedded in the smart home environment [17]. The user's cognitive state and game results can also be monitored by the caregivers.
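+ As a rough illustration of this publish/subscribe flow, the following minimal sketch uses Python with the paho-mqtt client instead of the device firmware; the broker host, topic names, and payload fields are our own illustrative assumptions, not the paper's configuration:
+ ```python
+ # Sketch of the MQTT flow described above (paho-mqtt 1.x style client).
+ # Broker host, topic names, and payload fields are illustrative assumptions.
+ import json
+ import paho.mqtt.client as mqtt
+
+ client = mqtt.Client()
+ client.connect("broker.hivemq.com", 1883)  # public broker as a stand-in
+
+ # Device side: publish an environment reading as a JSON payload.
+ reading = {"location": "kitchen", "temperature_c": 27.5, "gas_detected": False}
+ client.publish("ia/home/kitchen/env", json.dumps(reading), qos=1)
+
+ # Caregiver side: subscribe to the whole topic tree and print updates.
+ def on_message(cl, userdata, msg):
+     print(msg.topic, json.loads(msg.payload))
+
+ client.subscribe("ia/home/#")
+ client.on_message = on_message
+ client.loop_forever()
+ ```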
+ 2.2. Adaptive decision-making process
+ In this section, we consider an adaptive fuzzy method to convert our system into an intelligent and context-aware system that supports monitoring of users and improves their ability to perform their daily tasks. To interact with the user correctly, the decision-making process should be adaptive and precise to avoid false notifications or false-positive AR messages. In some situations, such as completing daily tasks, people occasionally require assistance for memory recall.
+ To update each variable of the IA system on the cloud and make the right decision, we employed an adaptive fuzzy logic model that selects the most appropriate values. The model of the fuzzy adaptive decision-making is shown in Fig. 2.
+ A fuzzy controller has four main components: the rule base, the inference system, the fuzzifier interface, and the defuzzifier interface. In our system, we have designed two knowledge bases elicited from the user's cognitive state and the AR game score. These rule bases are necessary for deciding when to provide aids and for predicting the user's target accurately. We now go through each part of the model.
+ 2.2.1. Variables and membership functions
+ In our proposed model, the input variables are several types of data from the embedded devices, the user's real-time location, and their cognitive state. The output variables are the different types of AR messages received by the user or their caregivers. Table 1 shows the input and output parameters, their data types, and their membership functions.
+ We can add other home objects and calculate the distance as a fuzzy input variable. For each input and output variable, membership functions are defined based on the data types and fuzzy rules.
+ Fig. 3 shows the membership functions for the user's movement time as well as the AR game score. Output variables are voice, image, and text messages, defined as fuzzy singleton membership functions. Each message has an identification number (ID) and can be activated by particular inputs. A triangular membership function describes the states of the relay actuators.
+ Table 1. Description of inputs and outputs of the fuzzy system.
+ Parameter | Fuzzy membership function | Data type
+ Game score | Low, high | Linguistic
+ Game | Start, Stop | Boolean
+ Time | Early morning, morning, early afternoon, late afternoon, evening, night, midnight | Linguistic
+ Distance | Near, far, very far | Linguistic
+ Humidity | Very dry, dry, humid, very humid | Linguistic
+ Temperature | Very cold, cold, cool, mild, warm, hot, very hot | Linguistic
+ Reminder | Yes, No | Boolean
+ Movement | Low, medium, high | Linguistic
+ Flame detection | Yes, No | Boolean
+ Gas detection | Yes, No | Boolean
+ Relay status | Yes, No | Boolean
+ Voice message | 1, 2, … | Integer
+ Image message | 1, 2, … | Integer
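+ As a sketch of how such Gaussian membership degrees can be computed, consider the snippet below; the set centers and widths are read approximately off Fig. 3(a) and are assumptions, not the paper's exact parameters:
+ ```python
+ # Gaussian membership degrees for the "movement" input (cf. Fig. 3(a)).
+ # Centers and widths are approximate assumptions read from the plot.
+ import numpy as np
+
+ def gaussian_mf(x, center, sigma):
+     # Gaussian membership: exp(-0.5 * ((x - c) / sigma)^2)
+     return np.exp(-0.5 * ((x - center) / sigma) ** 2)
+
+ sets = {"low": (3.5, 0.7), "medium": (5.5, 0.7), "high": (8.0, 0.7)}
+
+ hours = 4.2  # one day's measured movement time, in hours
+ degrees = {name: float(gaussian_mf(hours, c, s)) for name, (c, s) in sets.items()}
+ print(degrees)  # low ≈ 0.61, medium ≈ 0.18, high ≈ 0
+ ```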
+ 2.2.2. Fuzzy rule-bases
+ After defining each variable's membership function, we must build the rule base, including all of the expert IF-THEN rules. In our IA system, two main rule bases are essential for creating an adaptive decision-making process and multi-level support. The first one helps the user complete activities independently in their daily life by showing AR messages or generating automatic changes such as actuator activation. The second one allows manual changes after real-time assessment of the user's cognitive state according to the AR-based serious game score. In this situation, some of the smart home sensors and reminders can be turned off or disabled.
+ Some of these scenarios and their fuzzy rules are presented in Table 2. These scenarios are precisely defined to choose the most appropriate ones for the first experiment with individuals with cognitive impairments [4,18].
+ Once an event occurs (for example, a medication time alarm), input data values such as location, AR game score, and sensor readings are published on the cloud via the MQTT protocol. Moreover, the fuzzy rules are checked, and the corresponding output variables are updated and published on their topics. Then, according to which rule bases are activated, the user receives different types of messages, or the user's manual changes can be enabled.
+ The adaptive fuzzy decision-making algorithm is capable of scaling up to more rules to improve the user's independence. The locations of different objects and the most used ones can also be added to the database. Therefore, new rules or new objects can be added, and other rules can be removed or updated remotely by the caregivers based on their knowledge of each user's lifestyle; they can also manually start a scenario or send simple reminders to the user. The fuzzy rules must be endowed with a base of knowledge of each user provided by the caregiver, so that all the activities are adapted to their preferences [18].
+ According to [4], elderly people with MCI often lose their ability to sequence in activities such as putting on clothes or cooking. When the user walks through different rooms and tries to interact with other objects, the IA system tries to predict the user's target based on the distance between the user and the predefined objects and other inputs.
+ Lack of movement is also an important issue that should be considered in the users' daily life. The system measures the user's movement to check this factor via the localization tag worn by the user. If the user moves between 3.5 and 4.5 h per day, the IA system sends a reminder to the user and their caregiver [19]. In another scenario, once the user is near the drawer of a wardrobe or closet in the bedroom, the IA system can suggest to them what to wear. This approach can help the user make everyday decisions before they get nervous.
+ [Fig. 1. Intelligent Assistive system architecture. All IoT data values, such as indoor positioning data, embedded sensors, and serious game scores, are published to the server. Following the fuzzy decision-making algorithm and serious game score, data values are updated in the automatic mode.]
+ [Fig. 2. Fuzzy adaptive decision-making model. Two knowledge bases are obtained from the user's cognitive state and the serious game score. User and caregiver receive appropriate notifications based on the fuzzy rules' outputs.]
+ [Fig. 3. Gaussian membership function of the user's (a) movement and (b) game score.]
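+ A minimal sketch of how one such IF-THEN rule could be evaluated in code follows; the helper names, thresholds, and the piecewise-linear stand-in for the Gaussian "low" set are assumptions, with the message ID taken from Table 2:
+ ```python
+ # Sketch: evaluating a "lack of movement" rule in the style of Table 2.
+ # Helper names and the piecewise-linear membership are assumptions.
+ def membership_low_movement(hours):
+     # Stand-in for the Gaussian "low" set of Fig. 3(a): fully "low"
+     # below 3.5 h of daily movement, fading out by 4.5 h.
+     if hours <= 3.5:
+         return 1.0
+     if hours >= 4.5:
+         return 0.0
+     return (4.5 - hours) / 1.0
+
+ def lack_of_movement_rule(movement_hours, time_of_day):
+     degree = membership_low_movement(movement_hours)
+     if time_of_day == "early afternoon" and degree > 0.5:
+         return {"command": "alert", "voice_message_id": 6, "degree": degree}
+     return None  # rule does not fire
+
+ print(lack_of_movement_rule(3.8, "early afternoon"))  # fires with degree 0.7
+ ```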
+ Table 2. Examples of fuzzy rules.
+ Event | User's location | If | Then | Command type
+ Entertainment/cognitive assessment | Not specific | Time is "evening" | Game is "start" | Launching AR game
+ Medication schedule | Not specific | Time is "morning" | Voice message is "1" and Image message is "1" | Reminder
+ Leaving the stove on | Not specific | Gas detection is "Yes" | Relay status is "Yes" and Voice message is "2" | Alert
+ Trouble with cooking | Kitchen | Distance* is "near" | Voice message is "3" and Image message is "2" | Reminder
+ Putting on clothes | Bedroom | Distance is "near" | Voice message is "4" and Image message | Reminder
+ Meal time | Not specific | Time is "early afternoon" | Voice message is "5" and Image message is "4" | Reminder
+ Lack of movement | Not specific | Time is "early afternoon" | Voice message is "6" | Alert
+ High game score | Not specific | Game score is "high" | Reminder is "No" | Disable reminders
+ *Distance between the user and a particular object (refrigerator, drawer, etc.)
+ 2.3. Real-time position estimation
+ To support the user's interaction with the objects and a monitoring platform for caregivers, we need the real-time location of the user. GPS introduces many challenges, such as high energy consumption and vulnerabilities that may cause poor accuracy rates in specific environments, for example, indoor locations. Based on the literature, the most accurate beacon-based local positioning system (LPS) solutions are those using ultrasound [20] or ultrawideband (UWB) radio signals [21].
+ However, ultrasound has a limited maximum range (about 10 m) and cannot pass through walls; thus, its coverage is limited by the number of beacons installed [22]. UWB can penetrate walls in buildings and is able to resolve individual multipath mechanisms due to its large bandwidth. Therefore, we have designed our positioning system based on UWB, because the user's location can change over time through different rooms. To achieve this goal, a new prototypical indoor monitoring method, built on low-cost UWB WSNs based on the IEEE 802.15.4 and IEEE 802.15.4e standards, has been created as a part of our IA system [23].
+ Note that the locations of the frequently used objects are predetermined; therefore, the distance from the objects can be simply estimated. The locations of the objects are assumed to be fixed and are stored on the server. Thus, to calculate the user's distance from a predefined object, the Euclidean distance between the user and the object's location is easily assessed. A true-range multilateration technique is employed as the positioning algorithm in the implemented indoor positioning system [24].
+ Position calculation consists of two main parts: finding the distances from the tag to the anchors, and solving the inverse geometry problem for the location. The UWB-based WSN is implemented using a single-chip wireless transceiver, the Decawave (Dublin, Ireland) DW1000 module [22]. The impulse-based UWB facilitates precise ranging and highly accurate localization of the network nodes [25,26].
+ Our range-finding algorithm is a double-sided two-way ranging based on the time difference of arrival (TDoA) value [27]. In the designed local fog, the calculated ranges are continuously transmitted between the tag (processor: STM32F4 Cortex-M4) and the anchors (processor: STM32F1 Cortex-M3), and sniffed by the data coordinator.
+ Furthermore, the data coordinator concentrates and encapsulates all the transferred distances on the local fog and publishes them in JSON format via the MQTT protocol to the cloud server. For activity recognition, a pedometer for estimating the user's total movement, and fall detection, the motion processing libraries available in X-CUBE-MEMS1 can be simply employed.
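+ For illustration, the true-range multilateration step can be sketched as a small least-squares solve; the anchor coordinates and ranges below are illustrative, not the deployed geometry:
+ ```python
+ # Sketch: least-squares true-range multilateration from three anchors.
+ # Anchor layout and the tag position are illustrative assumptions.
+ import numpy as np
+
+ def multilaterate(anchors, ranges):
+     # Linearize |x - a_i|^2 = r_i^2 against the last anchor and solve A x = b.
+     a_n, r_n = anchors[-1], ranges[-1]
+     A = 2.0 * (anchors[:-1] - a_n)
+     b = (r_n ** 2 - np.asarray(ranges[:-1]) ** 2
+          + np.sum(anchors[:-1] ** 2, axis=1) - np.sum(a_n ** 2))
+     pos, *_ = np.linalg.lstsq(A, b, rcond=None)
+     return pos
+
+ anchors = np.array([[0.0, 0.0], [8.5, 0.0], [4.0, 4.6]])  # room-scale layout
+ tag = np.array([3.0, 2.0])
+ ranges = np.linalg.norm(anchors - tag, axis=1)  # ideal measured ranges
+ print(multilaterate(anchors, ranges))  # ≈ [3.0, 2.0]
+ ```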
+ 2.4. Visualizations
+ The IA system uses two different user interfaces (UIs) to interact with the users and their families. Both user interfaces are developed using the ARCore SDK (Software Development Kit) for Unity and Android features [28]. In this section, we consider the applications and their features for interacting with the user, the monitoring service for the caregivers, and the development process of designing a serious game environment.
+ 2.4.1. AR interaction
+ Unity is a cross-platform game engine that can be applied to design games and simulations for computers, consoles, and smartphones [29]. To display the user's localization information clearly, a 3D indoor positioning view based on the Unity 3D platform was created. This configurable user interface can represent complex indoor geographic information and enhance the user experience [30].
+ Furthermore, to monitor and track the user's real-time location and support their interaction with the objects, the user wears an indoor localization tag. For presenting and transmitting positioning data in JSON format via the MQTT protocol to the cloud server, the MQTTnet and Json.NET libraries were also employed in the Unity application. The localization tag worn by the user sends position information to the monitor module, and the monitor module receives all the positioning data.
+ All the data related to the localization system and AR messages can be stored on the cloud for further studies. To improve the user's safety, caregivers can add the locations of their home's danger zones. Hence, the IA system can provide warnings if the user is near these locations. The second user interface is built with Android Studio and SDK tools to interact with the individual with MCI as a personal assistant device [31]. Android Studio is the official integrated development environment (IDE) for the Android operating system, created particularly for Android development. In this work, two different AR message types are defined to send reminders or alerts to the user: visual and audio messages.
+ The AR messages show information using virtual data based on the predefined fuzzy rules. These multimedia contents cover taking medication, playing AR cognitive training games, reminding meal times, and alerting danger zones. For example, to recall the medication schedule, a voice message, "Let's take our medicine," and an image message showing the drug's picture are defined. These messages must not remind users of their disability.
+ Based on similar studies and their recommendations on designing serious games for people with MCI [32–34], we have also considered other design factors in the AR interaction, including:
+ • A summary screen showing scores, errors, and game time is included to foster competition and make the interaction appropriate.
+ • Instructions before the game about how to solve the game tasks are kept brief and concise.
+ • Only one active screen area to interact with is designed.
+ • Large and highly contrasted icons are used.
+ • The game is played from the user's perspective to prevent confusion with avatars.
+ • The difficulty level of the game increases when the user completes the first task correctly, so early success can foster motivation and prevent frustration.
+ 2.4.2. AR-based serious game and scenarios
+ Initiating treatment at the early stage of cognitive problems would slow the progression of the condition and improve the individual's quality of life [35]. In this study, an AR-based serious game provides entertainment and mental state examination simultaneously. The IA system suggests playing the game several times per day to get the user involved and keep them active. Some of the game's features are as follows:
+ • It is designed based on "World Tracking", which allows users to put an AR object anywhere they would like in the camera view.
+ • Before starting each task, the system provides some details within the questions to trigger memory through voice interactions.
+ • After performing each task, the user's score is calculated automatically based on the number of right and wrong decisions.
+ • The time duration of each task and the total response time are measured automatically.
+ The AR serious game consists of a simulation of daily living situations with five tasks, including finding a particular object, remembering colors, and arranging numbers. These tasks are defined to evaluate different cognitive functions such as pattern separation and completion, visuospatial and episodic memory, decision-making ability, concentration, and overall processing speed by measuring the response time [33].
+ Performing such tasks may also ultimately improve the user's cognitive abilities. Furthermore, based on the fuzzy rules, the tasks can be suggested to the user several times per day for assessing their cognitive status and rehabilitation program. To prevent repetition, we have designed three series of game scenarios in which each task contains different 3D objects. In every session, the IA system randomly selects one of the game series to recommend to the user. Table 3 describes the tasks and outcome measures.
+ Before beginning the assessment process and game scenarios, the user should look at all the objects for 10 s (seconds) to memorize their locations and colors, as shown in Fig. 4(a). Audio messages are provided before starting each task to help the user understand the task requirements, for example, which object's color should be recalled [33]. Fig. 4 illustrates the game steps based on the tasks presented in Table 3.
+ In more detail, Fig. 5(a) shows task 4, in which the user should identify the unnatural placement of objects, for instance, headphones in a sink [33]. In the next step, the user observes a series of numbers on the screen for 5 s, and after that, they must choose the correct sequence of numbers in the picture frames. This step is proposed based on the fact that people with mild cognitive impairments usually lose their ability to sequence [4]. Fig. 5(b) demonstrates this step of the game. Finally, the user's final score is calculated according to the total response time and the number of true (T) and false (F) answers.
+ The total response time must not be more than 10 min, which is the normal cognitive assessment test duration [34]. Fig. 5(c) shows an example of the assessment result that is sent to the fuzzy decision-making system for evaluating the level of support; this result is not included in the statistical analysis.
+ The IA system is designed based on errorless learning to encourage the user to recall different events. If the user gives more correct answers, the system withholds reminders and turns off some smart home sensors. Thus, it can improve the user's self-management and self-care, and it would also slow the progression of the disease [3].
+ To make the interaction user-friendly, the game is presented on a touch screen for performing the different tasks, because most older adults can interact with a touch screen without any training [36]. We have used different 3D objects for creating the AR environment, and an event-based creation supporting unit for defining interactions between the virtual space objects in the format of an event script [37].
+ To build the AR serious game, different virtual objects are incorporated into the web-based AR platform, ZapWorks Studio and ZapWorks Designer (Zappar Ltd., United Kingdom) [38]. The platform allows us to upload multimedia content to create an AR experience that lets users view the virtual objects through the mobile device application, Zappar. This application can be efficiently run on both Android and iOS operating systems. In the current study, we have used a Samsung Galaxy S9 Plus smartphone, with an octa-core processor, 6 GB RAM, and a dual 12 MP rear camera, to view the AR experience, with each virtual object being displayed on the screen.
+ Table 3. Descriptions of tasks and outcome measures.
+ Tasks | Outcome measures
+ Task 1: retrieving objects' locations | Time on task 1; task completion: selecting the correct location; score: 1 point for the correct answer
+ Task 2: memorizing objects' colors | Time on task 2; task completion: selecting the correct color; score: 1 point for the correct answer
+ Task 3: recognizing an extra object | Time on task 3; task completion: selecting the correct object; score: 1 point for the correct or wrong answer
+ Task 4: identifying unnatural placement of objects | Time on task 4; task completion: selecting the correct picture; score: 1 point for the correct or wrong answer
+ Task 5: recalling a sequence of numbers | Time on task 5; task completion: selecting the correct numbers sequentially; score: 1 point for the correct answer
+ [Fig. 4. Screenshots of the five augmented tasks. (a) Overview of the main environment, (b) Task 1: retrieving the object location, (c) Task 2: memorizing the object color, (d) Task 3: recognizing the extra object, (e) Task 4: identifying unnatural placement of the objects in the picture frames, and (f) Task 5: recalling the sequence of the numbers on the screen.]
+ [Fig. 5. Illustration of a user's responses: (a) identifying the unnatural placement of objects, (b) choosing the correct sequence of numbers in the picture frames, and (c) the final per-level score sheet (true/false answers and times) that is sent to the decision-making system.]
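+ A minimal sketch of this scoring step is shown below; the field names are assumptions, while the one-point-per-correct-answer rule and the 10-minute cap on total response time follow the description above:
+ ```python
+ # Sketch: aggregating per-task results into the final game score.
+ # Field names are assumptions; scoring rules follow the text above.
+ from dataclasses import dataclass
+
+ @dataclass
+ class TaskResult:
+     correct: bool
+     seconds: float
+
+ def score_session(results, max_total_seconds=600):
+     score = sum(1 for r in results if r.correct)   # 1 point per correct answer
+     total = sum(r.seconds for r in results)
+     valid = total <= max_total_seconds             # 10-minute assessment window
+     return {"score": score, "total_seconds": total, "valid": valid}
+
+ session = [TaskResult(True, 14), TaskResult(False, 14), TaskResult(False, 13),
+            TaskResult(True, 8), TaskResult(True, 18)]
+ print(score_session(session))  # {'score': 3, 'total_seconds': 67, 'valid': True}
+ ```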
+ 2.5. Study participants
+ We set out to validate the IA system and the serious game in a group of individuals aged above 50 years, to be age-matched with the end-users. They comprised 37 older adults, including 16 females and 21 males. All participants were volunteers recruited from universities and social media. They all lived independently and had active social and cognitive lives. Persian was the mother tongue of all participants in the study. Before playing the AR serious game, participants underwent the MoCA [39]. The MoCA is a 30-point test including a set of cognitive subtests covering six different cognitive domains.
+ Based on the literature, a cutoff of 26 (scores of 25 or below indicate impairment) yields the best balance between sensitivity and specificity for the MCI and other groups [40]. Thus, in our study, we divided the participants into two groups based on their MoCA results: (A) control group (MoCA ≥ 26) and (B) MCI group (MoCA < 26). Table 4 shows the number of people in each group and their gender.
+ Table 4. Group characteristics in the study (A: control group, B: MCI group).
+ Participants (n = 37) | Total number | Female | MoCA score
+ Group A | 26 | 12 | ≥26
+ Group B | 11 | 4 | <26
+ All participants first filled in a questionnaire including demographic information and personal and medical history. They then completed a training round of the game in order to get familiar with the interactions with virtual objects before the final evaluation. Table 5 describes all the participants' demographics in detail.
+ Table 5. Participants' demographics.
+ Participants | Average | Minimum | Maximum | Standard deviation
+ Age of group A | 60.07 | 51 | 81 | 7.98
+ Age of group B | 65.73 | 56 | 80 | 7.91
+ MoCA score of group A | 27.71 | 26 | 29 | 1.069
+ MoCA score of group B | 23.36 | 21 | 25 | 1.36
+ 2.6. Statistical analysis
+ We analyzed all the data and game scores to assess the serious game's accuracy against the MoCA scores. We chose the MoCA as our reference screening tool, as it has been verified to be sensitive in recognizing MCI and is an accurate screening measure of cognitive ability. The significance level was set at α < 0.05 for all analyses. The SPSS 26.0 statistical software package was used to perform all the statistical analyses. The data were normally distributed and homogeneous, so the Pearson correlation coefficient was used for these variables.
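+ The named tests map directly onto standard library calls; as a sketch, the same analyses could be run with SciPy in place of SPSS (the score arrays below are placeholders, not the study's raw data):
+ ```python
+ # Sketch: Pearson/Spearman correlations and an independent-samples t-test,
+ # mirroring the analyses named above. All arrays are placeholder data.
+ import numpy as np
+ from scipy import stats
+
+ moca = np.array([26, 27, 28, 29, 27, 26])   # placeholder MoCA scores
+ game = np.array([4, 4, 5, 5, 4, 3])         # placeholder game scores (out of 5)
+
+ r, p = stats.pearsonr(moca, game)           # control group: parametric data
+ rho, p_rho = stats.spearmanr(moca, game)    # MCI group: nonparametric data
+
+ # Total game score, control vs. MCI group (placeholder samples):
+ control_scores = np.array([4, 5, 4, 3, 5])
+ mci_scores = np.array([2, 1, 3, 2, 2])
+ t, p_t = stats.ttest_ind(control_scores, mci_scores)
+ print(f"r={r:.3f}, rho={rho:.3f}, t={t:.3f} (significant if p < 0.05)")
+ ```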
+ 3. Results and discussions
+ In this section, we present the technical feasibility and usability of the system via simulations and user experiences. First, the IA system's accuracy in detecting the danger zones and making appropriate recommendations is assessed. Then, the serious game results of the 37 participants are compared with their MoCA test scores. Finally, we evaluate the system's performance in its different operating modes.
+ 3.1. Forbidden zones
+ In this experiment, we evaluated the system's accuracy in detecting the danger zone areas. Four different danger areas were defined, each of equal size 0.52 × 0.7 × 0.23 m³, in an 8.5 × 4.6 × 2 m³ room space, as shown in Fig. 6. Fig. 7 illustrates these zones, which were modeled in the Unity game engine to display the user's localization information clearly.
+ [Fig. 6. Four danger zone areas are defined for evaluation of the localization system in the experimental condition.]
+ [Fig. 7. Model of the 3D indoor positioning system and four danger zone areas simulated in the Unity game engine.]
+ In a typical scenario, once the user entered a dangerous zone (for example, the fireplace), they received an image message alarm. This alarm reminded them to keep away from the area. The "near" membership function was defined from 1 to 5 dm (the danger zone). Fig. 8 demonstrates the AR message showing the fire alert sent to the user by activating fuzzy rule number 16. Other reminders could cover pharmacological management and completing daily tasks.
+ [Fig. 8. Sample of an image message received by the user when a specific fuzzy rule was activated, picture message number 16 (fire alert).]
+ The positioning data and the movement time of the user were recorded during the experiment. According to the system's operating mode and the user's cognitive state, if the user's movement duration was lower than 4.5 h, they received a voice message. This audio message (ID: 13), sent by the IA system, reminded the user of their lack of movement [19]. If the movement duration was higher than 4.5 h or the user's serious game result was high, they did not receive any messages or alarms. Table 6 summarizes the results of these experiments under different conditions based on the data values.
+ Table 6. Result of the experimental test.
+ Movement (hour) | Serious game result (out of 100) | Voice message ID | Danger zone number | Image message ID
+ 1.15 | 55 | 13 | 1 | 16
+ 5.30 | 48 | — | 2 | 17
+ 5.25 | 97 | — | 3 | 18
+ 3.40 | 76 | 13 | 4 | 19
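+ A sketch of such a proximity check against predefined zones is given below; the zone corners and the 0.5 m "near" margin (the 1–5 dm band rounded up) are illustrative assumptions:
+ ```python
+ # Sketch: flagging entry into a predefined danger zone from tag coordinates.
+ # Zone corners and the alert margin are illustrative assumptions.
+ import numpy as np
+
+ DANGER_ZONES = {  # axis-aligned boxes: (x_min, y_min, x_max, y_max) in metres
+     1: (0.0, 0.0, 0.52, 0.70),   # e.g. the fireplace corner
+ }
+
+ def zone_alert(tag_xy, margin=0.5):
+     """Return IDs of zones whose boundary lies within `margin` of the tag."""
+     hits = []
+     for zone_id, (x0, y0, x1, y1) in DANGER_ZONES.items():
+         dx = max(x0 - tag_xy[0], 0.0, tag_xy[0] - x1)  # distance outside in x
+         dy = max(y0 - tag_xy[1], 0.0, tag_xy[1] - y1)  # distance outside in y
+         if np.hypot(dx, dy) <= margin:
+             hits.append(zone_id)
+     return hits
+
+ print(zone_alert((0.8, 0.4)))  # [1]: within 0.5 m of zone 1 -> send image msg 16
+ ```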
+ 3.2. Serious game
+ We evaluated the serious game's accuracy with the 37 participants and compared the game scores with the users' MoCA test scores. Table 7 compares both scores in group A and group B.
+ Table 7. Comparison between group A and group B game results.
+ Participants | Average | Minimum | Maximum | Standard deviation
+ Serious game score (out of 5), group A | 4.21 | 3 | 5 | 0.70
+ Serious game score (out of 5), group B | 2.09 | 1 | 3 | 0.83
+ Serious game time duration (mins), group A | 3.35 | 1.38 | 7.47 | 1.9
+ Serious game time duration (mins), group B | 5.13 | 2.42 | 8.83 | 2.03
+ We used the Pearson correlation coefficient for the control group participants, given the parametric nature of their data, and applied the Spearman correlation coefficient for the MCI group due to their nonparametric data. These correlations were calculated between the MoCA score and the game score, and between the total time and the participants' age. The results are given in Table 8 and Table 9.
+ Table 8. Pearson correlation coefficients between MoCA scores and total serious game score, total response time, and age in the control group.
+ Pearson correlation in group A | Total serious game score | Total response time | Participant age
+ MoCA total score | 0.911 | −0.343 | 0.003
+ Total serious game score | 1 | −0.115 | 0.031
+ Table 9. Spearman rank correlations (Spearman's rho) between MoCA scores and total serious game score, total response time, and age in the MCI group.
+ Spearman correlation in group B | Total serious game score | Total response time | Participant age
+ MoCA total score | 0.722 | −0.773 | −0.278
+ Total serious game score | 1 | −0.477 | 0.017
+ According to the results, the total MoCA score had a significant correlation (0.911) with the total game score in the control group, while the Spearman correlation coefficient in the MCI group showed a lower correlation (0.722). Furthermore, there was no correlation between the total MoCA score and the total game time in the control group, while in the MCI group there was a relative correlation (−0.773) between the total MoCA score and the total game time duration. This may indicate that the response time is a more informative factor in the MCI group than in the control group.
+ Also, in both groups there was no correlation between the game score and the age of the participants, which means that age was not a determining factor for the game score.
+ Table 10 compares the scores of the MoCA and the serious game results. These grades show that the participants with a MoCA score higher than 27 could get at least 4 points in the serious game.
+ Table 10. Comparison between the scores of MoCA and serious game results.
+ Group | Total MoCA score | Total serious game score
+ Control group | ≥29 | 5
+ Control group | 28 | 4–5
+ Control group | 27 | 4
+ MCI group | 26 | 3
+ MCI group | 25 | 3
+ MCI group | 24 | 2–3
+ MCI group | 23 | 2
+ MCI group | 22 | 1–2
+ MCI group | 21 | 1–2
+ The Spearman correlation coefficient between the total MoCA score and the total time was 0.692 for the participants who scored 3 points in the game. This number increased to 0.841 for the participants who scored 2 points or 1 point in the game. However, a comprehensive evaluation cannot be concluded from these correlations due to the very small amount of data for each group of participants who received the same score of 1 to 3 in the game.
+ We also performed a t-test on the total game score to determine whether there was a meaningful performance difference between the control and MCI group members. The results of this evaluation showed that the MCI group had a lower overall game score (2.09 ± 0.893) compared to the control group participants (4.21 ± 0.70); t(37) = −6.939, p < 0.001. Also, the result of the t-test for the total game duration between the control and MCI groups indicated that the MCI group had a higher total game time duration (5.13 ± 2.03) than the control group (3.35 ± 1.9); t(37) = 2.275, p < 0.05.
+ This means that there is a considerable difference between the cognitive performance of the control group and MCI group participants. Our evaluation findings were in line with previous results indicating that the touchscreen is well accepted by participants without computer experience, based on the participants' feedback [41,42].
+ 3.3. System operation modes
+ We considered a range of devices that must cover the maximum area of the user's environment. The number of devices ranged from 10 to 15 to realize different workloads, including the various sensors and actuators mentioned in Section 2.1. Specifically, for each number of devices, we simulated two different cases based on the system's operation mode. The complexity of the decision-making process decreased from the automated mode to the semi-automated mode, as the number of active devices declined from 22 to 14.
+ In the functionality test, we considered the number of audio messages sent to the user and the number of IoT devices enabled during the experiment. Fig. 9 illustrates the numbers of the different types of AR messages included in each case and received by the user. As shown in Fig. 9, the number of alarm and reminder messages decreased as the user's game score increased.
+ [Fig. 9. Workload scenarios description (serious game score, number of alarms, number of reminders, number of actuators, and number of sensors, for the automated and semi-automated modes). The number of alarm and reminder messages decreased as the user's game score increased.]
+ The results indicated that the user required fewer reminders when their cognitive test score was higher than in the previous session. This also led to less data loss in the decision-making algorithm and during the process of publishing or subscribing to the messages. Moreover, operation in the semi-automated mode can reduce the need for battery recharging or replacement [43].
+ 3.4. Limitations
+ The target population and primary end-users of our proposed system are older adults experiencing cognitive impairments who can understand the technology they are using and its expected assistance and risks. This study focused on assessing the proposed system's usability by users without cognitive impairments and considering their feedback. However, this study has some limitations.
+ The spatial validity of this work was limited, as part of the study was conducted in a laboratory environment. The IA system prototype can be evaluated with more users, and it can include more scenarios and functionalities. Therefore, although the scenarios' technical requirements are taken into account in our work, the particular content of the messages must be customized for each user before the system's implementation. To achieve this goal, the IA system is capable of adding more fuzzy rules and being customized for each person.
+ 4. Conclusions and future work
+ Memory loss and decision-making difficulties are the main symptoms of MCI experienced by elderly people. The person forgets recent incidents and has problems remembering information. Such situations have a considerable effect on the confidence and quality of life of both the elderly and their caregivers. The aim of this study was to introduce an IA system for individuals with age-related MCI and improve their ability to complete everyday tasks on their own without compromising their privacy. We designed, implemented, and evaluated an IA system based on an AR serious game and IoT to help users make everyday decisions more independently through interaction and recalling the memory of events.
+ Several ready-to-use libraries were used to facilitate system implementation on smartphones and computers. We evaluated the system's technical feasibility and usability via simulations and user experiences with 37 elderly participants. The results of our evaluation implied that the total score of the serious game had a high correlation with the total MoCA score in the control group, and that there was an acceptable correlation in the MCI group. Also, our game was capable of showing a considerable difference between the cognitive performance of the control and MCI participant groups. This indicates the high accuracy of the system in estimating users' cognitive states compared to traditional assessment tools.
+ Moreover, the touchscreen interaction and verbal reminders created a natural way of interacting and were well accepted by the participants without computer experience. The system response in the semi-automated mode also caused less data loss than in the automated mode, as the number of active devices decreased.
+ The findings of this study support the idea that an IA system can potentially help older adults in the home environment and, according to the participants' feedback, can be evaluated by users with cognitive impairments. Serious games also have a positive impact in offering the possibility of reconstructing a functional environment for the users. Future plans include the study of historical data, including daily serious game scores, to evaluate the user's mental and physical health condition.
+ Declaration of Competing Interest
+ The authors declare that they have no known competing financial interests or personal relationships that could have appeared to influence the work reported in this paper.
+ Acknowledgments
+ The authors gratefully acknowledge the families and the individuals that participated in this study.
ONE0T4oBgHgl3EQfjgHF/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
S9E2T4oBgHgl3EQfWwd9/content/tmp_files/2301.03837v1.pdf.txt ADDED
@@ -0,0 +1,2033 @@
Understanding the Complexity and Its Impact on Testing in ML-Enabled Systems
A Case Study on Rasa
Junming Cao∗, Bihuan Chen∗, Longjie Hu∗, Jie Gao†, Kaifeng Huang∗ and Xin Peng∗
∗Fudan University, Shanghai, China
†Singapore University of Technology and Design, Singapore
Abstract—Machine learning (ML) enabled systems are emerging with recent breakthroughs in ML. A model-centric view is widely taken by the literature, focusing only on the analysis of ML models. However, only a small body of work takes a system view that looks at how ML components work with the system and how they affect software engineering for ML-enabled systems. In this paper, we adopt this system view and conduct a case study on Rasa 3.0, an industrial dialogue system that has been widely adopted by various companies around the world. Our goal is to characterize the complexity of such a large-scale ML-enabled system and to understand the impact of the complexity on testing. Our study reveals practical implications for software engineering for ML-enabled systems.
I. INTRODUCTION

The recent advances in machine learning (ML) have attracted an increasing interest in applying ML across a breadth of business domains, e.g., self-driving cars, virtual assistants, robotics, and health care. According to the Global AI Adoption Index by IBM [34], 35% of companies around the world have deployed AI in their business, while 42% of companies are exploring AI. Such a trend has caused the emergence of ML-enabled systems, which are composed of ML and non-ML components. ML components are often important, but usually only one part of the many components in ML-enabled systems [38].

The previous research on software engineering for machine learning often takes a model-centric view that focuses only on the analysis of ML models [38, 55]. For example, many advances have been made in DL model testing (e.g., [4, 19, 25, 40, 59, 68, 72, 82, 84]), verification (e.g., [9, 57, 58, 66, 73]) and debugging (e.g., [43, 49, 54, 71]). Only a small body of work takes a holistic system view, e.g., architectural design [64, 80], technical debt [63, 70], ML component entanglement [53, 78, 83], feature interaction [1, 2, 8], and model interactions in Apollo [60]. However, the lack of system-level understanding of ML-enabled systems may hide problems in engineering ML-enabled systems and hinder practical solutions.

In this paper, we adopt this system view and conduct a case study on Rasa 3.0 [11] to characterize the complexity of such a large-scale ML-enabled system as well as to understand the impact of the complexity on testing. Rasa is a task-oriented industrial dialogue system that has been widely used by various companies around the world. Therefore, we believe Rasa is a good representative of real-world ML-enabled systems.

We first investigate the complexity of Rasa at three levels. At the system level, we explore how ML components are adopted across the modules in Rasa. We find that there are 23 ML models in 15 ML components across 6 modules. At the interaction level, we analyze how ML components interact with other components in Rasa. We find that there are 43 interaction patterns and 230 interaction instances across 4 major categories and 8 inner categories. At the component level, we investigate what kinds of code compose ML components. We find that 57.1% of the code inside components is data processing code, and there are 8 composition patterns between data processing code and model usage code.

We then explore the impact of the complexity on testing from two perspectives. From the testing practice perspective, we analyze the characteristics of test cases and how well they cope with the complexity. We find that the test coverage of component interactions is low because of the complexity from the huge configuration space and from hidden component interactions. From the mutation testing perspective, we study the bug-finding capability of test cases and test data (i.e., the data for testing models) and how well they cope with the complexity. We find that there may be many potential bugs in data processing code that can only be detected by test cases, due to the complexity from data processing code. The capability of test data to kill mutants is limited because of the complexity from the huge configuration space.

Based on our case study, we highlight practical implications to improve software engineering for ML-enabled systems. For example, the configuration space of ML-enabled systems should be tested adequately, and configuration suggestions should be provided to developers. A general taxonomy of data processing code should be constructed, so that maintenance and testing tools for it can be developed. More integration-level test cases should be created to cover component interactions. Test cases and test data should be used in combination to detect both non-ML-specific and ML-specific bugs.

In summary, this paper makes the following contributions.
• We conduct an in-depth case study on Rasa to characterize its complexity and the impact of its complexity on testing.
• We highlight practical implications to improve software engineering for ML-enabled systems.
II. BACKGROUND AND STUDY DESIGN

We present the architecture of a typical task-oriented dialogue system, an overview of Rasa, and our study design.

A. Architecture of a Typical Task-Oriented Dialogue System

A task-oriented dialogue system (TDS) aims to assist users in performing specific tasks, such as restaurant booking and flight booking [15]. A pipeline-based TDS consists of four parts, i.e., natural language understanding (NLU), dialogue state tracking (DST), dialogue policy (Policy) and natural language generation (NLG) [86]. NLU parses a user utterance into a structured semantic representation, including intent and slot-values. The intent is a high-level classification of the user utterance, such as Inform and Request. Slot-values are task-specific entities that are mentioned in the user utterance, such as restaurant price range and location preference. After tokenization and featurization of the user utterance, NLU applies classification models to recognize the intent, and named entity extraction models to extract slot-values. DST takes the entire history of the conversation, including both user utterances with predicted intents and slot-values and system responses, to estimate the current dialogue state, which is usually formulated as a sequential prediction task [77]. The dialogue state is typically the probability distribution of user intent and slot-values up to the current timestamp. Given the estimated dialogue state, Policy generates the next system action, such as Query Database and Utter Question. As Policy determines a series of actions sequentially, sequential models such as Recurrent Neural Networks (RNN) are applied. For actions that require a response, NLG converts the action into a natural language utterance, which is often considered a sequence generation task [76].
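To make the four-stage flow concrete, the following is a minimal, self-contained Python sketch of a pipeline-based TDS. All function names and toy rules here are illustrative assumptions on our part, not code from Rasa or any other TDS.

# Illustrative four-stage TDS pipeline; all logic below is toy code.
def nlu(utterance: str) -> dict:
    # Parse the utterance into an intent and slot-values (toy rules).
    intent = "Inform" if "like" in utterance else "Request"
    slots = {"price": "moderate"} if "moderate" in utterance else {}
    return {"intent": intent, "slots": slots}

def dst(history: list, parsed: dict) -> dict:
    # Fold the newest NLU result into the running dialogue state.
    state = dict(history[-1]) if history else {}
    state.update(parsed["slots"])
    state["intent"] = parsed["intent"]
    return state

def policy(state: dict) -> str:
    # Pick the next system action from the estimated state (toy rule).
    return "Utter Question" if "location" not in state else "Query Database"

def nlg(action: str) -> str:
    # Turn the chosen action into a natural language response.
    return "Where?" if action == "Utter Question" else "Searching..."

history: list = []
parsed = nlu("I'd like a moderate Thai restaurant.")
state = dst(history, parsed)
print(nlg(policy(state)))  # -> "Where?"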
B. An Overview of Rasa 3.0

Rasa is a popular open-source ML-enabled TDS, which is fully implemented in Python and used by many well-known companies in customer service for real users, including Adobe, Airbus, and N26 [11]. An architecture overview of Rasa 3.0 is shown in Fig. 1. Each module consists of one single component or multiple semantically similar components. Apart from the modules in a typical TDS, Rasa proposes the Selector module to select candidate intents and responses for FAQ questions [16]. We present some concepts in Rasa 3.0 to ease our presentation.

Components in Rasa. There are two types of components in Rasa. We define ML components as components that are implemented with ML models, and rule-based components as components that are implemented with rule-based code logic. General utility code in Rasa, such as command line and database access code, is not considered in this paper.
+ there are multiple available components in each module, de-
141
+ velopers need to choose components that are actually used in
142
+ the Rasa pipeline with a configuration file to build a chatbot.
143
+ Parameters of each component are specified in the configuration
144
+ file (e.g., ML model used by a component and hyperparamers of
145
+ a ML model). Rasa applies Dask [6] to compile a configuration
146
+ file into a component graph. Each node in the component graph
147
+ denotes a component, and the edges connected with it denote up-
148
+ stream and downstream components with input and output data
149
+ dependency. Execution of components obeys the topological
150
+ order specified by edges. These components interact with each
151
+ other through fields in shared Message class instances. An
152
+ upstream component stores outputs to Message instances, and
153
+ a downstream component retrieves them for further processing.
154
+ ML Stages in Rasa. Different from Apollo, which uses
155
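The following minimal Python sketch illustrates this execution model — components run in topological order and communicate through fields of a shared Message. The Message class and component functions here are simplified stand-ins of our own devising, not Rasa's actual API.

class Message:
    # Shared data container; components read and write named fields.
    def __init__(self) -> None:
        self.fields: dict = {}

def whitespace_tokenizer(msg: Message) -> None:
    # Upstream component: stores its output into the Message.
    msg.fields["tokens"] = msg.fields["text"].split()

def count_vectors_featurizer(msg: Message) -> None:
    # Downstream component: retrieves the upstream output for processing.
    tokens = msg.fields["tokens"]
    msg.fields["features"] = {t: tokens.count(t) for t in tokens}

# A configuration-like list fixes which components run, in topological order.
pipeline = [whitespace_tokenizer, count_vectors_featurizer]

msg = Message()
msg.fields["text"] = "book a thai restaurant"
for component in pipeline:
    component(msg)
print(msg.fields["features"])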
+ trained model files from external systems, and therefore only
156
+ contains the inference stage of ML models [60], the training,
157
+ evaluation and inference stages of ML models are all present in
158
+ Rasa. Given a configuration file, Rasa separately compiles it to
159
+ a training component graph and an inference component graph.
160
+ In training stage, the trainable upstream components are first
161
+ trained, and then process the training data used by downstream
162
+ components. In evaluation stage, only the performance metrics
163
+ of IntentClassifier, EntityExtractor and Policy are reported, as
164
+ there is no ground truth for evaluation data in other modules.
165
+ C. Study Design
166
+ Our goal is to understand the complexity and its impact on
167
+ testing in Rasa. To achieve this goal, we propose five RQs.
168
+ • RQ1 System Complexity Analysis: how ML compo-
169
+ nents are adopted across the modules in Rasa?
170
+ • RQ2 Interaction Complexity Analysis: how ML compo-
171
+ nents interact with other components in Rasa?
172
+ • RQ3 Component Complexity Analysis: how the code of
173
+ ML components is composed by what kinds of code?
174
+ • RQ4 Testing Practice Analysis: how is the characteristic
175
+ of test cases, and how well they cope with the complexity?
176
+ • RQ5 Mutation Testing Analysis: how is the bug-finding
177
+ capability of test cases and test data (i.e., the data for testing
178
+ models), and how well they cope with the complexity?
179
+ RQ1 aims to identify ML components in Rasa and broadly
180
+ view them from the perspective of dependent libraries and ML
181
+ models. RQ2 aims to summarize a comprehensive taxonomy
182
+ of component interaction patterns. RQ3 aims to inspect the
183
+ source code inside every component to characterize the statistics
184
+ and composition patterns of different code types, including
185
+ data processing code, model usage code, etc. Our findings
186
+ from RQ1, RQ2 and RQ3 could reveal how the complexity
187
+ originates and manifests in real world large-scale ML-enabled
188
+ systems, which provide both practitioners and researchers with
189
+ insights to overcome the complexities involved in implementing,
190
+ maintaining, debugging and testing such complex systems.
191
+ RQ4 aims to quantitatively assess Rasa’s test cases from code
192
+ coverage, test case statistics (i.e., granularity levels, oracle types,
193
+ and ML stages), and component interaction coverage perspec-
194
+ tives. RQ5 aims to generate mutants (i.e., artificial bugs) and
195
+ check whether these mutants can be killed (i.e., detected) by test
196
+ cases. Further, for the survived mutants, we train Rasa with 3
197
+ default configuration files on MutiWoz [15], a widely used multi-
198
+ domain TDS dataset. We calculate the statistical significance
199
+ between the performance metrics from mutated Rasa code with
200
+ metrics from pipelines trained with clean code. Our findings
201
+ from RQ4 and RQ5 evaluate the testing practice in Rasa, and
202
+
203
+ DST
204
+ “I'd like a moderate
205
+ Thai restaurant.”
206
+ NLG
207
+ [[0, 0, …, 1, 0]
208
+ …,
209
+ [1, 1, …, 0, 0 ]]
210
+ Past user and
211
+ bot events
212
+ ask_location,0.957
213
+ ask_numpeople, 0.013
214
+ ask_moreupdates,0.002
215
+
216
+ "where?"
217
+ Postprocess
218
+ Tokenizer
219
+ Preprocess
220
+ Tokenizer
221
+ Module
222
+ Token
223
+ Postprocess
224
+ Featurizer
225
+ Preprocess
226
+ Featurizer
227
+ Module
228
+ Entity
229
+ Extractor
230
+ Preprocess
231
+ EntityExtractor
232
+ Module
233
+ Postprocess
234
+ Entity
235
+ Synonym
236
+ Mapper
237
+ Intent
238
+ Classifier
239
+ Preprocess
240
+ IntentClassifier
241
+ Module
242
+ Postprocess
243
+ Fallback
244
+ Classifier
245
+ Postprocess
246
+ Selector
247
+ Preprocess
248
+ Selector
249
+ Module
250
+ [[entiy: price, value: mid,
251
+ confidence_score: 0.9997],
252
+ [entiy: cuisine, value:thai,
253
+ confidence_score: 0.9993]]
254
+ inform, 1.0
255
+ request_info, 4.122e-18
256
+ thankyou, 2.408e-18
257
+ greet, 2.364e-18
258
+ Policy
259
+ Preprocess
260
+ Policy
261
+ Module
262
+ Postprocess
263
+ Policy
264
+ Ensemble
265
+ NLU
266
+ [I,d,like,a,moderate,
267
+ Thai,restaurant]
268
+ Selected
269
+ Response
270
+ Features
271
+ Intent
272
+ List
273
+ Entity
274
+ list
275
+ Action
276
+ List
277
+ Bot
278
+ Utterance
279
+ Fig. 1: The Modules and Workflow of Rasa
280
+ shed light on automated test generation, bug localization and
281
+ bug repairing techniques for complex ML-enabled systems.
282
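As an illustration of the RQ5 analysis just described, the sketch below compares performance metrics of pipelines trained from mutated code against pipelines trained from clean code with a non-parametric significance test. The metric values are fabricated, and the choice of the Mann–Whitney U test is our assumption for illustration, not a detail reported above.

from scipy.stats import mannwhitneyu

# Hypothetical F1 scores of pipelines trained with clean vs. mutated code.
clean_f1 = [0.86, 0.85, 0.87, 0.86, 0.85]
mutated_f1 = [0.79, 0.80, 0.78, 0.81, 0.80]

# A surviving mutant is suspicious if the metrics differ significantly.
stat, p_value = mannwhitneyu(clean_f1, mutated_f1)
print(f"U={stat}, p={p_value:.4f}, significant={p_value < 0.05}")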
III. RQ1: SYSTEM COMPLEXITY ANALYSIS

A. Methodology

To answer RQ1, we identified ML and rule-based components in Rasa and characterized them through a detailed examination of Rasa's source code and documentation. We excluded DST and NLG as they are fully implemented with rule-based code logic in Rasa, without ML components.

All the modules we identified are listed in Table I, except for a special module, Shared: it contains general data processing code and ML model definition code (e.g., Transformer) but does not contain any independent components. We will include it in the last three RQs. Specifically, for each component, we recursively tracked the methods within it to manually extract the model or rule definition code. We examined the implementation details of APIs in ML libraries by reading the documentation and source code of the external libraries, including the ML model type and the number of candidate models.

In particular, we analyzed whether ML components are implemented using direct or indirect external libraries; whether the components can be trained (notice that not only can some ML components be trained, but some rule-based components can also be trained, as long as they update internal parameters when processing training data); whether Rasa implements components with its own code and provides built-in model and rule definition code; and the lines of code (LoC) of each component, excluding blank lines, code comments and import statements.
B. Results

The results are summarized in Table I. Components shown in gray color are ML components, and the others are rule-based components. There are 6 modules in total, including 15 ML and 14 rule-based components. These components contain 23 ML models and are implemented with 7 directly dependent external ML libraries and 3 indirectly dependent external ML libraries. In particular, all ML components in Tokenizer and Featurizer are not trainable because pre-trained language models are applied. All components in Policy are implemented in Rasa's own code, because there are no ready-to-use Policy models provided by existing libraries. There are a total of 5348 LoC in ML components and 2980 LoC in rule-based components. In addition, the general module Shared contains 5375 LoC, which is not listed in the table.

Notably, we find that classical machine learning models (e.g., Support Vector Machine and Conditional Random Field) together with deep learning models (e.g., Convolutional Neural Networks and Transformer) play an important role in Rasa. This is different from the previous study [60] on Apollo, which is focused on deep learning models.

Next, we introduce the components used in each module.

Tokenizer. Tokenizer splits the user utterance into tokens with component-specific split symbols (e.g., whitespace and punctuation). (1) SpacyTokenizer provides the richest token information, including splitting tokens with rules, lemmatizing tokens with a look-up table, and performing part-of-speech tagging with a multi-layer perceptron (MLP). (2) JiebaTokenizer is the only component that tokenizes non-English sentences, using a Hidden Markov Model (HMM) [20]. (3) MitieTokenizer and WhitespaceTokenizer tokenize text with predefined rules.

Featurizer. As shown in Fig. 1, Featurizer converts tokens into features for downstream module inference. (1) ConveRTFeaturizer loads TFHub's [27] pre-trained ConveRT (Conversational Representations from Transformers) TensorFlow model to featurize tokens [29].
TABLE I: System Complexity Analysis of Rasa
Module | Component | Direct Lib. | Indirect Lib. | Model Type | No. Model | Trainable | Rasa Imp. | LoC
Tokenizer | JiebaTokenizer | Jieba | N/A | HMM | 1 | False | False | 85
Tokenizer | SpacyTokenizer | Spacy | Thinc | MLP | 1 | False | False | 39
Tokenizer | MitieTokenizer | Mitie | N/A | N/A | N/A | False | False | 43
Tokenizer | WhitespaceTokenizer | N/A | N/A | N/A | N/A | False | True | 52
Featurizer | ConveRTFeaturizer | TensorFlow | N/A | Transformer | 1 | False | False | 269
Featurizer | LanguageModelFeaturizer | Transformers | TensorFlow | Transformer | 6 | False | False | 378
Featurizer | MitieFeaturizer | Mitie | Dlib | CCA | 1 | False | False | 98
Featurizer | SpacyFeaturizer | Spacy | Thinc | CNN | 2 | False | False | 66
Featurizer | CountVectorsFeaturizer | Scikit-learn | N/A | N/A | N/A | True | False | 520
Featurizer | LexicalSyntacticFeaturizer | N/A | N/A | N/A | N/A | True | True | 319
Featurizer | RegexFeaturizer | N/A | N/A | N/A | N/A | True | True | 151
IntentClassifier | DIETClassifier | TensorFlow | N/A | Transformer | 1 | True | True | 1217
IntentClassifier | MitieIntentClassifier | Mitie | Dlib | SVM | 1 | True | False | 89
IntentClassifier | SklearnIntentClassifier | Scikit-learn | N/A | SVM | 1 | True | False | 173
IntentClassifier | FallbackClassifier | N/A | N/A | N/A | N/A | False | True | 91
IntentClassifier | KeywordIntentClassifier | N/A | N/A | N/A | N/A | True | True | 132
EntityExtractor | DIETClassifier | TensorFlow | N/A | Transformer | 1 | True | True | 1217
EntityExtractor | CRFEntityExtractor | Scikit-learn | N/A | CRF | 1 | True | False | 438
EntityExtractor | MitieEntityExtractor | Mitie | Dlib | SVM | 1 | True | False | 164
EntityExtractor | SpacyEntityExtractor | Spacy | Thinc | MLP | 1 | False | False | 52
EntityExtractor | DucklingEntityExtractor | N/A | N/A | N/A | N/A | False | False | 134
EntityExtractor | RegexEntityExtractor | N/A | N/A | N/A | N/A | True | True | 124
EntityExtractor | EntitySynonymMapper | N/A | N/A | N/A | N/A | True | True | 102
Selector | ResponseSelector | TensorFlow | N/A | Transformer | 2 | True | True | 560
Policy | TEDPolicy | TensorFlow | N/A | Transformer+CRF | 1 | True | True | 1262
Policy | UnexpecTEDIntentPolicy | TensorFlow | N/A | Transformer+CRF | 1 | True | True | 458
Policy | MemoizationPolicy | N/A | N/A | N/A | N/A | True | True | 207
Policy | AugmentedMemoizationPolicy | N/A | N/A | N/A | N/A | True | True | 65
Policy | RulePolicy | N/A | N/A | N/A | N/A | True | True | 818
Policy | PolicyEnsemble | N/A | N/A | N/A | N/A | False | True | 150
(2) LanguageModelFeaturizer loads pre-trained language models from Hugging Face Transformers [22], including BERT [17], GPT [31], XLNet [79], RoBERTa [46], XLM [41] and GPT2 [56]. (3) MitieFeaturizer combines Canonical Correlation Analysis (CCA) features and word morphology features together. (4) SpacyFeaturizer applies HashEmbedCNN or RoBERTa to convert tokens to features, depending on the pre-trained Spacy pipeline specified in the configuration file. (5) CountVectorsFeaturizer, LexicalSyntacticFeaturizer and RegexFeaturizer create sparse features with n-grams, sliding windows and regex patterns, respectively.

IntentClassifier. IntentClassifier generates a predicted intent list ordered by confidence scores, based on the tokens and features from upstream modules. (1) DIETClassifier implements the Dual Intent and Entity Transformer (DIET) to perform intent classification and entity recognition simultaneously, and is therefore included in both the IntentClassifier and EntityExtractor modules. (2) MitieIntentClassifier and SklearnIntentClassifier apply a multi-class Support Vector Machine (SVM) [65] with a sparse linear kernel, using Mitie and Scikit-learn, respectively. (3) KeywordIntentClassifier classifies user intent with keywords extracted from the training data. (4) FallbackClassifier is a post-processing component that checks the results of other components (e.g., DIETClassifier). It identifies a user utterance with the intent nlu_fallback if the confidence scores are not greater than threshold, or the score difference of the two highest ranked intents is less than the ambiguity_threshold.
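A minimal sketch of this thresholding logic is given below. The parameter names follow the text; the function itself, and its default values, are illustrative assumptions rather than Rasa's implementation.

def fallback_intent(ranked, threshold=0.7, ambiguity_threshold=0.1):
    # ranked: list of (intent, confidence) sorted by confidence, descending.
    # Default values here are arbitrary, not Rasa's defaults.
    top_conf = ranked[0][1]
    runner_up = ranked[1][1] if len(ranked) > 1 else 0.0
    if top_conf <= threshold or (top_conf - runner_up) < ambiguity_threshold:
        return "nlu_fallback"  # not confident enough in any single intent
    return ranked[0][0]

print(fallback_intent([("inform", 0.95), ("greet", 0.02)]))  # -> inform
print(fallback_intent([("inform", 0.45), ("greet", 0.40)]))  # -> nlu_fallback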
EntityExtractor. EntityExtractor extracts entities such as the restaurant's location and price. (1) DIETClassifier also serves as an EntityExtractor. (2) CRFEntityExtractor, MitieEntityExtractor and SpacyEntityExtractor utilize a conditional random field (CRF) model, a multi-class linear SVM, and an MLP to predict entities, respectively. (3) DucklingEntityExtractor and RegexEntityExtractor extract entities using a duckling server [23] and regex patterns, respectively. (4) EntitySynonymMapper is a post-processing component that converts synonymous entity values into the same value. As Fig. 1 shows, the value of the “price” entity, “moderate”, is converted to “mid” by EntitySynonymMapper.
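The synonym-mapping step can be pictured as a simple dictionary lookup. The mapping table and function below are a toy example consistent with Fig. 1, not the component's real code.

# Toy synonym table standing in for mappings learned from training data.
SYNONYMS = {"moderate": "mid", "mid-priced": "mid", "thai food": "thai"}

def map_synonyms(entities):
    # Replace each extracted value with its canonical form if one is known.
    return [
        {**e, "value": SYNONYMS.get(e["value"].lower(), e["value"])}
        for e in entities
    ]

print(map_synonyms([{"entity": "price", "value": "moderate"}]))
# -> [{'entity': 'price', 'value': 'mid'}]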
Selector. ResponseSelector aims to directly select the response from a set of candidate responses, which is also known as the response selection task in the literature [16]. It embeds user inputs and candidate responses in the same vector space, using the same neural network architecture as DIETClassifier.

Policy. Policy decides the action the system takes at each turn of the conversation based on dialogue states. (1) TEDPolicy proposes a Transformer Embedding Dialogue (TED) model to embed dialogue states and system actions into a single semantic vector space, and selects the action with the maximum similarity score to the current dialogue state [75]. (2) MemoizationPolicy, AugmentedMemoizationPolicy and RulePolicy match the current conversation history with examples in the training data and predefined rules to predict system actions. (3) UnexpecTEDIntentPolicy decides on the possibility of the intent predicted by IntentClassifier given the current dialogue states, and follows the same model architecture as TEDPolicy. (4) PolicyEnsemble is a post-processing component that selects the proper system action from the output actions of the different policies.
C. Implications

The system complexity of Rasa poses challenges for developers using Rasa (i.e., application developers) and developers creating Rasa (i.e., system developers).

Complexity from the ML supply chain. Rasa depends on 10 external ML libraries directly or indirectly. Fewer than 100 (0.03%) of the 355392 projects using TensorFlow on GitHub depend on 10 or more DL libraries [69]; it can be inferred that relying on 10 or more ML libraries is similarly uncommon. For application developers, it is difficult to understand the implementation details of components that rely on external ML libraries, not to mention selecting proper components and parameters. For example, due to the lack of documentation of MitieFeaturizer in Rasa, application developers need to inspect Mitie's source code to learn that it implements CCA using Dlib APIs. For system developers, vulnerabilities [81] and dependency bugs [32] may arise because of outdated or incompatible library versions. Therefore, future work should provide support for the management of components and their dependent ML libraries in ML-enabled systems, similar to traditional software component analysis [26].

Complexity from configurations. It can be extremely complex to configure Rasa with 29 components and hundreds of parameters, making it easy to misconfigure the system and thus affect functionality and performance. This kind of misconfiguration is similar to what happens in traditional configurable software systems [74]. Additionally, finding optimal configurations for application developers' specific TDS scenarios is difficult, which is also known as configuration debt [63]. Although AutoML has been extensively studied to select appropriate ML models and parameters for specific tasks, existing approaches all focus on selecting a single ML model without considering the combination of multiple ML models and rules [28]. Another challenge is to detect potential bugs by testing a huge set of configuration settings: existing studies on ML model testing mainly focus on testing a single ML model with predefined hyperparameters [82].
IV. RQ2: INTERACTION COMPLEXITY ANALYSIS

A. Methodology

The workflow in Fig. 1 only shows the general flow among the different modules; the details of the interactions among components remain uncovered. We consider interactions among two or more components with at least one ML component. An interaction pattern contains a module placeholder, which can be instantiated with the components in that module to generate interaction instances. For example, the pattern (PolicyEnsemble, [Policy]) can be instantiated as (PolicyEnsemble, TEDPolicy) or (PolicyEnsemble, (TEDPolicy, RulePolicy)). To answer RQ2, we conducted a qualitative and quantitative analysis of the component interaction patterns and instances.

Step 1: Extract interaction patterns. The interactions can be divided into two categories: inter-module interaction and intra-module interaction. (1) Inter-module interaction: we considered the interaction between two adjacent modules (e.g., Featurizer with Tokenizer). We analyzed the usages of Message in the component code, because components use the Message class to transfer data.
[Fig. 2: Taxonomy of Component Interactions — Component Interaction (43 patterns / 230 instances) splits into Inter-Module (33/172): Data Dependency (25/95), Confidence Checking (2/50), Output Selection (1/18), Output Refinement (1/4), Usage Constraints (4/5); Intra-Module (8/56): Functionality Equivalence (2/18), Priority Order (1/7), Usage Constraints (5/31); Component Instantiation (N/A); and Component Inheritance (2/2). The figure also marks which categories cover both ML–ML and ML–rule interactions, only ML–rule, or only ML–ML.]
Specifically, for each component we extracted all interaction patterns with its upstream and downstream components. We also considered the interaction between the post-processing components (i.e., FallbackClassifier, EntitySynonymMapper and PolicyEnsemble) and the other components in their residing modules as inter-module interactions. (2) Intra-module interaction: we identified the interaction patterns among the components within each module.

Step 2: Generate interaction instances. For each inter-module interaction pattern, we instantiated the module placeholder with every component in the module. For every intra-module interaction, we extracted the Cartesian product of all components in each module as interaction instances. We then filtered out component instances that do not contain ML components, or that do not meet the constraints specified in the Rasa documentation. For example, CRFEntityExtractor cannot use features of any SparseFeaturizer other than RegexFeaturizer.

Step 3: Summarize the interaction taxonomy. For the generated component patterns and instances, we analyzed their semantics and summarized a component interaction taxonomy.
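Step 2 can be paraphrased in code as instantiating module placeholders and taking Cartesian products, then filtering. In the sketch below, the small registry of modules and ML components is a hypothetical stand-in for the real tables.

from itertools import product

# Hypothetical registry: which components belong to a module, and which are ML.
MODULES = {
    "Policy": ["TEDPolicy", "MemoizationPolicy", "RulePolicy"],
    "IntentClassifier": ["DIETClassifier", "KeywordIntentClassifier"],
}
ML_COMPONENTS = {"TEDPolicy", "DIETClassifier"}

def inter_module_instances(pattern):
    # Pattern like ("PolicyEnsemble", "[Policy]"): fill the placeholder.
    fixed, module = pattern
    return [(fixed, c) for c in MODULES[module.strip("[]")]]

def intra_module_instances(module):
    # Cartesian product of distinct component pairs within one module.
    comps = MODULES[module]
    return [p for p in product(comps, comps) if p[0] != p[1]]

def keep(instance):
    # Discard instances that involve no ML component at all.
    return any(c in ML_COMPONENTS for c in instance)

print([i for i in inter_module_instances(("PolicyEnsemble", "[Policy]")) if keep(i)])
print([i for i in intra_module_instances("Policy") if keep(i)])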
B. Results

The component interaction taxonomy is shown in Fig. 2. It is divided into 4 high-level categories (i.e., Inter-Module, Intra-Module, Component Instantiation and Component Inheritance) and 8 inner categories. Note that only Inter-Module interactions contain components with direct data dependencies, while the other categories contain components with indirect interactions (e.g., two featurizers used together). The number of interaction patterns and interaction instances in each category is listed as pattern count/instance count in Fig. 2. There are a total of 43 interaction patterns and 230 interaction instances. Almost all categories include both ML-to-ML and ML-to-rule-based component interactions. In contrast, previous work on Apollo [60] presented only 4 of the 8 inner categories and did not provide a taxonomy or quantitative analysis.

Inter-Module. Components from multiple modules interact through data transfers. In particular, Output Selection means that the downstream component selects the proper output from multiple upstream outputs based on configurable criteria, e.g., PolicyEnsemble with policies. Output Refinement denotes that the downstream component complements the imperfect outputs of upstream components with rules, e.g., EntitySynonymMapper with entity extractors. Confidence Checking means that the downstream component checks the reliability of the output from upstream components using ML models (e.g., UnexpecTEDIntentPolicy with intent classifiers) or rules (e.g., FallbackClassifier with intent classifiers). If the outputs are marked as unreliable, fallback behaviors such as the fall_back system action are triggered. Usage Constraints defines components that should or should not be used together under certain circumstances. For example, SpacyTokenizer is required by CountVectorsFeaturizer when applying the use_lemma option, and by LexicalSyntacticFeaturizer when applying the pos_tag option. Data Dependency includes the rest of the inter-module interaction patterns that do not fall into any of the above categories, which are relatively “trivial” interactions with no specific semantics.

Intra-Module. The interaction mode of components within a module differs from Inter-Module: these components interact indirectly when used together. Priority Order means that the outputs of components within a module are selected according to a priority order, e.g., the priority order of policies. Usage Constraints is similar to Usage Constraints in the inter-module category. For example, only one component from each of Tokenizer, IntentClassifier and EntityExtractor should be used in a configuration file; otherwise, the outputs of the additional components will be overwritten. Functionality Equivalence includes all intra-module interaction patterns that do not belong to any of the above categories, which are relatively “trivial” interactions involving components used together with no specific semantics.

Component Instantiation. Rasa supports creating multiple instances of a component within a configuration setting. For example, multiple CountVectorsFeaturizer instances with different n-gram settings, and multiple LanguageModelFeaturizer instances with different language models, can be used together. We did not count this category of interaction patterns and instances, since developers could specify infinitely many instances of a component within a configuration setting.

Component Inheritance. The class inheritance mechanism allows ML models to be shared among components. For example, the ML model definition class in UnexpecTEDIntentPolicy is a subclass of the ML model definition class in TEDPolicy.
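This inheritance relation can be sketched as ordinary Python subclassing. The component names are real, but the class bodies below are placeholders of our own, not Rasa's model definition code.

class TED:
    """Placeholder for the TED model definition shared via inheritance."""
    def embed(self, dialogue_state):
        return [0.0]  # stand-in for the real Transformer embedding

class UnexpecTEDIntentModel(TED):
    # Reuses TED's architecture; only the prediction target differs.
    def score_intent(self, dialogue_state, intent):
        return sum(self.embed(dialogue_state))  # toy similarity score

model = UnexpecTEDIntentModel()
print(model.score_intent({"slots": {}}, "inform"))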
C. Implications

Lack of specifications for interactions. The outputs of ML components for specific inputs are not guaranteed, due to the stochastic nature of ML models [8]. Thus, it is more difficult to formulate interaction semantics in ML-enabled systems than in traditional systems. When testing samples are predicted wrongly, it is challenging to localize the exact faulty component. Moreover, even if the faulty component has been fixed and its performance has improved, the overall performance of the entire system may degrade [78]. Therefore, training and evaluation should be extended from the component level to the system level to take interactions among components into account. In summary, we need to pay more attention to addressing the challenges caused by the lack of specifications in bug localization and repair for ML-enabled systems.
TABLE II: LoC of Different Code Categories
Module | Data Pre. | Data Post. | Model Usage | Model Definition | Rule Usage | Rule Definition
Tokenizer | 8 | 80 | 27 | 0 | 25 | 25
Featurizer | 390 | 323 | 92 | 0 | 162 | 119
IntentClassifier | 441 | 131 | 113 | 298 | 3 | 69
EntityExtractor | 746 | 311 | 120 | 298 | 24 | 30
Selector | 48 | 55 | 9 | 16 | 0 | 0
Policy | 1332 | 540 | 64 | 543 | 167 | 283
Shared | 996 | 314 | 112 | 1673 | 0 | 43
Total | 3961 | 1754 | 537 | 2828 | 381 | 569
Hidden interactions. It is non-trivial to identify all interactions, even for the system developers of Rasa. For example, the Data Dependency interaction between RegexFeaturizer and CRFEntityExtractor is not mentioned in the documentation and can only be identified from the source code. Application developers may misuse components and be confused by the poor performance of the system without understanding the hidden interactions, especially for interaction categories like Usage Constraints, Output Selection and Priority Order. Techniques like data flow analysis can be explored to automatically reveal component interactions in ML-enabled systems [62].

Furthermore, our results on component interaction complexity could help guide developers to build better ML-enabled systems. For example, developers can follow the interaction patterns Output Selection and Output Refinement to improve the outputs of components at the system level, as well as utilize Confidence Checking to detect cases that ML models cannot handle and then trigger fallback rules, which is very important in safety-critical systems like self-driving systems [60].
V. RQ3: COMPONENT COMPLEXITY ANALYSIS

A. Methodology

To answer RQ3, we classified the categories of code snippets in each component and explored their composition patterns.

Step 1: Label code snippets. We segmented each source code file into code snippets according to semantic meaning, and then classified them into 6 categories: (1) model definition, the definition code of ML models; (2) rule definition, the definition code of rules in rule-based components; (3) model usage, the usage code of ML models; (4) rule usage, the usage code of rules; (5) data pre-processing, the input data processing code before model or rule usages; (6) data post-processing, the output data processing code after model or rule usages. Two of the authors labeled the code snippets independently, and a third author was involved to resolve disagreements. The Cohen's Kappa coefficient of the two authors reached 0.830.

Step 2: Summarize composition patterns of code snippets. Based on the labeled code snippets, we summarized the composition patterns of data processing code and model or rule usage code in each component.

B. Results

The statistics of the different code categories are shown in Table II. We only considered the LoC of labeled code snippets, while ignoring general utility code such as class initialization.
[Fig. 3: Non-Sequential Code Composition Patterns within Components]
Data processing code contributes a total of 5715 (57.1%) LoC, while model usage and definition code and rule usage and definition code contribute 3365 (33.5%) and 950 (9.4%) LoC, respectively. 1673 (59.2%) of the 2828 LoC of model definition code are in the Shared module, which shows that the reuse of model definition code between different components is quite common. There is no model definition code in Tokenizer and Featurizer, because their ML components are all built on top of external ML libraries.

We classified the data pre-processing and data post-processing categories into more specific types, due to the dominant proportion of data processing code in Rasa. Specifically, Validation code intends to validate the input or output data of components. Format Transformation code transforms data formats, such as constructing vectors from Python arrays and reshaping vectors. Component Input/Output Filter code filters data that does not meet specified criteria, such as the absence of certain attributes. Data Scale/Padding/Encoding/Decoding code changes the values of data, while Data Split/Shuffle/Balance/Batch/Rank code changes the organization of data for better training and inference of components. We provide the complete codebook and statistics of the data processing types at our website [7].

Moreover, we find that the composition patterns of code snippets include a sequential code composition pattern and various non-sequential composition patterns. In a typical sequential composition pattern, data is first pre-processed, then processed by model or rule usage code, and finally post-processed. The non-sequential code composition patterns are summarized in Fig. 3, where the black boxes are data processing code snippets, the red boxes are model or rule usage code snippets, and the green diamonds mean selecting one or multiple downstream code snippets. The first 5 patterns consist of multiple model or rule usages in one component. The last 3 patterns consist of a single model or rule usage with multiple possible data processing snippets, decided by configurations or input data.
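To make the code categories and the sequential composition pattern concrete, here is a labeled toy example. The category comments follow the taxonomy above; the component and model are invented for illustration.

def classify(message: dict, model) -> dict:
    # Data pre-processing: Validation.
    if "tokens" not in message:
        raise ValueError("message lacks tokens")
    # Data pre-processing: Format Transformation (tokens -> feature vector).
    features = [float(len(t)) for t in message["tokens"]]
    # Model usage: invoke the trained model on the prepared input.
    scores = model(features)
    # Data post-processing: Data Rank (order intents by confidence).
    ranked = sorted(scores.items(), key=lambda kv: -kv[1])
    message["intent_ranking"] = ranked
    return message

toy_model = lambda feats: {"inform": 0.9, "greet": 0.1}  # stand-in model
print(classify({"tokens": ["hi", "there"]}, toy_model)["intent_ranking"])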
C. Implications

Data processing. Data processing code is scattered at different granularity levels, unlike the well-documented and structured code of ML models and rules. In detail, data processing code includes data processing components (e.g., PolicyEnsemble), general data processing classes and functions in the Shared module, and specific data processing snippets in components entangled with model or rule usages. On the one hand, it can become troublesome for application developers to identify and understand the semantics of all data processing code. A specific example is that data pre-processing code also exists in the model definition class TransformerRasaModel, including Format Transformation and Data Batch code. It could be particularly helpful to automatically extract and analyze the semantics of data processing code with techniques like program analysis [61]. On the other hand, it would be challenging for system developers to maintain and test data processing code, possibly resulting in severe consequences as the ML development paradigm shifts from model-centric to data-centric [45]. In general, building a taxonomy of data processing code would be helpful for the maintenance and testing of data processing code.

Code composition patterns. The non-sequential composition patterns could introduce additional dynamic complexity for ML-enabled systems; e.g., it is too expensive to capture all possible run-time compositions of code snippets with static analysis. Although dynamic testing is widely adopted to complement the limitations of static analysis in traditional software [24], most existing testing techniques tailored for ML only target the ML model level [82]. It would be beneficial to extend them to cover data processing code and composition patterns.
VI. RQ4: TESTING PRACTICE ANALYSIS

A. Methodology

To answer RQ4, test cases were inspected in three steps.

Step 1: Label test cases. We manually labeled the granularity level, oracle type and ML stage of each test case. There are three different granularity levels of test cases: (1) Method-level: testing single or multiple methods; (2) Component-level: testing the complete process of a component in the training or inference stage; (3) Integration-level: testing the current component together with upstream components. There are four test oracle types: (1) Given input-output pairs: the input and expected output data are given; (2) Component-specific constraints: constraints that must be satisfied according to the implementation of a component, e.g., the sum of the confidence scores of the intent list generated by classifiers should equal 1; (3) Differential executions: the outputs of executions under different settings should change or remain the same, e.g., given the same input, the outputs of an original ML model and its version loaded back from disk should remain the same; (4) Exception: whether or not the test case throws exceptions for certain inputs and configurations. Finally, the ML stages covered by test cases include the training, inference and evaluation stages. To test the training stage of a component, test cases must first train it and check whether any test oracle is violated. Note that there may be several oracle types and ML stages, but only one granularity level, for each test case in Rasa.
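As an example of the differential-executions oracle type, the hedged sketch below checks that a persisted and reloaded model reproduces the outputs of the original. The toy classifier is invented, and pickle stands in for Rasa's actual persistence mechanism.

import pickle

class ToyClassifier:
    # Minimal deterministic "model" so persistence can be round-tripped.
    def __init__(self, bias: float) -> None:
        self.bias = bias
    def predict(self, x: float) -> float:
        return x + self.bias

def test_loaded_model_matches_original():
    original = ToyClassifier(bias=0.5)
    loaded = pickle.loads(pickle.dumps(original))  # save + reload
    for x in (0.0, 1.0, 2.5):
        # Oracle: outputs before and after persistence must remain the same.
        assert original.predict(x) == loaded.predict(x)

test_loaded_model_matches_original()
print("differential-execution oracle passed")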
TABLE III: Code Coverage and Labeled Statistics of Test Cases
Module | Total | Meth. | Comp. | Integ. | Infer. | Train | Eval. | I-O | C-S | Diff. | Exception | Stat. Cov. | Bran. Cov.
Tokenizer | 27 | 7 | 20 | 0 | 25 | 14 | 0 | 24 | 1 | 0 | 3 | 97.4% | 96.8%
Featurizer | 62 | 13 | 14 | 35 | 56 | 40 | 0 | 46 | 5 | 3 | 8 | 95.7% | 94.9%
IntentClassifier | 36 | 7 | 15 | 14 | 29 | 30 | 0 | 18 | 11 | 7 | 1 | 92.5% | 89.5%
EntityExtractor | 41 | 6 | 19 | 16 | 34 | 31 | 0 | 18 | 14 | 8 | 1 | 92.3% | 90.1%
Selector | 13 | 4 | 6 | 3 | 9 | 12 | 0 | 6 | 5 | 1 | 1 | 68.3% | 66.4%
Policy | 165 | 77 | 88 | 0 | 105 | 127 | 0 | 117 | 51 | 0 | 20 | 95.7% | 94.5%
Shared | 138 | 129 | 2 | 7 | 90 | 84 | 0 | 89 | 42 | 2 | 16 | 92.3% | 91.4%
Total | 461 | 240 | 156 | 65 | 331 | 317 | 47 | 312 | 123 | 15 | 49 | 93.2% | 92.0%
(Meth./Comp./Integ. = test case type; Infer./Train/Eval. = test case stage; I-O/C-S/Diff./Exception = oracle type; Stat./Bran. Cov. = code coverage.)
+ TABLE IV: Test Coverage of Component Interactions
+
+ Category      Sub-Category               Cov. Patterns  Cov. Instances
+ Inter-module  Data Dependency                     9/25           17/95
+               Confidence Checking                  0/2            0/50
+               Output Selection                     0/1            0/18
+               Output Refinement                    1/1             1/4
+               Usage Constraints                    3/3             3/3
+ Intra-module  Functionality Equivalence            2/2            3/18
+               Priority Order                       1/1             4/7
+               Usage Constraints                    2/2             2/4
+ Total                                            18/37          30/199
+ Rasa. Two of the authors labeled the test cases independently, and a
+ third author was involved to resolve disagreements. The Cohen's Kappa
+ coefficients for granularity level, test oracle and ML stage are
+ 0.907, 0.908, and 0.854, respectively.
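+ For illustration, this kind of inter-rater agreement can be computed
+ with scikit-learn (a minimal sketch; the label lists are hypothetical):
+
+     from sklearn.metrics import cohen_kappa_score
+
+     # Hypothetical granularity labels from two raters for five test cases
+     rater_a = ["method", "component", "integration", "method", "component"]
+     rater_b = ["method", "component", "method", "method", "component"]
+     print(cohen_kappa_score(rater_a, rater_b))  # chance-corrected agreement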
+ Step 2: Collect code coverage of test cases. We collected the
+ statement coverage and branch coverage of the code via pytest-cov,
+ because Rasa uses pytest to run its test cases.
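+ pytest-cov collects both metrics in one run; a typical invocation
+ (the test and package paths here are assumptions) looks like:
+
+     pytest tests/ --cov=rasa --cov-branch --cov-report=term-missing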
+ Step 3: Collect interaction pattern coverage of test cases. We
+ injected logging statements into the methods of every component, and
+ then executed the test cases to collect the co-executed component
+ sets of each test case. We then attempted to match all component
+ interaction instances, except Model Inheritance instances and some
+ Usage Constraints instances that cannot be used together, against
+ these component sets. The matched interaction patterns and instances
+ were considered as covered by test cases.
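+ A minimal sketch of such instrumentation (the component classes and
+ the global log set are hypothetical):
+
+     import functools
+
+     EXECUTED = set()  # component names co-executed during one test case
+
+     def log_component(cls):
+         """Wrap public methods of a component class to record executions."""
+         for name, attr in list(vars(cls).items()):
+             if callable(attr) and not name.startswith("_"):
+                 def make_wrapper(func, comp=cls.__name__):
+                     @functools.wraps(func)
+                     def wrapper(*args, **kwargs):
+                         EXECUTED.add(comp)  # record component, then delegate
+                         return func(*args, **kwargs)
+                     return wrapper
+                 setattr(cls, name, make_wrapper(attr))
+         return cls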
+ B. Results
+
+ The code coverage and labeled statistics of test cases are shown in
+ Table III. (1) The total statement coverage and branch coverage reach
+ 93.2% and 92.0%, which is much higher than the 21.5% and 13.3%
+ reported for Apollo [60]. (2) The coverage of Selector is only 68.3%
+ and 66.4%, because Selector has two candidate ML models while only
+ one of them was tested. (3) There are 240 (52.0%) method-level, 156
+ (33.8%) component-level and 65 (14.1%) integration-level test cases.
+ (4) There are no integration-level test cases in Policy, because
+ Policy was tested with given intent and entity inputs from developers
+ without depending on the NLU modules. (5) The inference and training
+ stages have similar test case quantities. (6) Only test cases in the
+ Shared module cover the evaluation stage, because the Shared module
+ provides the evaluation code for all components. (7) There are 312
+ (67.7%), 123 (26.3%), 15 (3.3%), and 49 (10.6%) test cases with given
+ input-output pairs, component-specific constraints, differential
+ executions and exception test oracles, respectively.
+
+ As Table IV shows, the test coverage of interactions is relatively
+ low, i.e., 18 (48.6%) of 37 patterns and 30 (15.1%) of 199 instances
+ are covered. This is because only integration-level tests cover
+ component interactions. In particular, Confidence Checking and Output
+ Selection are not covered at all.
+ C. Implications
+
+ Low test coverage of component interactions. It is difficult to
+ achieve high test coverage of component interactions, due to the
+ complexity caused by the huge configuration space and hidden
+ interactions. The only test cases that cover component interactions
+ (i.e., integration-level test cases) contribute no more than 15% of
+ all test cases. Yet, integration-level test cases can cover and kill
+ more mutants than component-level and method-level test cases, as
+ many mutants do not manifest in non-integration-level test cases
+ [42]. Therefore, it is crucial to generate integration-level test
+ cases for ML-enabled systems.
+
+ Limited test oracle types. It is challenging and time-consuming to
+ write test cases with given input-output pairs and component-specific
+ constraints oracles, due to the complexity brought by the lack of
+ specifications for interactions. As a result, test oracles that do
+ not require interaction specifications, i.e., differential executions
+ and exceptions, have been widely utilized to tackle the oracle
+ problem in test case generation techniques for traditional software,
+ such as differential testing [21], fuzzing [44] and search-based
+ testing [50]. Besides, we find that test cases with these two oracles
+ have a capability to kill mutants similar to that of the
+ component-specific constraints oracle (see RQ5). In spite of this,
+ only 13.9% of the test cases in Rasa are written with differential
+ executions and exception test oracles, implying that there could be
+ substantial room to apply these two test oracle types in test case
+ generation techniques for ML-enabled systems.
+ VII. RQ5: MUTATION TESTING ANALYSIS
+
+ A. Methodology
+
+ To answer RQ5, we performed a mutation testing analysis [37].
+ Mutation testing applies mutators to generate faulty versions of the
+ code, i.e., mutants. For every mutant, test cases were executed and
+ their results collected to decide whether the mutant was killed. As
+ Rasa contains both ML components and rule-based components, we
+ considered both mutators for traditional software (i.e., syntactic
+ mutators) and ML-specific mutators. As Table V shows, we used 9
+ syntactic mutators from Jia et al. [36] and 11 ML-specific mutators
+ from DeepCrime [33]. We list the steps of the mutation analysis in
+ the following.
+ Step 1: Generate mutants. We generated syntactic mutants using mutmut
+ [51]. We used two groups of syntactic mutators, i.e., Logic and
+ Value, which mutate the logic flow and variable values. Besides, we
+ generated ML-specific mutants with DeepCrime [33]. We used 4 of the 8
+ mutation groups in DeepCrime (Activation, Regularization, Weights and
+ Optimization). Among the others, the mutators in the Training Data
+ and Validation groups are not the focus of this paper; the
+ Hyperparameters group is not included, as hyperparameters in Rasa are
+ specified by developers in configuration files; and the Loss Function
+ group is not applicable, as the loss functions in Rasa are all
+ implemented from scratch, while the mutators provided by DeepCrime
+ only replace one Keras loss function API with another. Besides, we
+ only generated mutants for the 6 code categories labeled in RQ3,
+ excluding general utils code. We generated no more than 30 mutants
+ for every Python class to reduce potential bias, and modified only
+ one AST node for every mutant.
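+ mutmut is driven from the command line; a typical session might look
+ like this (a sketch; the target path is an assumption):
+
+     mutmut run --paths-to-mutate rasa/nlu/classifiers/  # generate and test mutants
+     mutmut results                                      # list surviving mutants
+     mutmut show 42                                      # inspect one mutant as a diff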
+ Step 2: Perform mutation testing analysis with test cases. For every
+ mutant, only the test cases that cover the mutated line were
+ collected (from the test coverage data in RQ4) and executed, to save
+ running time. If any test case fails on a mutant, the mutant is
+ considered killed by the test case; otherwise, the mutant is
+ considered survived. A test case can fail with three symptoms: (1) an
+ assertion fails; (2) an execution or runtime error manifests; or (3)
+ the test case times out. The maximum time for a test case to run is
+ 10 times its running time on the original clean code version.
+ Besides, test cases were executed 3 times for every mutant to avoid
+ flaky tests. We found that all test case statuses remained the same
+ across the three runs.
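+ A minimal sketch of this kill decision, run via pytest (the covering
+ test list and baseline timings are hypothetical):
+
+     import subprocess
+
+     def mutant_status(covering_tests, baseline_seconds, repeats=3):
+         """Return 'killed' if any covering test fails, errors, or times out."""
+         for test_id in covering_tests:
+             for _ in range(repeats):  # repeat to guard against flaky tests
+                 try:
+                     result = subprocess.run(
+                         ["pytest", test_id],
+                         timeout=10 * baseline_seconds[test_id],  # 10x clean runtime
+                     )
+                     if result.returncode != 0:  # assertion or runtime error
+                         return "killed"
+                 except subprocess.TimeoutExpired:
+                     return "killed"
+         return "survived"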
+ Step 3: Perform mutation testing analysis with test data. For the
+ mutants that survived Step 2, we assessed their impact with the 3
+ default configuration files and the restaurant domain data in
+ MultiWOZ [15], a widely used multi-domain dataset to evaluate the
+ performance of TDS. Given a configuration file, only the components
+ specified in it are included in the Rasa pipeline, so the mutated
+ nodes of some survived mutants from Step 2 will not be executed, as
+ they are not reached under that configuration. Due to the stochastic
+ nature of machine learning programs, we trained both the mutated
+ program and the original program 5 times, each with a random 80/20
+ train/test data split, and decided whether the performance metrics of
+ the two versions differ with statistical significance and a
+ non-negligible, non-small effect size. We followed the same formula
+ as [33, 35] to decide whether a mutant is killed by the test data,
+ with a significance threshold of 0.05 and an effect-size threshold of
+ 0.5. We adopted the F1 scores of IntentClassifier, EntityExtractor
+ and Policy as performance metrics, i.e., if the F1 score of any of
+ the three modules is statistically different between the two code
+ versions, the mutant is marked as killed by test data.
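+ A sketch of this decision for one metric, assuming (for illustration)
+ a two-sided Mann-Whitney U test and Cohen's d; the exact formula in
+ DeepCrime may differ in details:
+
+     import numpy as np
+     from scipy.stats import mannwhitneyu
+
+     def killed_by_test_data(orig_f1, mut_f1, alpha=0.05, min_effect=0.5):
+         """orig_f1, mut_f1: F1 scores over 5 repeated trainings per version."""
+         _, p = mannwhitneyu(orig_f1, mut_f1, alternative="two-sided")
+         pooled = np.sqrt((np.var(orig_f1, ddof=1) + np.var(mut_f1, ddof=1)) / 2)
+         diff = abs(np.mean(orig_f1) - np.mean(mut_f1))
+         # Cohen's d; treat a zero-variance, nonzero-difference case as maximal
+         d = diff / pooled if pooled > 0 else (float("inf") if diff > 0 else 0.0)
+         return p < alpha and d >= min_effect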
+ B. Results
+
+ The mutation testing results by each mutator are shown in Table V.
+ There are 1447 mutants generated, 1106 (76.4%) killed by test cases,
+ 341 (23.6%) survived, 146 (10.1%) impacted, and 22 (1.5%) killed by
+ test data. Only 146 of the 341 survived mutants impact the 3 default
+ Rasa pipelines, which shows that the huge configuration space is
+ challenging to test adequately. 81.3% of syntactic mutants and 20.0%
+ of ML-specific mutants are killed by test cases, while
+ TABLE V: Mutation Testing Results
+
+                                           Test Case         Test Data
+ Mutation Group  Operator  Total  Killed  Survived  Impacted  Killed
+ Logic           ArOR        109      86        23        10       1
+                 ComOR       109      88        21         6       0
+                 LogOR       145     112        33        14       0
+                 AsOR         20      19         1         0       0
+                 MemOR        32      30         2         0       0
+                 KVR          12       7         5         1       0
+ Value           BVR          64      32        32         9       0
+                 NVR         224     180        64        18       2
+                 AsVR        582     525        67        10       0
+ Activation      ACH          22       3        19        18       6
+                 ARM           2       0         2         1       0
+                 AAL          22      11        11        11       2
+ Regularization  RAW           6       0         6         3       3
+                 RCW          10       0        10        10       0
+                 RRW           5       0         5         5       0
+ Weights         WCI          24      10        14        13       1
+                 WAB           4       0         4         3       1
+                 WRB           3       1         2         2       0
+ Optimization    OCH          24       2        22         9       6
+                 OCG           8       0         8         3       0
+ Total                      1447    1106       341       146      22
+ TABLE VI: Mutant Location Results
+
+                      Test Case Result    Test Data Result
+ Location     Total   Killed  Survived   Impacted  Killed
+ Data Prep.     385      326        59         23       0
+ Data Post.     271      222        49          5       0
+ Model Usage    307      243        64          4       0
+ Model Def.     364      224       140         99      22
+ Rule Usage       4        4         0          0       0
+ Rule Def.      115      101        14          4       0
+ TABLE VII: Test Case Mutation Results
+
+ Category     Type         Test Num.  Strong Test Num.  Covered  Killed
+ Granularity  Method             240                59      947     635
+              Component          156                31     1121     709
+              Integration         65                29      903     613
+ Stage        Infer.             331                86     1358     995
+              Training           317                75     1184     847
+              Evaluation          47                11      772     476
+ Oracle Type  I-O                312                98     1298     956
+              C-S                123                19     1103     707
+              Diff                15                 3      625     338
+              Exception           49                 6      686     352
+ 4.4% of syntactic mutants and 24.4% of ML-specific mutants among the
+ impacted mutants are killed by test data. This shows that test cases
+ are much more effective than test data at detecting syntactic
+ mutants, and slightly less effective at detecting ML-specific
+ mutants. The syntactic and ML-specific mutants killed by test data
+ degrade the F1 scores of IntentClassifier, EntityExtractor and Policy
+ by 20.8%, 0.8%, 3.6% and by 11.1%, 13.4%, 5.7% on average,
+ respectively.
+
+ The mutation testing results w.r.t. the location of mutants are shown
+ in Table VI. 224 (61.5%) of the 364 mutants in model definition code,
+ and 896 (82.8%) of the 1082 mutants in the other code categories, are
+ killed by test cases. In particular, few mutants in code categories
+ other than model definition are impacted and killed by test data,
+ which implies that test data is only effective at killing mutants in
+ model definition code.
+
+ We investigated the capability of different categories of test cases
+ to detect mutants, by calculating the ratio of strong test cases to
+ all test cases, and the ratio of killed mutants to covered mutants.
+ We define a strong test case as one that kills at least 75% of its
+ covered mutants. As Table VII shows, integration-level test cases
+ have the highest ratio of strong test cases (44.6%) and the highest
+ ratio of killed mutants (67.9%) among the three granularity levels.
+ Test cases with the given input-output test oracle have the highest
+ ratio of strong test cases (31.4%) and the highest ratio of killed
+ mutants (73.7%) among the four oracle types, while test cases with
+ the other three oracle types have similar ratios.
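+ Both ratios follow directly from per-test kill statistics (a sketch;
+ the (covered_ids, killed_ids) set pairs per test case are hypothetical):
+
+     def strength_ratios(tests, threshold=0.75):
+         # tests: list of (covered mutant-id set, killed mutant-id set)
+         strong = sum(1 for cov, kill in tests
+                      if cov and len(kill) / len(cov) >= threshold)
+         covered = set().union(*(cov for cov, _ in tests))
+         killed = set().union(*(kill for _, kill in tests))
+         return strong / len(tests), len(killed) / len(covered)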
+ C. Implications
+
+ Non-ML-specific bugs and test cases in ML-enabled systems. The
+ complexity of data processing code means that non-ML-specific bugs
+ are prone to be introduced. Compared with test data, test cases are
+ more effective at detecting syntactic mutants, i.e., non-ML-specific
+ bugs. Moreover, it is notoriously hard for developers to analyze,
+ localize and fix bugs in ML programs based on test data alone, which
+ is why interpretation [85], debugging [3] and repair [67] techniques
+ have been developed for ML models. It is easier for developers to
+ localize and fix bugs from failed test cases by analyzing the
+ violated test oracles. Thus, we argue that non-ML-specific bugs and
+ test cases in ML-enabled systems deserve more attention. Although
+ Rasa has a rich set of test cases that achieve high code coverage,
+ the mutant kill ratio remains to be improved (76.4%), especially for
+ ML-specific mutants (29.8%). The applicability and limitations of
+ existing test case generation, selection and quality assurance
+ techniques in ML-enabled systems are worth exploring [18, 39].
+
+ Challenges of test data to kill mutants. Existing research on
+ mutation testing for ML programs only evaluated mutants with test
+ data to decide whether they can be killed [30, 33, 35, 36, 48].
+ However, the capability of test data to kill mutants in large-scale
+ ML-enabled systems is limited for two reasons. First, due to the
+ complexity from configurations, only part of the mutants impact the
+ components of actually configured systems. Second, the amount and
+ distribution of training data and test data strongly affect the
+ results. For example, when we trained the clean code version and the
+ mutated version with 75% of the original training data, the number of
+ killed mutants changed from 22 to 83, which means some bugs may only
+ manifest under specific training data settings. Therefore, system
+ developers should evaluate and test ML-enabled systems under more of
+ the configurations and data settings that may be used by application
+ developers, to detect potential bugs.
+ VIII. THREATS
+
+ First, our study is a case study on Rasa, a widely used industrial
+ task-oriented dialogue system. It is not clear whether our results
+ generalize to other ML-enabled systems. However, we believe it is a
+ good start for taking a system view of ML-enabled systems. Second,
+ our study involves a lot of manual analysis of Rasa source code and
+ documentation, which may incur biases. To reduce them, two of the
+ authors conducted the manual analysis separately, and a third author
+ was involved to resolve disagreements. Third, the mutators that we
+ adopt may not simulate real-world bugs. To mitigate this, we use
+ mutators from DeepCrime [33], which are summarized from real-world
+ ML bugs.
+ IX. RELATED WORK
+
+ Study of ML-Enabled Systems. While much attention has been paid to ML
+ models, less has been paid to system-level analysis [38]. Peng et al.
+ [60] investigated the integration of ML models in Apollo by analyzing
+ how ML models interact with the system and what the current testing
+ effort looks like. Besides, Nahar et al. [52] explored collaboration
+ challenges between data scientists and software engineers through
+ interviews. Amershi et al. [5] and Bernardi et al. [10] reported
+ challenges and practices of MLOps (from model requirements to model
+ monitoring) at Microsoft and Booking.com. Although they still take a
+ model-centric view, they emphasize that models can be complexly
+ entangled and cause non-monotonic errors [5], and that model quality
+ improvement does not necessarily indicate system value gain [10].
+ Further, Yokoyama [80] developed an architectural pattern to separate
+ ML and non-ML components, while Serban and Visser [64] surveyed
+ architectural challenges for ML-enabled systems. Sculley et al. [63]
+ identified ML-specific technical debt in ML-enabled systems, while
+ Tang et al. [70] further derived new kinds of debt from real-world
+ code refactorings. In addition, some attempts were made on the
+ problem of ML component entanglement [5], e.g., performing
+ metamorphic testing on a system with two ML components [83],
+ troubleshooting failures in a system with three ML components by
+ human intellect [53], and decomposing errors in a system with two or
+ three ML components [78]. These studies explore the interaction among
+ models, but only on simple systems. Moreover, Abdessalem et al.
+ [1, 2] studied feature interaction failures in self-driving systems,
+ and proposed testing and repairing approaches to automatically detect
+ and fix them. Apel et al. [8] also discussed feature interactions in
+ ML-enabled systems, and suggested strategies to cope with them.
+
+ The main difference from previous work is that we take a large-scale,
+ complex ML-enabled system, explore its complexity at three levels,
+ and analyze the impact of its complexity on testing. The closest work
+ is Peng et al.'s [60], but we report a deeper complexity analysis and
+ also conduct a testing impact analysis.
+
+ Mutation Testing for DL Models. Jia et al. [36] applied syntactic
+ mutators for traditional programs to DL models. DeepMutation [48] and
+ DeepMutation++ [30] defined DL-specific mutators. DeepCrime [33]
+ derived DL-specific mutators based on real DL bugs. Jahangirova and
+ Tonella [35] evaluated syntactic and DL-specific mutators. These
+ studies focus on model-level mutation, while we target system-level
+ mutation.
+
+ Testing for Dialogue Systems. Bozic and Wotawa [13] proposed a
+ security testing approach for chatbots to prevent cross-site
+ scripting and SQL injection. Bozic et al. [12] tested a hotel booking
+ chatbot via planning. Bozic and Wotawa [14] introduced a metamorphic
+ testing approach for chatbots. Similarly, Liu et al. [47] used
+ semantic metamorphic relations to test the NLU module in dialogue
+ systems. Despite these efforts, little attention has been paid to
+ system-level testing of dialogue systems.
+
+ X. CONCLUSION
+
+ We present a comprehensive study on Rasa that characterizes its
+ complexity at three levels and the impact of this complexity on
+ testing from two perspectives. Furthermore, we highlight practical
+ implications to improve software engineering for ML-enabled systems.
+ All study data and source code used in this paper are available at
+ https://rasasystemcomplexity.github.io/.
+ REFERENCES
1729
+ [1] R. B. Abdessalem, A. Panichella, S. Nejati, L. C. Briand, and T. Stifter,
1730
+ “Testing autonomous cars for feature interaction failures using many-
1731
+ objective search,” in Proceedings of the 33rd ACM/IEEE International
1732
+ Conference on Automated Software Engineering, 2018, p. 143–154.
1733
+ [2] ——, “Automated repair of feature interaction failures in automated
1734
+ driving systems,” in Proceedings of the 29th ACM SIGSOFT International
1735
+ Symposium on Software Testing and Analysis, 2020, pp. 88–100.
1736
+ [3] A. Abid, M. Yuksekgonul, and J. Zou, “Meaningfully debugging model
1737
+ mistakes using conceptual counterfactual explanations,” in Proceedings
1738
+ of the International Conference on Machine Learning, 2022, pp. 66–88.
1739
+ [4] A. Aggarwal, P. Lohia, S. Nagar, K. Dey, and D. Saha, “Black box
1740
+ fairness testing of machine learning models,” in Proceedings of the 2019
1741
+ 27th ACM Joint Meeting on European Software Engineering Conference
1742
+ and Symposium on the Foundations of Software Engineering, 2019, p.
1743
+ 625–635.
1744
+ [5] S. Amershi, A. Begel, C. Bird, R. DeLine, H. Gall, E. Kamar,
1745
+ N. Nagappan, B. Nushi, and T. Zimmermann, “Software engineering for
1746
+ machine learning: A case study,” in Proceedings of the 41st International
1747
+ Conference on Software Engineering: Software Engineering in Practice,
1748
+ 2019, pp. 291–300.
1749
+ [6] Anaconda. (2022) Dask. [Online]. Available: https://docs.dask.org/en/
1750
+ stable/
1751
+ [7] Anonymous. (2022) Understanding the complexity and its impact
1752
+ on
1753
+ testing
1754
+ in
1755
+ ml-enabled
1756
+ systems.
1757
+ [Online].
1758
+ Available:
1759
+ https:
1760
+ //rasasystemcomplexity.github.io/
1761
+ [8] S. Apel, C. K¨astner, and E. Kang, “Feature interactions on steroids:
1762
+ On the composition of ml models,” IEEE Software, vol. 39, no. 3, pp.
1763
+ 120–124, 2022.
1764
+ [9] T. Baluta, Z. L. Chua, K. S. Meel, and P. Saxena, “Scalable quantitative
1765
+ verification for deep neural networks,” in Proceedings of the 43rd Inter-
1766
+ national Conference on Software Engineering: Companion Proceedings,
1767
+ 2021, p. 248–249.
1768
+ [10] L. Bernardi, T. Mavridis, and P. Estevez, “150 successful machine learning
1769
+ models: 6 lessons learned at booking.com,” in Proceedings of the 25th
1770
+ ACM SIGKDD International Conference on Knowledge Discovery &
1771
+ Data Mining, 2019, p. 1743–1751.
1772
+ [11] T. Bocklisch, J. Faulkner, N. Pawlowski, and A. Nichol, “Rasa: Open
1773
+ source language understanding and dialogue management,” CoRR, vol.
1774
+ abs/1712.05181, 2017.
1775
+ [12] J. Bozic, O. A. Tazl, and F. Wotawa, “Chatbot testing using ai planning,”
1776
+ in Proceedings of the IEEE International Conference On Artificial
1777
+ Intelligence Testing, 2019, pp. 37–44.
1778
+ [13] J. Bozic and F. Wotawa, “Security testing for chatbots,” in Proceedings
1779
+ of the IFIP International Conference on Testing Software and Systems,
1780
+ 2018, pp. 33–38.
1781
+ [14] ——, “Testing chatbots using metamorphic relations,” in Proceedings
1782
+ of the IFIP International Conference on Testing Software and Systems,
1783
+ 2019, pp. 41–55.
1784
+ [15] P. Budzianowski, T.-H. Wen, B.-H. Tseng, I. Casanueva, S. Ultes,
1785
+ O. Ramadan, and M. Gaˇsi´c, “Multiwoz - a large-scale multi-domain
1786
+ wizard-of-oz dataset for task-oriented dialogue modelling,” in Proceedings
1787
+ of the Conference on Empirical Methods in Natural Language Processing,
1788
+ 2018, pp. 5016–5026.
1789
+ [16] D. Chaudhuri, A. Kristiadi, J. Lehmann, and A. Fischer, “Improving
1790
+ response selection in multi-turn dialogue systems by incorporating domain
1791
+ knowledge,” in Proceedings of the 22nd Conference on Computational
1792
+ Natural Language Learning, 2018, pp. 497–507.
1793
+ [17] J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova, “Bert: Pre-training
1794
+ of deep bidirectional transformers for language understanding,” arXiv
1795
+ preprint arXiv:1810.04805, 2018.
1796
+ [18] D. Di Nardo, N. Alshahwan, L. Briand, and Y. Labiche, “Coverage-based
1797
+ test case prioritisation: An industrial case study,” in 2013 IEEE Sixth
1798
+ International Conference on Software Testing, Verification and Validation,
1799
+ 2013, pp. 302–311.
1800
+ [19] S. Dola, M. B. Dwyer, and M. L. Soffa, “Distribution-aware testing
1801
+ of neural networks using generative models,” in Proceedings of the
1802
+ IEEE/ACM 43rd International Conference on Software Engineering,
1803
+ 2021, pp. 226–237.
1804
+ [20] S. R. Eddy, “Hidden markov models,” Current opinion in structural
1805
+ biology, vol. 6, no. 3, pp. 361–365, 1996.
1806
+ [21] R. B. Evans and A. Savoia, “Differential testing: a new approach to
1807
+ change detection,” in The 6th Joint Meeting on European software
1808
+ engineering conference and the ACM SIGSOFT Symposium on the
1809
+ Foundations of Software Engineering: Companion Papers, 2007, pp.
1810
+ 549–552.
1811
+ [22] H. Face. (2022) Transformers. [Online]. Available: https://huggingface.
1812
+ co/docs/transformers/index
1813
+ [23] Facebook. (2022) Duckling. [Online]. Available: https://github.com/
1814
+ facebook/duckling/
1815
+ [24] R. E. Fairley, “Tutorial: Static analysis and dynamic testing of computer
1816
+ software,” Computer, vol. 11, no. 4, pp. 14–23, 1978.
1817
+ [25] Y. Feng, Q. Shi, X. Gao, J. Wan, C. Fang, and Z. Chen, “Deepgini:
1818
+ Prioritizing massive tests to enhance the robustness of deep neural
1819
+ networks,” in Proceedings of the 29th ACM SIGSOFT International
1820
+ Symposium on Software Testing and Analysis, 2020, p. 177–188.
1821
+ [26] D. Foo, J. Yeo, H. Xiao, and A. Sharma, “The dynamics of software
1822
+ composition analysis,” CoRR, vol. abs/1909.00973, 2019.
1823
+ [27] Google. (2022) Tensorhub. [Online]. Available: https://tensorflow.google.
1824
+ cn/hub
1825
+ [28] X. He, K. Zhao, and X. Chu, “Automl: A survey of the state-of-the-art,”
1826
+ Knowledge Based Systems, vol. 212, 2021.
1827
+ [29] M. Henderson, I. Casanueva, N. Mrkˇsi´c, P.-H. Su, T.-H. Wen, and
1828
+ I. Vuli´c, “Convert: Efficient and accurate conversational representations
1829
+ from transformers,” CoRR, 2019.
1830
+ [30] Q. Hu, L. Ma, X. Xie, B. Yu, Y. Liu, and J. Zhao, “Deepmutation++: A
1831
+ mutation testing framework for deep learning systems,” in Proceedings
1832
+ of the 34th IEEE/ACM International Conference on Automated Software
1833
+ Engineering, 2019, pp. 1158–1161.
1834
+ [31] Z. Hu, Y. Dong, K. Wang, K.-W. Chang, and Y. Sun, “Gpt-gnn: Generative
1835
+ pre-training of graph neural networks,” in Proceedings of the 26th ACM
1836
+ SIGKDD International Conference on Knowledge Discovery & Data
1837
+ Mining, 2020, pp. 1857–1867.
1838
+ [32] K. Huang, B. Chen, S. Wu, J. Cao, L. Ma, and X. Peng, “Demystifying
1839
+ dependency bugs in deep learning stack,” CoRR, vol. abs/2207.10347,
1840
+ 2022.
1841
+ [33] N. Humbatova, G. Jahangirova, and P. Tonella, “Deepcrime: Mutation
1842
+ testing of deep learning systems based on real faults,” in Proceedings of
1843
+ the 30th ACM SIGSOFT International Symposium on Software Testing
1844
+ and Analysis, 2021, p. 67–78.
1845
+ [34] IBM. (2022) Ibm global ai adoption index 2022. [Online]. Available:
1846
+ https://www.ibm.com/watson/resources/ai-adoption
1847
+ [35] G. Jahangirova and P. Tonella, “An empirical evaluation of mutation
1848
+ operators for deep learning systems,” in Proceedings of the IEEE 13th
1849
+ International Conference on Software Testing, Validation and Verification,
1850
+ 2020, pp. 74–84.
1851
+ [36] L. Jia, H. Zhong, X. Wang, L. Huang, and Z. Li, “How do injected
1852
+ bugs affect deep learning?” in Proceedings of the IEEE International
1853
+ Conference on Software Analysis, Evolution and Reengineering, 2022,
1854
+ pp. 793–804.
1855
+ [37] Y. Jia and M. Harman, “An analysis and survey of the development of
1856
+ mutation testing,” IEEE Transactions on Software Engineering, vol. 37,
1857
+ no. 5, pp. 649–678, 2011.
1858
+ [38] C.
1859
+ K¨astner.
1860
+ (2022)
1861
+ Machine
1862
+ learning
1863
+ in
1864
+ production:
1865
+ From
1866
+ models to systems. [Online]. Available: https://ckaestne.medium.com/
1867
+ machine-learning-in-production-from-models-to-systems-e1422ec7cd65
1868
+ [39] R. Kazmi, D. N. Jawawi, R. Mohamad, and I. Ghani, “Effective regression
1869
+ test case selection: A systematic literature review,” ACM Computing
1870
+ Surveys (CSUR), vol. 50, no. 2, pp. 1–32, 2017.
1871
+ [40] J. Kim, R. Feldt, and S. Yoo, “Guiding deep learning system testing using
1872
+ surprise adequacy,” in Proceedings of the 41st International Conference
1873
+ on Software Engineering, 2019, p. 1039–1049.
1874
+ [41] G. Lample and A. Conneau, “Cross-lingual language model pretraining,”
1875
+ 2019. [Online]. Available: https://arxiv.org/abs/1901.07291
1876
+ [42] H. Leung and L. White, “A study of integration testing and software
1877
+ regression at the integration level,” in Proceedings. Conference on
1878
+ Software Maintenance 1990, 1990, pp. 290–301.
1879
+
1880
+ [43] Z. Li, X. Ma, C. Xu, J. Xu, C. Cao, and J. L¨u, “Operational calibration:
1881
+ Debugging confidence errors for dnns in the field,” in Proceedings of the
1882
+ 28th ACM Joint Meeting on European Software Engineering Conference
1883
+ and Symposium on the Foundations of Software Engineering, 2020, p.
1884
+ 901–913.
1885
+ [44] H. Liang, X. Pei, X. Jia, W. Shen, and J. Zhang, “Fuzzing: State of the
1886
+ art,” IEEE Transactions on Reliability, vol. 67, no. 3, pp. 1199–1218,
1887
+ 2018.
1888
+ [45] W. Liang, G. A. Tadesse, D. Ho, F.-F. Li, M. Zaharia, C. Zhang, and
1889
+ J. Zou, “Advances, challenges and opportunities in creating data for
1890
+ trustworthy AI,” Nature Machine Intelligence, 2022.
1891
+ [46] Y. Liu, M. Ott, N. Goyal, J. Du, M. Joshi, D. Chen, O. Levy, M. Lewis,
1892
+ L. Zettlemoyer, and V. Stoyanov, “Roberta: A robustly optimized bert
1893
+ pretraining approach,” arXiv preprint arXiv:1907.11692, 2019.
1894
+ [47] Z. Liu, Y. Feng, and Z. Chen, “Dialtest: automated testing for recurrent-
1895
+ neural-network-driven dialogue systems,” in Proceedings of the 30th ACM
1896
+ SIGSOFT International Symposium on Software Testing and Analysis,
1897
+ 2021, pp. 115–126.
1898
+ [48] L. Ma, F. Zhang, J. Sun, M. Xue, B. Li, F. Juefei-Xu, C. Xie, L. Li, Y. Liu,
1899
+ J. Zhao, and Y. Wang, “Deepmutation: Mutation testing of deep learning
1900
+ systems,” in Proceedings of the IEEE 29th International Symposium on
1901
+ Software Reliability Engineering, 2018, pp. 100–111.
1902
+ [49] S. Ma, Y. Liu, W.-C. Lee, X. Zhang, and A. Grama, “Mode: Automated
1903
+ neural network model debugging via state differential analysis and input
1904
+ selection,” in Proceedings of the 2018 26th ACM Joint Meeting on
1905
+ European Software Engineering Conference and Symposium on the
1906
+ Foundations of Software Engineering, 2018, p. 175–186.
1907
+ [50] P. McMinn, “Search-based software testing: Past, present and future,”
1908
+ in 2011 IEEE Fourth International Conference on Software Testing,
1909
+ Verification and Validation Workshops, 2011, pp. 153–163.
1910
+ [51] Mutmut. (2022) Mutmut. [Online]. Available: https://pypi.org/project/
1911
+ mutmut/
1912
+ [52] N. Nahar, S. Zhou, G. Lewis, and C. K¨astner, “Collaboration challenges in
1913
+ building ml-enabled systems: Communication, documentation, engineer-
1914
+ ing, and process,” in Proceedings of the IEEE/ACM 44th International
1915
+ Conference on Software Engineering, 2022, pp. 413–425.
1916
+ [53] B. Nushi, E. Kamar, E. Horvitz, and D. Kossmann, “On human intellect
1917
+ and machine failures: Troubleshooting integrative machine learning
1918
+ systems,” in Proceedings of the Thirty-First AAAI Conference on Artificial
1919
+ Intelligence, 2017, pp. 1017–1025.
1920
+ [54] A. Odena, C. Olsson, D. Andersen, and I. Goodfellow, “TensorFuzz: De-
1921
+ bugging neural networks with coverage-guided fuzzing,” in Proceedings
1922
+ of the 36th International Conference on Machine Learning, 2019, pp.
1923
+ 4901–4911.
1924
+ [55] K. O’Leary and M. Uchida, “Common problems with creating machine
1925
+ learning pipelines from existing code,” in Proceedings of the Third
1926
+ Conference on Machine Learning and Systems, 2020.
1927
+ [56] OpenAI. (2022) Gpt2. [Online]. Available: https://openai.com/blog/tags/
1928
+ gpt-2/
1929
+ [57] B. Paulsen, J. Wang, and C. Wang, “Reludiff: Differential verification
1930
+ of deep neural networks,” in Proceedings of the ACM/IEEE 42nd
1931
+ International Conference on Software Engineering, 2020, p. 714–726.
1932
+ [58] B. Paulsen, J. Wang, J. Wang, and C. Wang, “Neurodiff: Scalable differ-
1933
+ ential verification of neural networks using fine-grained approximation,”
1934
+ in Proceedings of the 35th IEEE/ACM International Conference on
1935
+ Automated Software Engineering, 2020, p. 784–796.
1936
+ [59] K. Pei, Y. Cao, J. Yang, and S. Jana, “Deepxplore: Automated whitebox
1937
+ testing of deep learning systems,” in Proceedings of the 26th Symposium
1938
+ on Operating Systems Principles, 2017, p. 1–18.
1939
+ [60] Z. Peng, J. Yang, T.-H. P. Chen, and L. Ma, “A first look at the integration
1940
+ of machine learning models in complex autonomous driving systems: A
1941
+ case study on apollo,” in Proceedings of the 28th ACM Joint Meeting
1942
+ on European Software Engineering Conference and Symposium on the
1943
+ Foundations of Software Engineering, pp. 1240–1250.
1944
+ [61] V. Salis, T. Sotiropoulos, P. Louridas, D. Spinellis, and D. Mitropoulos,
1945
+ “Pycg: Practical call graph generation in python,” in Proceedings of
1946
+ the 43rd International Conference on Software Engineering, 2021, p.
1947
+ 1646–1657.
1948
+ [62] F. Sattler, A. von Rhein, T. Berger, N. S. Johansson, M. M. Hardø,
1949
+ and S. Apel, “Lifting inter-app data-flow analysis to large app sets,”
1950
+ Automated Software Engineering, vol. 25, no. 2, pp. 315–346, 2017.
1951
+ [63] D. Sculley, G. Holt, D. Golovin, E. Davydov, T. Phillips, D. Ebner,
1952
+ V. Chaudhary, M. Young, J.-F. Crespo, and D. Dennison, “Hidden
1953
+ technical debt in machine learning systems,” in Proceedings of the 28th
1954
+ International Conference on Neural Information Processing Systems,
1955
+ 2015, p. 2503–2511.
1956
+ [64] A. Serban and J. Visser, “Adapting software architectures to machine
1957
+ learning challenges,” in Proceedings of the IEEE International Conference
1958
+ on Software Analysis, Evolution and Reengineering, 2022, pp. 152–163.
1959
+ [65] A. Shmilovici.
1960
+ [66] G. Singh, T. Gehr, M. P¨uschel, and M. Vechev, “An abstract domain
1961
+ for certifying neural networks,” Proc. ACM Program. Lang., vol. 3, no.
1962
+ POPL, pp. 1–30, 2019.
1963
+ [67] B. Sun, J. Sun, L. H. Pham, and J. Shi, “Causality-based neural network
1964
+ repair,” in Proceedings of the 44th International Conference on Software
1965
+ Engineering, 2022, pp. 338–349.
1966
+ [68] Y. Sun, M. Wu, W. Ruan, X. Huang, M. Kwiatkowska, and D. Kroening,
1967
+ “Concolic testing for deep neural networks,” in Proceedings of the 33rd
1968
+ ACM/IEEE International Conference on Automated Software Engineering,
1969
+ 2018, p. 109–119.
1970
+ [69] X. Tan, K. Gao, M. Zhou, and L. Zhang, “An exploratory study of
1971
+ deep learning supply chain,” in Proceedings of the IEEE/ACM 44th
1972
+ International Conference on Software Engineering, 2022, pp. 86–98.
1973
+ [70] Y. Tang, R. Khatchadourian, M. Bagherzadeh, R. Singh, A. Stewart,
1974
+ and A. Raja, “An empirical study of refactorings and technical debt
1975
+ in machine learning systems,” in Proceedings of the IEEE/ACM 43rd
1976
+ International Conference on Software Engineering, 2021, pp. 238–250.
1977
+ [71] G. Tao, S. Ma, Y. Liu, Q. Xu, and X. Zhang, “Trader: Trace divergence
1978
+ analysis and embedding regulation for debugging recurrent neural net-
1979
+ works,” in Proceedings of the ACM/IEEE 42nd International Conference
1980
+ on Software Engineering, 2020, p. 986–998.
1981
+ [72] Y. Tian, K. Pei, S. Jana, and B. Ray, “Deeptest: Automated testing of
1982
+ deep-neural-network-driven autonomous cars,” in Proceedings of the 40th
1983
+ International Conference on Software Engineering, 2018, p. 303–314.
1984
+ [73] F. Toledo, D. Shriver, S. Elbaum, and M. B. Dwyer, “Distribution models
1985
+ for falsification and verification of dnns,” in Proceedings of the 36th
1986
+ IEEE/ACM International Conference on Automated Software Engineering,
1987
+ 2021, p. 317–329.
1988
+ [74] M. Velez, P. Jamshidi, N. Siegmund, S. Apel, and C. K¨astner, “On
1989
+ debugging the performance of configurable software systems: Developer
1990
+ needs and tailored tool support,” in Proceedings of the IEEE/ACM 44th
1991
+ International Conference on Software Engineering, 2022, pp. 1571–1583.
1992
+ [75] V. Vlasov, J. E. M. Mosig, and A. Nichol, “Dialogue transformers,”
1993
+ CoRR, vol. abs/1910.00486, 2019.
1994
+ [76] T.-H. Wen, M. Gaˇsi´c, N. Mrkˇsi´c, P.-H. Su, D. Vandyke, and S. Young,
1995
+ “Semantically conditioned LSTM-based natural language generation for
1996
+ spoken dialogue systems,” in Proceedings of the Conference on Empirical
1997
+ Methods in Natural Language Processing, 2015, pp. 1711–1721.
1998
+ [77] J. Williams, A. Raux, and M. Henderson, “The dialog state tracking
1999
+ challenge series: A review,” Dialogue & Discourse, vol. 7, no. 3, pp.
2000
+ 4–33, 2016.
2001
+ [78] R. Wu, C. Guo, A. Y. Hannun, and L. van der Maaten, “Fixes that
2002
+ fail: Self-defeating improvements in machine-learning systems,” in
2003
+ Proceedings of the 35th Conference on Neural Information Processing
2004
+ Systems, 2021, pp. 11 745–11 756.
2005
+ [79] Z. Yang, Z. Dai, Y. Yang, J. Carbonell, R. R. Salakhutdinov, and Q. V. Le,
2006
+ “Xlnet: Generalized autoregressive pretraining for language understanding,”
2007
+ Advances in neural information processing systems, vol. 32, 2019.
2008
+ [80] H. Yokoyama, “Machine learning system architectural pattern for
2009
+ improving operational stability,” in Proceedings of the IEEE International
2010
+ Conference on Software Architecture Companion, 2019, pp. 267–274.
2011
+ [81] A. Zerouali, E. Constantinou, T. Mens, G. Robles, and J. Gonz´alez-
2012
+ Barahona, “An empirical analysis of technical lag in npm package
2013
+ dependencies,” in Proceedings of the 17th International Conference
2014
+ on Software Reuse, 2018, pp. 95–110.
2015
+ [82] J. M. Zhang, M. Harman, L. Ma, and Y. Liu, “Machine learning
2016
+ testing: Survey, landscapes and horizons,” IEEE Transactions on Software
2017
+ Engineering, vol. 48, no. 1, pp. 1–36, 2022.
2018
+ [83] J. Zhang, X. Jing, W. Zhang, H. Wang, and Y. Dong, “Improve the
2019
+ quality of arc systems based on the metamorphic testing,” in Proceedings
2020
+ of the International Symposium on System and Software Reliability, 2016,
2021
+ pp. 137–141.
2022
+ [84] P. Zhang, J. Wang, J. Sun, G. Dong, X. Wang, X. Wang, J. S. Dong,
2023
+ and T. Dai, “White-box fairness testing through adversarial sampling,”
2024
+ in Proceedings of the ACM/IEEE 42nd International Conference on
2025
+ Software Engineering, 2020, p. 949–960.
2026
+ [85] Y. Zhang, P. Tiˇno, A. Leonardis, and K. Tang, “A survey on neural
2027
+ network interpretability,” IEEE Transactions on Emerging Topics in
2028
+
2029
+ Computational Intelligence, vol. 5, no. 5, pp. 726–742, 2021.
2030
+ [86] Z. Zhang, R. Takanobu, Q. Zhu, M. Huang, and X. Zhu, “Recent
2031
+ advances and challenges in task-oriented dialog systems,” Science China
2032
+ Technological Sciences, vol. 63, no. 10, pp. 2011–2027, 2020.
2033
+
S9E2T4oBgHgl3EQfWwd9/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
SdE0T4oBgHgl3EQfUgDE/content/tmp_files/2301.02252v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
SdE0T4oBgHgl3EQfUgDE/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
U9AzT4oBgHgl3EQf1P4j/content/tmp_files/2301.01795v1.pdf.txt ADDED
The diff for this file is too large to render. See raw diff
 
U9AzT4oBgHgl3EQf1P4j/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
VNAzT4oBgHgl3EQfJ_sw/content/tmp_files/2301.01088v1.pdf.txt ADDED
@@ -0,0 +1,837 @@
+ Explaining Imitation Learning through Frames
+
+ Boyuan Zheng1, Jianlong Zhou1, Chunjie Liu, Yiqiao Li1 and Fang Chen1
+ 1University of Technology Sydney
+ Boyuan.Zheng-1@student.uts.edu.au, Jianlong.Zhou@uts.edu.au
+
+ Abstract
+
+ As one of the prevalent methods for building automation systems,
+ Imitation Learning (IL) presents a promising performance in a wide
+ range of domains. However, despite the considerable improvement in
+ policy performance, the corresponding research on the explainability
+ of IL models is still limited. Inspired by recent approaches in
+ explainable artificial intelligence, we propose a model-agnostic
+ explanation framework for IL models called R2RISE. R2RISE aims to
+ explain the overall policy performance with respect to the frames in
+ the demonstrations. It iteratively retrains the black-box IL model
+ from randomized masked demonstrations and uses the conventional
+ evaluation outcome, environment returns, as the coefficient to build
+ an importance map. We also conducted experiments to investigate three
+ major questions concerning frames' importance equality, the
+ effectiveness of the importance map, and connections between
+ importance maps from different IL models. The results show that
+ R2RISE successfully distinguishes important frames from the
+ demonstrations.
+ 1 Introduction
+
+ Recent advances in Imitation Learning (IL), which leverages external
+ demonstration to reproduce desired behaviours, demonstrate a
+ promising performance in fields like 3D gameplay [Scheller et al.,
+ 2020], robotics [Yu et al., 2018], and automatic driving [Codevilla
+ et al., 2019]. Most of the research on IL keeps applying more complex
+ Deep Neural Network (DNN) models, such as convolutional neural
+ networks (CNN) and generative adversarial networks (GAN), to achieve
+ greater performance under various conditions, while paying less
+ attention to explaining what information the trained agents learned
+ from the external demonstration. In this case, despite the success,
+ IL methods are becoming increasingly unexplainable, and this problem
+ remains an open challenge in both IL and Explainable Artificial
+ Intelligence (XAI).
+
+ Currently, the body of work that combines XAI and IL is still
+ limited, and it can be roughly categorized into two approaches to
+ achieve better explainability, i.e., leveraging white-box models and
+ analyzing pixel-wise explainability via existing computer vision
+ techniques. As for leveraging white-box models, most existing
+ research substitutes the prevalent neural network architecture with
+ other white-box models with intrinsic interpretability. For example,
+ Leech [2019] proposed a learning framework that aggregates IL and
+ logical automata to represent problems as compact finite state
+ automata with human-interpretable logic states. Bewley et al. [2020]
+ modelled the behaviour policy of a trained black-box agent in the
+ form of a decision tree by analyzing its input-output statistics.
+ Zhang et al. [2021] leveraged a hierarchical structure to explain the
+ model's decision-making. On the other hand, the research on
+ pixel-wise explainability targets the CNN structures in IL models
+ that are widely used to capture features from image input. Following
+ existing research in XAI and computer vision, the explainability of
+ the model is commonly represented as heatmaps, and the model's
+ decision-making process is analyzed from these heatmaps. For example,
+ Pan et al. [2020] proposed a model-specific method called xGAIL that
+ is based on Generative Adversarial Imitation Learning (GAIL) [Ho and
+ Ermon, 2016] and obtains local and global explanations for the
+ passenger-seeking problem.
+
+ In fact, before xGAIL was proposed, the IL research community had
+ investigated features in image inputs, but did not highlight the
+ significance of explainability. For example, Brown et al. [2019] used
+ attention maps of the input image frames to validate the
+ effectiveness of the learning process. De Haan et al. [2019a] pointed
+ out that the IL agent could learn wrong causal correlations between
+ expert behaviours and irrelevant features in the input. These
+ methods, including xGAIL, demonstrate which features in a single
+ frame are significant for models to learn the desired behaviours.
+ However, the above-mentioned methods fail to evaluate the importance
+ of frames. Do the input image frames have identical importance? If
+ not, how can we distinguish frames' importance?
+
+ To tackle these problems, we attempt to explain the input
+ demonstrations as a whole by proposing a novel explanation method
+ called R2RISE, which iteratively masks random frames in the
+ demonstrations and evaluates the performance of the agents trained on
+ the masked inputs. The intuition is that the input demonstrations are
+ regarded as a single image, and frames in the demonstrations are
+ regarded as pixels. In this case, existing XAI and computer vision
+ methods could be directly applied to investigate the importance of
+
+ Figure 1: A diagrammatic representation of a single iteration of
+ R2RISE. The input demonstrations are subject to element-wise
+ multiplication (denoted as ⊙) with a random mask, which creates a
+ masked demonstration, with greyed frames indicating those which are
+ masked. Subsequently, the masked demonstration is used to train a
+ black-box IL model. The trained model interacts with the test
+ environment to obtain returns, the mean of which is element-wise
+ multiplied with the initial mask and accumulated into the existing
+ importance map.
+ frames instead of features in a single frame. R2RISE combines the
+ existing methods RISE [Petsiuk et al., 2018] and ROAR [Hooker et al.,
+ 2019], and achieves model-agnostic explanations for IL models with
+ various architectures.
+
+ Our main contributions are summarized as follows: 1) We proposed a
+ model-agnostic method to explain IL models; 2) We provided a novel
+ perspective that explains IL with respect to the whole input dataset
+ instead of a specific frame; 3) We investigated the connection
+ between agents' overall performance and demonstration frames.
+ 2 Preliminaries
+
+ To better illustrate our approach, we first introduce the related
+ existing literature in the field of XAI: RISE [Petsiuk et al., 2018]
+ and ROAR [Hooker et al., 2019]. We then review an insightful method,
+ xGAIL [Pan et al., 2020], which aggregates XAI with the specific IL
+ model GAIL [Ho and Ermon, 2016], and discuss its limitations.
+ 2.1 Randomized Input Sampling for Explanation (RISE)
+
+ Randomized Input Sampling for Explanation (RISE), proposed by Petsiuk
+ et al. [2018], is one of the state-of-the-art XAI methods for
+ explaining black-box models. The attractive characteristics of RISE
+ are its simplicity and generality. Unlike other popular XAI
+ approaches, which calculate the gradient of image classification
+ outputs, RISE probes the target model by randomly masking the input
+ image and recording the probability result with respect to the target
+ class. This process is repeated multiple times, and the recorded
+ probabilities for each pixel are linearly combined to generate an
+ importance map. This allows for the extraction of the most
+ influential region in the input image for the target decision. RISE
+ is also significant for explaining IL, as IL typically requires
+ multiple demonstrations to train the model, which can be regarded as
+ a single image. In this case, RISE can be used to explain which
+ frames are important for policy training.
+ 2.2 RemOve And Retrain (ROAR)
+
+ RemOve And Retrain (ROAR) was proposed by Hooker et al. [2019], and
+ its name concisely summarizes its working process. By substituting
+ pixels estimated to be important with fixed uninformative values and
+ then retraining a new model, ROAR is able to evaluate feature
+ importance for a wide range of models. The motivation of ROAR is that
+ if the model demonstrates a sharper degradation in performance
+ because of the removal, then we can conclude that the proposed
+ importance estimate is more accurate. The authors also argued that
+ the retraining process is essential, as machine learning models
+ commonly assume that the training and test distributions are similar,
+ and repeating the training several times could ensure a low variance
+ in performance. The intuition of ROAR is valuable for explaining IL
+ models, as training a single model to determine the importance of
+ frames is risky, and the research community commonly uses ensemble
+ methods to deal with distribution shift. Retraining several models
+ under the same removal rate could improve the reliability of the
+ importance estimates along the demonstration trajectories. More
+ importantly, since the conventional evaluation of IL problems differs
+ from classical CNN-involved XAI tasks, the performance of a trained
+ IL model is represented as returns from a dynamic environment. It is
+ improper to train the model once and feed it image observations with
+ fixed masks under such an environment.
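+ A minimal sketch of the removal step, assuming flattened per-pixel
+ importance scores and per-feature mean substitution (a simplification
+ of ROAR's per-channel mean):
+
+     import numpy as np
+
+     def roar_degrade(X, importance, level):
+         """Replace the top `level` fraction of features with their mean."""
+         flat = X.reshape(X.shape[0], -1).copy()
+         k = int(level * importance.size)
+         top = np.argsort(importance.ravel())[::-1][:k]  # most important first
+         flat[:, top] = flat[:, top].mean(axis=0)  # uninformative substitute
+         return flat.reshape(X.shape)
+
+ A new model is then retrained on roar_degrade(X, importance, l) for
+ each degradation level l, and the accuracy drop is compared across
+ importance estimators.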
+ 2.3 Explainable Generative Adversarial Imitation Learning (xGAIL)
+
+ Pan et al. [2020] made the first attempt to explain one of the
+ state-of-the-art models, Generative Adversarial Imitation Learning
+ (GAIL) [Ho and Ermon, 2016], and validated their method, xGAIL, on a
+ passenger-seeking problem. xGAIL was designed for problems that rely
+ on spatial-temporal data, and both local and global explanations were
+ obtained separately from a well-trained GAIL model. However, xGAIL's
+ generality is severely limited to a specific problem and model. In
+ addition, xGAIL in fact transforms the IL problem into
+ an image classification problem by extracting and analyzing
+ limited frames from abundant inputs. This could cause the absence of
+ an overall explanation for the model's performance and generate
+ biased explanations, as most of the information in the demonstration
+ is filtered out by the frame extraction process.
+ 3 R2RISE
+
+ To overcome the above-mentioned limitations, we propose a
+ model-agnostic explanation method for imitation learning called
+ R2RISE. R2RISE combines the merits of RISE and ROAR, and investigates
+ the frames' importance with respect to the policy's overall
+ performance.
+
+ We first review how RISE formulates the problem and distinguishes
+ pixels' importance for the image classification problem. For a given
+ image I of size H × W, RISE creates a random binary mask m of the
+ same size as I, and performs an element-wise multiplication between
+ image I and mask m (denoted as I ⊙ m). The masked image is then fed
+ into the black-box model (denoted as f(I ⊙ m)). The importance of
+ pixels is defined as the expected score over all possible masks
+ M = {m_0, m_1, ..., m_i}, conditioned on the event that the pixel is
+ observed (denoted as M(λ) = 1; if the pixel is masked, then
+ M(λ) = 0), i.e., S_{I,f}(λ) = E_M[f(I ⊙ m) | M(λ) = 1]. By rewriting
+ the above equation as a summation over masks m and empirically
+ estimating it using Monte Carlo sampling, the saliency map can be
+ computed as a weighted sum of random masks, normalized by the
+ expectation of M:
+ SI,f(λ) ≈
228
+ 1
229
+ E[M] · N
230
+ N
231
+
232
+ i=1
233
+ f(I
234
+
235
+ mi) · mi(λ).
236
+ (1)
237
Since RISE needs no assumptions about, or information from, the target model, it can be used to explain black-box models. The intuition behind RISE is that when f(I ⊙ m) is high, the mask observes important pixels. With a similar intuition, Hooker et al. [2019] proposed ROAR to evaluate feature importance. An ordered set of feature importances is estimated, and the top l fraction of the ordered set is replaced with the corresponding channel mean, where l is a pre-defined degradation level l = [0, 10, ..., 100] (in percent). The major difference between RISE and ROAR is that ROAR retrains the model on the replaced dataset, while RISE trains the model only once.
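To make the masking-and-weighting idea concrete, the following is a minimal NumPy sketch of RISE-style saliency estimation. It is an illustration, not the published implementation: `score_fn` is a placeholder for the black-box model's class score, the mask density `p` is an assumed parameter, and the published RISE additionally upsamples smoothed low-resolution masks, which is omitted here.

```python
import numpy as np

def rise_saliency(image, score_fn, num_masks=1000, p=0.5, rng=None):
    """Monte Carlo estimate of Eq. (1): a score-weighted average of random masks."""
    rng = rng if rng is not None else np.random.default_rng(0)
    h, w = image.shape[:2]
    saliency = np.zeros((h, w))
    for _ in range(num_masks):
        # Each pixel is observed (1) with probability p, masked (0) otherwise.
        mask = (rng.random((h, w)) < p).astype(float)
        masked = image * mask if image.ndim == 2 else image * mask[..., None]
        saliency += score_fn(masked) * mask  # high scores upweight observed pixels
    return saliency / (p * num_masks)        # normalize by E[M] * N
```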
249
Like most imitation learning methods, we assume that the test data follow a distribution similar to the training data and that the input demonstrations D_n are optimal. This ensures evaluation fairness for a wide range of IL models. The demonstrations D_n consist of multiple trajectories, and each trajectory can be represented as either a sequence of state-action pairs or a sequence of observations. In this work, we represent a trajectory as a sequence of state-action pairs, i.e., D_n = {τ_1, τ_2, ..., τ_n}, where τ_{i∈[1,n]} = {(s_1, a_1), (s_2, a_2), ..., (s_t, a_t)}. The black-box imitation learning model trains a policy (denoted as π_{D_n}(a|s)) on the input demonstrations D_n, then interacts with the environment and obtains returns R from the test environment. For a finite horizon T, the expected return can be represented as the accumulation of the return at each time step:

R(\pi_{D_n}) = \mathbb{E}\!\left[\sum_{t=0}^{T} r_t \,\Big|\, \pi_{D_n}\right]. \qquad (2)
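In practice this expectation is estimated empirically by rolling the trained policy out for several episodes and averaging the accumulated rewards. Below is a minimal sketch under that assumption; the `env.reset`/`env.step` interface follows the classic Gym API, and the `policy` callable is an illustrative stand-in for the trained IL policy.

```python
def estimate_return(policy, env, episodes=20, horizon=1000):
    """Monte Carlo estimate of the expected return in Eq. (2)."""
    totals = []
    for _ in range(episodes):
        obs, total = env.reset(), 0.0
        for _ in range(horizon):
            obs, reward, done, _ = env.step(policy(obs))  # act, observe reward
            total += reward
            if done:
                break
        totals.append(total)
    return sum(totals) / len(totals)  # average accumulated return
```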
270
The discussion so far motivates us to propose a frame-wise explanation method for IL called R2RISE. By regarding the demonstrations D as a single image, where the number of demonstrations is the image height H and the length of each demonstration is the image width T, we can investigate frame-wise importance with an intuition similar to RISE's. However, RISE cannot be applied to IL directly: applying a fixed mask to a dynamic environment frame by frame is unreasonable, and IL methods are commonly evaluated by interaction with the environment rather than by feeding the policy network another dataset. We therefore aggregate ROAR with RISE and propose R2RISE. It hypothesises that the importance of each frame is not identical and iteratively removes random frames according to a predefined degradation level. The modified dataset D_i = D ⊙ m_i is used to retrain an IL model.
286
The retrained IL model then repeatedly interacts with the environment to obtain the accumulated return, and R2RISE finally computes a linear combination of the returns to obtain the saliency map (see Figure 1). Assuming the number of generated masks is N and the return for each mask is the average return over J rounds of interaction with the environment, the computation of the saliency map is similar to equation (1). To cater to the IL setting, we substitute f(I ⊙ m) in equation (1) with equation (2):
296
S_{D_n,f}(\lambda) \approx \frac{1}{\mathbb{E}[M] \cdot N} \sum_{i=1}^{N} R(\pi_{D_i}) \cdot m_i(\lambda) \qquad (3)

= \frac{1}{\mathbb{E}[M] \cdot N} \sum_{i=1}^{N} \mathbb{E}\!\left[\sum_{t=0}^{T} r_t \,\Big|\, \pi_{D_i}\right] \cdot m_i(\lambda) \qquad (4)

= \frac{1}{\mathbb{E}[M] \cdot N \cdot J} \sum_{i=1}^{N} \sum_{j=0}^{J} \sum_{t=0}^{T} r_t \cdot m_i(\lambda) \qquad (5)

where D_i = D \odot m_i, and

m_i(\lambda) = \begin{cases} 0, & \text{if the pixel is masked,} \\ 1, & \text{if the pixel is observed.} \end{cases}
336
As these formulas show, R2RISE does not require any information from the IL models, so it can be used as a model-agnostic method to explain IL.
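A minimal sketch of the weighted-sum computation in Eqs. (3)-(5) is given below, assuming the per-mask average returns have already been obtained by retraining each masked model and rolling it out; the names `masks` and `avg_returns` are illustrative.

```python
import numpy as np

def r2rise_saliency(masks, avg_returns, p):
    """Eqs. (3)-(5): importance map as a return-weighted sum of frame masks.

    masks:       (N, num_demos, demo_len) binary frame masks m_i.
    avg_returns: (N,) average return of the model retrained on D * m_i.
    p:           expected fraction of observed frames, i.e. E[M].
    """
    masks = np.asarray(masks, dtype=float)
    returns = np.asarray(avg_returns, dtype=float)
    weighted = np.tensordot(returns, masks, axes=1)  # sum_i R(pi_{D_i}) * m_i
    return weighted / (p * len(returns))             # normalize by E[M] * N
```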
339
To evaluate the effectiveness of R2RISE, we test two diverse IL methods: Behavioural Cloning (BC) [Bain and Sammut, 1999] and Generative Adversarial Imitation Learning (GAIL) [Ho and Ermon, 2016]. BC directly maps states to actions from the input demonstrations, and the control policy is obtained via supervised learning; GAIL, on the other hand, learns the policy through an iterative adversarial process between a generator G and a discriminator D, where G generates a fake data distribution and D differentiates the fake data distribution from the given expert distribution [Zheng et al., 2022]. The two methods learn the policy in far different ways, and we wish to validate the generality of R2RISE through this diverse model selection.

(a) Importance maps generated by R2RISE. (b) Important frames in BeamRider and Breakout.
Figure 2: Importance maps and the corresponding extracted sample frames that are recognized as important.

Algorithm 1 R2RISE
Input: demonstrations D, target IL model f
Parameter: degradation level l, number of randomized masks N
Output: an importance map S_{D,f}
1: Initialize masks M based on the degradation level l and the number of randomized masks N.
2: Initialize a blank importance map S_{D,f} with the same shape as D.
3: for m_i in M do
4:    Randomly initialize the model f.
5:    Obtain masked demonstrations D_i = D ⊙ m_i.
6:    Train model f on the masked demonstrations D_i and obtain policy π_{D_i}.
7:    Evaluate policy π_{D_i} by interacting with the environment and obtain the average return R̄.
8:    Update the importance map via element-wise addition: S_{D,f} ← S_{D,f} ⊕ (R̄ ⊙ m_i).
9: end for
10: return the importance map S_{D,f}
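For concreteness, here is a minimal behavioural-cloning sketch matching the description above. It is a simplified illustration rather than the training setup used in this paper: the paper's models consume image observations, whereas this sketch assumes flattened state vectors, and the network width, optimizer, and epoch count are arbitrary choices.

```python
import torch
import torch.nn as nn

def train_bc(states, actions, num_actions, epochs=100, lr=1e-3):
    """Minimal behavioural cloning: supervised mapping from states to actions.

    states:  (M, state_dim) float tensor of demonstration states.
    actions: (M,) long tensor of expert action indices.
    """
    policy = nn.Sequential(
        nn.Linear(states.shape[1], 128), nn.ReLU(),
        nn.Linear(128, num_actions),
    )
    optimizer = torch.optim.Adam(policy.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    for _ in range(epochs):
        optimizer.zero_grad()
        loss = loss_fn(policy(states), actions)  # imitate the expert's actions
        loss.backward()
        optimizer.step()
    return policy
```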
388
4 Experiment
In this section, we conduct a series of experiments to address the following questions: (1) Is the importance of frames identical? (2) Can R2RISE distinguish the importance between frames? (3) Are there connections between the importance maps obtained from different IL models?
395
4.1 Setup
We run experiments on an NVIDIA Quadro RTX 5000 GPU, and two diverse IL models, BC and GAIL, are evaluated on two OpenAI Gym Atari tasks: Breakout and BeamRider [Brockman et al., 2016].
Similar to recent IL methods, we leverage the proximal policy optimization (PPO) [Schulman et al., 2017] algorithm from the OpenAI baselines [Dhariwal et al., 2017], with default parameters and reward function, to generate expert demonstrations. The PPO training process is checkpointed every 20 steps, and the 84 × 84 × 3 observations and the actions exchanged between the PPO agents and the task environment are recorded as "trajectories." These trajectories, generated from checkpoint 1400, serve as expert demonstrations. To avoid the "causal confusion" problem (models building wrong causal relationships with irrelevant patterns) [De Haan et al., 2019b] and to ensure the fairness of our evaluation, we mask the indicators (such as the score board) in the frames and feed the same demonstrations to the different IL models.
Regarding the parameter settings, we generate 20 trajectories with a fixed length of 1000 for each IL model. Five random seeds and five degradation levels l = [10, 30, 50, 70, 90] (in percent) are pre-defined for evaluation. For each random seed and degradation level, we retrain 100 models with 100 randomized masks. Each mask contains 20 × 100 grids, meaning that every trajectory is cut into 100 snippets and each snippet assigns the same importance to 10 frames. Each retrained model is tested over 20 trials, and the average return of the trials, multiplied element-wise with the random mask, is added to the final importance map.
429
+ Is the importance between frames identical?
430
+ Remember that we hypothesize that the importance of frames
431
+ is different, so we validate this hypothesis by applying sev-
432
+ eral randomized masks on the same demonstration and com-
433
+ paring the performance of the trained model. If the outcomes
434
+ present noticeable deviations, then it can be inferred that the
435
+ contribution between frames varies. To further this idea, we
436
+
437
475
To further this idea, we divide each trajectory into ten segments of equal length and randomly assign either 0 or 1 to each segment. Segments assigned 0 are removed, and the preprocessed demonstrations are then used to train the relevant models. This process iterates ten times, producing Figure 3, where the x-axis is the attempt number, the y-axis is the environment return, and the error bars indicate the standard deviation of the policy performance.

Figure 3: Deviations in policy performance of 10 models trained with 10 different randomized masks.
485
From Figure 3, we can observe that performance deviates from model to model. The best model achieves far better performance than the worst one, which indicates that the importance of frames is not identical. Likewise, the importance maps generated by R2RISE suggest disparities between frames (see Figure 2a), where the x-axis is the trajectory length, the y-axis represents the trajectories, and the grayscale denotes the importance: the whiter a grid, the more important it is. Figure 2b shows the extracted frames recognized as the most important components of the demonstrations. For BeamRider, the models put more weight on destroying enemy flights, whereas for Breakout, the models pay more attention to the rebounding process involving the upper blocks, side walls, and paddle, which resembles the strategies we might use for these games.
500
4.3 Can R2RISE distinguish the importance between frames?
This section investigates the effectiveness of R2RISE. The hypothesis validation above yields a map indicating the importance of frames, but the quality of the generated maps needs to be evaluated properly. To this end, we use causal metrics similar to the insertion and deletion metrics of [Petsiuk et al., 2018], in which the availability of the 'cause' significantly influences the model's decision-making and performance. In image classification tasks, deleting the causal pixels leads to a sharp drop in accuracy if the model is well explained. In our experiment, we leverage a similar intuition and expect the removal of the important frames to lead to worse performance while keeping the amount of input data the same. To achieve this, we transform the generated importance map into a mask by setting a threshold and replacing the map entries with either 1s or 0s depending on the threshold.

(a) Policy performance changes in BeamRider. (b) Policy performance changes in Breakout.
Figure 4: Validation of the effectiveness of R2RISE. The lines and error bars represent the mean performance and standard deviation of the model trained on a certain percentage of the most important (or least important) frames.
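A minimal sketch of this thresholding step, assuming a quantile-based cutoff (an illustrative choice) that keeps a fixed fraction of frames:

```python
import numpy as np

def top_fraction_mask(importance_map, fraction=0.3):
    """Keep only the top `fraction` most important frames (mask value 1)."""
    threshold = np.quantile(importance_map, 1.0 - fraction)
    return (importance_map >= threshold).astype(int)

def bottom_fraction_mask(importance_map, fraction=0.3):
    """Keep only the `fraction` least important frames instead."""
    threshold = np.quantile(importance_map, fraction)
    return (importance_map <= threshold).astype(int)
```

Training one model on `top_fraction_mask` and another on `bottom_fraction_mask` at the same fraction keeps the amount of input data equal while varying only which frames are available.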
525
Figure 4 shows the changes in policy performance when using different percentages of the most important (or least important) frames. The x-axis is the percentage of data used to train the model, and the y-axis is the environment return. The solid lines are the average returns over 20 trials using the same transformed mask and demonstrations, and the error bars show the standard deviation over the 20 trials. From Figure 4, we can observe that for both tasks the models trained with the most important frames perform well even when the input data is relatively limited, which meets our expectations. For BeamRider (see Figure 4a), the model with important frames always performs better than the model trained with the least important frames. The performance deviation at the beginning of the figure is relatively small; we believe this is because the model is more sensitive to the amount of data than to the availability of the important frames. Towards the end of the figure, the performance deviation increases, indicating that missing the top important frames significantly limits the upper bound of the model's performance. For Breakout (see Figure 4b), the model with important frames significantly outperforms the model without them until more than 60% of the total demonstrations are fed. Beyond 60%, the model's performance starts to fluctuate. We suspect this is because 50% of the input is already sufficient to train the policy, and adding more ordinary or redundant frames to the dataset negatively influences the policy performance.
551
(a) Deviation between the importance maps in BeamRider. (b) Deviation between the importance maps in Breakout.
Figure 5: Deviation between the importance maps of BC and GAIL.
633
4.4 Are there connections between the importance maps obtained from different IL models?
In addition to exploring explainability within a single IL model, we also examine the connections between IL models. In this section, we investigate the question: does the importance map obtained by one model have connections to that of another model? To this end, we propose two approaches to explore the intrinsic connections. The first directly compares the importance maps by projecting their values into the same range and calculating the element-wise deviation (see Figure 5); the larger the deviation, the whiter the output image. From Figure 5, we can observe that most grids are close to black, which indicates that the importance assigned to these grids is similar.
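A minimal sketch of this comparison, assuming min-max normalization as the projection into a common range (the normalization choice is an illustrative assumption):

```python
import numpy as np

def deviation_map(map_a, map_b):
    """Element-wise deviation between two importance maps after
    projecting both into [0, 1]; whiter pixels mean a larger gap."""
    def to_unit_range(m):
        m = np.asarray(m, dtype=float)
        return (m - m.min()) / (m.max() - m.min() + 1e-8)
    return np.abs(to_unit_range(map_a) - to_unit_range(map_b))
```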
648
The second approach involves generating a mask from the importance map produced by one model and applying that mask to the same demonstrations to train another model. The underlying assumption is that if there are connections between the models' importance maps, the important frames identified by one model should also work well for another model, leading to improved performance compared with training without those frames. Figure 6 displays the average returns of GAIL using three types of masks: the blue, orange and grey lines correspond to models trained with masks extracted from the importance maps obtained using BC, GAIL and a random baseline, respectively. Figure 6 shows that the model trained with BC's mask demonstrates policy performance similar to that of the model trained with GAIL's mask, and both outperform the model using a randomized mask after the same number of training epochs. This confirms our expectation that the important frames recognized by BC can also be employed to train GAIL with considerable performance, which is useful when the target model is time-consuming to train: one can use a more time-efficient method like BC to obtain an alternative importance map.

Figure 6: Average reward learning curves of GAIL trained with different masks. The blue, orange and grey lines are trained with the masks extracted from the importance maps obtained using BC, GAIL and random, respectively.
673
5 Limitations
Several intriguing challenges await further exploration. Although we were able to extract promising explanations through a large number of masks and validations of the obtained policies, the robustness of the framework could be further improved through ensemble methods: as the framework currently relies on a single trial to train each model, ensembling IL models could reduce the performance variance for each given mask. In addition to robustness, computational cost is another limitation. The time taken to obtain a satisfactory explanation is closely linked to the time spent training the target model once; if the target model requires days to train, retraining it hundreds of times is impractical. Improving time efficiency while preserving the model-agnostic property remains an open challenge for R2RISE. Investigating the relationship between global explanations and the frames recognized as important is another interesting future direction. Although we observed similar patterns in the frames extracted from different trials and models, it is still unsafe to claim that these patterns constitute global explanations for a specific task, and further research is needed to provide theoretical support for such connections.
696
6 Conclusion
This paper introduced a model-agnostic explanation framework for imitation learning called R2RISE, which distinguishes the frames' importance in relation to the overall policy performance. R2RISE iteratively applies numerous randomized masks to the demonstrations and retrains the black-box IL model on the masked demonstrations. The obtained policies are evaluated in the manner of most IL methods, i.e., by the accumulated returns from the environment, and these returns serve as coefficients that are multiplied with the masks to obtain the importance map of the frames. Experiments have shown that R2RISE successfully distinguishes important frames in the demonstrations, providing insight into which frames contribute to better performance.
712
References
743
[Bain and Sammut, 1999] Michael Bain and Claude Sammut. A framework for behavioural cloning. In Machine Intelligence 15, pages 103-129. Oxford University Press, 1999.
[Bewley et al., 2020] Tom Bewley, Jonathan Lawry, and Arthur Richards. Modelling agent policies with interpretable imitation learning. In International Workshop on the Foundations of Trustworthy AI Integrating Learning, Optimization and Reasoning, pages 180-186. Springer, 2020.
[Brockman et al., 2016] Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. OpenAI Gym. arXiv preprint arXiv:1606.01540, 2016.
[Brown et al., 2019] Daniel Brown, Wonjoon Goo, Prabhat Nagarajan, and Scott Niekum. Extrapolating beyond suboptimal demonstrations via inverse reinforcement learning from observations. In International Conference on Machine Learning, pages 783-792. PMLR, 2019.
[Codevilla et al., 2019] Felipe Codevilla, Eder Santana, Antonio M. López, and Adrien Gaidon. Exploring the limitations of behavior cloning for autonomous driving. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9329-9338, 2019.
[De Haan et al., 2019a] Pim De Haan, Dinesh Jayaraman, and Sergey Levine. Causal confusion in imitation learning. Advances in Neural Information Processing Systems, 32, 2019.
[De Haan et al., 2019b] Pim De Haan, Dinesh Jayaraman, and Sergey Levine. Causal confusion in imitation learning. Advances in Neural Information Processing Systems, 32, 2019.
[Dhariwal et al., 2017] Prafulla Dhariwal, Christopher Hesse, Oleg Klimov, Alex Nichol, Matthias Plappert, Alec Radford, John Schulman, Szymon Sidor, Yuhuai Wu, and Peter Zhokhov. OpenAI Baselines, 2017.
[Ho and Ermon, 2016] Jonathan Ho and Stefano Ermon. Generative adversarial imitation learning. Advances in Neural Information Processing Systems, 29, 2016.
[Hooker et al., 2019] Sara Hooker, Dumitru Erhan, Pieter-Jan Kindermans, and Been Kim. A benchmark for interpretability methods in deep neural networks. Advances in Neural Information Processing Systems, 32, 2019.
[Leech, 2019] Thomas Leech. Explainable machine learning for task planning in robotics. PhD thesis, Massachusetts Institute of Technology, 2019.
[Pan et al., 2020] Menghai Pan, Weixiao Huang, Yanhua Li, Xun Zhou, and Jun Luo. xGAIL: Explainable generative adversarial imitation learning for explainable human decision analysis. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 1334-1343, 2020.
[Petsiuk et al., 2018] Vitali Petsiuk, Abir Das, and Kate Saenko. RISE: Randomized input sampling for explanation of black-box models. arXiv preprint arXiv:1806.07421, 2018.
[Scheller et al., 2020] Christian Scheller, Yanick Schraner, and Manfred Vogel. Sample efficient reinforcement learning through learning from demonstrations in Minecraft. In NeurIPS 2019 Competition and Demonstration Track, pages 67-76. PMLR, 2020.
[Schulman et al., 2017] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
[Yu et al., 2018] Tianhe Yu, Chelsea Finn, Annie Xie, Sudeep Dasari, Tianhao Zhang, Pieter Abbeel, and Sergey Levine. One-shot imitation from observing humans via domain-adaptive meta-learning. arXiv preprint arXiv:1802.01557, 2018.
[Zhang et al., 2021] Dandan Zhang, Qiang Li, Yu Zheng, Lei Wei, Dongsheng Zhang, and Zhengyou Zhang. Explainable hierarchical imitation learning for robotic drink pouring. IEEE Transactions on Automation Science and Engineering, 2021.
[Zheng et al., 2022] Boyuan Zheng, Sunny Verma, Jianlong Zhou, Ivor W. Tsang, and Fang Chen. Imitation learning: Progress, taxonomies and challenges. IEEE Transactions on Neural Networks and Learning Systems, pages 1-16, 2022.
VNAzT4oBgHgl3EQfJ_sw/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,435 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf,len=434
2
+ page_content='Explaining Imitation Learning through Frames Boyuan Zheng1 , Jianlong Zhou1 , Chunjie Liu , Yiqiao Li1 and Fang Chen1 1University of Technology Sydney Boyuan.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
3
+ page_content='Zheng-1@student.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
4
+ page_content='uts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
5
+ page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
6
+ page_content='au, Jianlong.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
7
+ page_content='Zhou@uts.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
8
+ page_content='edu.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
9
+ page_content='au Abstract As one of the prevalent methods to achieve au- tomation systems, Imitation Learning (IL) presents a promising performance in a wide range of do- mains.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
10
+ page_content=' However, despite the considerable improve- ment in policy performance, the corresponding re- search on the explainability of IL models is still limited.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
11
+ page_content=' Inspired by the recent approaches in ex- plainable artificial intelligence methods, we pro- posed a model-agnostic explaining framework for IL models called R2RISE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
12
+ page_content=' R2RISE aims to explain the overall policy performance with respect to the frames in demonstrations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
13
+ page_content=' It iteratively retrains the black-box IL model from the randomized masked demonstrations and uses the conventional evalua- tion outcome environment returns as the coefficient to build an importance map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
14
+ page_content=' We also conducted ex- periments to investigate three major questions con- cerning frames’ importance equality, the effective- ness of the importance map, and connections be- tween importance maps from different IL models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
15
+ page_content=' The result shows that R2RISE successfully distin- guishes important frames from the demonstrations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
16
+ page_content=' 1 Introduction Recent advances in Imitation Learning (IL), which leverages external demonstration to reproduce the desired behaviours, demonstrate a promising performance in fields like 3D game- play [Scheller et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
17
+ page_content=', 2020], robotics [Yu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
18
+ page_content=', 2018], and au- tomatic driving [Codevilla et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
19
+ page_content=', 2019].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
20
+ page_content=' Most of the research on IL keep applying more complex Deep Neural Network (DNN) models, such as convolutional neural network (CNN) and generative adversarial network (GAN), to achieve greater performance under various condition while paying less atten- tion to explaining what information the trained agents learned from the external demonstration.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
21
+ page_content=' In this case, despite the suc- cess, IL methods are becoming increasingly unexplainable, and this problem remains an open challenge in both IL and Explainable Artificial Intelligence (XAI).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
22
+ page_content=' Currently, the number of existing research that combines XAI and IL is still limited, and they could be roughly catego- rized into two approaches to achieve better explainability, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
23
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
24
+ page_content=', leveraging white-box models and analyzing the pixel-wise explainability via existing computer vision techniques.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
25
+ page_content=' As for leveraging white-box models, most existing research sub- stitutes the prevalent neural network architecture with other white-box models with intrinsic interpretability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
26
+ page_content=' For example, Leech [2019] proposed a learning framework that aggregates IL and logical automata to represent problems as compact fi- nite state automata with human-interpretable logic states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
27
+ page_content=' Be- wley et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
28
+ page_content=' [2020] modelled the behaviour policy of a trained black-box agent in the form of a decision tree by analyzing its input-output statistics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
29
+ page_content=' Zhang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
30
+ page_content=' [2021] leveraged a hi- erarchical structure to explain the model’s decision-making.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
31
+ page_content=' On the other hand, the research related to analyzing the pixel- wise explainability aims at the CNN structures in IL models that are widely used to capture features from image input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
32
+ page_content=' Referring to existing research in XAI and computer vision, the explainability of the model is commonly represented as heatmaps and analyzing the model’s decision-making process from the heatmaps.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
33
+ page_content=' For example, Pan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
34
+ page_content=' [2020] proposed a model-specific method called xGAIL that is based on Gen- erative Adversarial Imitation Learning (GAIL) [Ho and Er- mon, 2016] and obtains local and global explanations for the passenger-seeking problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
35
+ page_content=' In fact, before xGAIL was proposed, the research commu- nity of IL investigated features in image inputs, but they did not highlight the significance of explainability.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
36
+ page_content=' For example, Brown et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
37
+ page_content=' [2019] used attention maps of the input image frames to validate the effectiveness of the learning process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
38
+ page_content=' De Haan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
39
+ page_content=' [2019a] pointed out that the IL agent could learn wrong causal correlations between expert behaviours and irrelevant features in the input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
40
+ page_content=' These methods, including xGAIL, demonstrate what features in a single frame are sig- nificant for models to learn the desired behaviours.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
41
+ page_content=' However, the above-mentioned methods fail to evaluate the importance of frames.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
42
+ page_content=' Do the input image frames have identical impor- tance?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
43
+ page_content=' If not, how to distinguish frames’ importance?' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
44
+ page_content=' To tackle these problems, we attempt to explain the in- put demonstrations as a whole by proposing a novel explain- ing method called R2RISE, which iteratively masks random frames in the demonstrations and evaluates the performance of the agents trained by the masked inputs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
45
+ page_content=' The intuition is that the input demonstrations are regarded as a single im- age, and frames in the demonstrations are regarded as pix- els.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
46
+ page_content=' In this case, existing XAI and computer vision meth- ods could be directly applied to investigate the importance of arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
47
+ page_content='01088v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
48
+ page_content='LG] 3 Jan 2023 Figure 1: A diagrammatic representation of a single iteration of R2RISE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
49
+ page_content=' The input demonstrations are subject to element-wise multiplication (denotes as �) with a random mask which creates a masked demonstration, with greyed frames indicating those which are masked.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
50
+ page_content=' Sub- sequently, the masked demonstration is used to train a black box IL model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
51
+ page_content=' The trained model interacts with the test environment to obtain returns, the mean of which is element-wise multiplied with the initial mask and accumulated to the existing importance map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
52
+ page_content=' frames instead of features in a single frame.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
53
+ page_content=' R2RISE com- bines the existing methods RISE [Petsiuk et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
54
+ page_content=', 2018] and ROAR [Hooker et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
55
+ page_content=', 2019], and achieves model-agnostic explanations for IL models with various architectures.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
56
+ page_content=' Our main contribution is summarized as follows: 1) We proposed a model-agnostic method to explain IL models;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
57
+ page_content=' 2) We extended a novel perspective to explain IL with respect to the whole input dataset instead of a specific frame;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
58
+ page_content=' 3) We investigated the connection between agents’ overall perfor- mance and demonstration frames;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
59
+ page_content=' 2 Preliminaries To better illustrate our approach, we first introduce the related existing literature in the field of XAI: RISE [Petsiuk et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
60
+ page_content=', 2018] and ROAR [Hooker et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
61
+ page_content=', 2019].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
62
+ page_content=' We then review an insightful method xGAIL [Pan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
63
+ page_content=', 2020] that aggregates XAI with specific IL model GAIL [Ho and Ermon, 2016], and discuss its limitations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
64
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
65
+ page_content='1 Randomized Input Sampling for Explanation (RISE) Randomized Input Sampling for Explanation (RISE) is one of the state-of-the-art XAI methods proposed by Petsiuk et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
66
+ page_content=' [2018] that explains black-box models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
67
+ page_content=' The attractive characteristics of RISE are its simplicity and generality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
68
+ page_content=' Un- like other popular XAI approaches, which calculate the gra- dient of image classification outputs, RISE probes the target model by randomly masking the input image and recording the probability result with respect to the target class.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
69
+ page_content=' This process is repeated multiple times, and the recorded proba- bilities for each pixel are linearly combined to generate an importance map.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
70
+ page_content=' This allows for the extraction of the most influential region in the input image for the target decision.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
71
+ page_content=' RISE is also significant for explaining IL, as IL typically re- quires multiple demonstrations to train the model, which can be regarded as a single image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
72
+ page_content=' In this case, RISE can be used to explain which frames are important for policy training.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
73
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
74
+ page_content='2 RemOve And Retrain (ROAR) RemOve And Retrain (ROAR) was proposed by Hooker et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
75
+ page_content=' [2019], and its name concisely summarizes the working pro- cess of ROAR.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
76
+ page_content=' By substituting some pixels estimated to be important with fixed uninformative values and then retrain- ing a new model, ROAR achieves to evaluate feature impor- tance for a wide range of models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
77
+ page_content=' The motivation of ROAR is that if the model demonstrates more sharp degradation in performance because of the removal, then we could conclude the proposed model is more accurate.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
78
+ page_content=' The authors also ar- gued that the retraining process is essential as machine learn- ing models commonly assume that the training and test dis- tribution is similar, and repeating the training several times could ensure a low variance in performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
79
+ page_content=' The intuition of ROAR is valuable for explaining IL models, as training a sin- gle model to determine the importance of frames is risky, and the research community commonly uses ensemble methods to deal with the distribution shift.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
80
+ page_content=' Retraining serval models un- der the same removal rate could improve the reliability of the importance along the demonstration trajectories.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
81
+ page_content=' More im- portantly, since the conventional evaluation of IL problems is different from the classical CNN-involved XAI tasks, the performance of the trained IL model is represented as returns from the dynamic environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
82
+ page_content=' It is improper to train the model once and feed image observations with fixed masks under such an environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
83
+ page_content=' 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
84
+ page_content='3 Explainable Generative Adversarial Imitation Learning (xGAIL) Pan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
85
+ page_content=' [2020] made the first attempt to explain one of the state-of-the-art models Generative Adversarial Imitation Learning (GAIL) [Ho and Ermon, 2016], and validated their method xGAIL on a passenger seeking problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
86
+ page_content=' xGAIL was designed for problems that rely on spatial-temporal data, and both local and global explanations were obtained separately from a well-trained GAIL model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
87
+ page_content=' However, xGAIL’s gen- erality is severely limited to a specific problem and model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
88
+ page_content=' In addition, xGAIL, in fact, transforms the IL problem into Demonstrations Mask m;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
89
+ page_content=' Masked Environment Importance Demonstrations Map Traj o Traj o Traj 1 Traj 1 0 Black-box Traj 2 Traj 2 ROm;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
90
+ page_content=' IL model ::: Traj N Traj Nan image classification problem by extracting and analyzing limited frames from abundant inputs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
91
+ page_content=' This could cause the ab- sence of an overall explanation for the model’s performance and generate explanations with bias as most of the informa- tion in the demonstration was filtered by the frame extraction process.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
92
+ page_content=' 3 R2RISE To overcome the above-mentioned limitations, we proposed a model-agnostic explanation method for imitation learning called R2RISE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
93
+ page_content=' R2RISE combines the merits of RISE and ROAR, and investigates the frames’ importance with respect to the policy’s overall performance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
94
+ page_content=' We first review how RISE formulates the problem and distinguishes pixels’ importance for the image classification problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
95
+ page_content=' For a given image I with the size of H × W, RISE creates a random binary mask m with the same size of I, and does an element-wise multiplication between im- age I and mask m (denoted as I � m).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
96
+ page_content=' The masked image then feeds into the black-box model (denoted as f(I � m)).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
97
+ page_content=' The importance of pixels is defined as the expected score over all possible masks M = {m0, m1, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
98
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
99
+ page_content=', mi} conditioned on the event that pixel is observed (denoted as M(λ) = 1, if the pixel is masked, then M(λ) = 0), i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
100
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
101
+ page_content=' SI,f(λ) = EM[f(I � m)|M(λ) = 1].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
102
+ page_content=' By rewriting the above equa- tion as a summation over mask m and empirically estimating it using Monte Carlo sampling, the saliency map can be com- puted as a weighted sum of random masks and normalized by the expectation of M: SI,f(λ) ≈ 1 E[M] · N N � i=1 f(I � mi) · mi(λ).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
103
+ page_content=' (1) Since RISE does not need any assumptions and informa- tion from the target model, RISE could be used to explain black-box models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
104
+ page_content=' The intuition behind RISE is that when f(I � m) is high, it indicates that the mask observes impor- tant pixels.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
105
+ page_content=' With similar intuition, Hooker et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
106
+ page_content=' [2019] pro- posed ROAR to evaluate a feature importance.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
107
+ page_content=' An ordered set of feature importance is estimated, and then they replace the top l fraction of the ordered set with the corresponding chan- nel mean, where l is the pre-defined percentage of degrada- tion level l = [0, 10, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
108
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
109
+ page_content=', 100].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
110
+ page_content=' The major difference between RISE and ROAR is that ROAR retrains the model on the re- placed dataset, while RISE only trains the model once.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
111
+ page_content=' Like most imitation learning methods, we assume the test- ing data has a similar distribution as training data, and the input demonstrations Dn are optimal.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
112
+ page_content=' This could ensure eval- uation fairness for a wide range of IL models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
113
+ page_content=' The demonstra- tions Dn consist of multiple trajectories, and each trajectory could be represented as either a sequence of state-action pairs or observations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
114
+ page_content=' In this work, we represent the trajectory as a sequence of state-action pairs, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
115
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
116
+ page_content=' Dn = {τ1, τ2, .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
117
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
118
+ page_content=', τn}, where τi∈[1,n] = {(s1, a1), (s2, a2), .' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
119
+ page_content='..' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
120
+ page_content=', (st, at)}.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
121
+ page_content=' The black- box imitation learning model trains a policy (denoted as πDn(a|s)) on the input demonstrations Dn, then interacts with the environment and obtains returns R from the test- ing environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
122
+ page_content=' For the finite horizon T, the expected return could be represented as the accumulation of the return at each time step, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
123
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
124
+ page_content=' R(πDn) = E[ T � t=0 rt|πDn].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
125
+ page_content=' (2) The discussion we have so far motivated us to propose a frame-wise explanation method for IL called R2RISE.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
126
By regarding the demonstrations $D$ as a single image, where the number of demonstrations is the image height $H$ and the length of the demonstrations is the image width $T$, we can investigate frame-wise importance following the same intuition as RISE. However, RISE cannot be directly applied to IL, since applying a fixed mask to a dynamic environment frame by frame is unreasonable, and IL methods are commonly evaluated by interactions with the environment rather than by feeding the policy network another dataset. We therefore aggregate ROAR with RISE and propose R2RISE. It hypothesizes that the importance of each frame is not identical and iteratively removes random frames according to a predefined degradation level. The modified dataset $D_n = D \odot m_i$ is used to retrain an IL model. The retrained IL model then repeatedly interacts with the environment to obtain the accumulated return, and R2RISE finally computes a linear combination of the returns to obtain the saliency map (see Figure 1).
Assuming the number of generated masks is $N$, and the return of each mask is the average return from $J$ rounds of interaction with the environment, the computation of the saliency map is similar to equation (1). To cater to the setting of IL, we substitute the $f(I \odot m)$ in equation (1) with equation (2):

$$S_{D_n,f}(\lambda) \approx \frac{1}{\mathbb{E}[M] \cdot N} \sum_{i=1}^{N} R(\pi_{D_i}) \cdot m_i(\lambda) \quad (3)$$
$$= \frac{1}{\mathbb{E}[M] \cdot N} \sum_{i=1}^{N} \mathbb{E}\Big[\sum_{t=0}^{T} r_t \,\Big|\, \pi_{D_i}\Big] \cdot m_i(\lambda) \quad (4)$$
$$= \frac{1}{\mathbb{E}[M] \cdot N \cdot J} \sum_{i=1}^{N} \sum_{j=0}^{J} \sum_{t=0}^{T} r_t \cdot m_i(\lambda) \quad (5)$$

where $D_i = D \odot m_i$, and

$$m_i(\lambda) = \begin{cases} 0, & \text{if the pixel is masked,} \\ 1, & \text{if the pixel is observed.} \end{cases}$$
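In code, equation (5) reduces to a mask-weighted average of empirical returns. A minimal sketch, assuming `masks` is an N × H × T binary array and `returns` holds the corresponding average returns over J rollouts (both variable names are ours):

import numpy as np

def saliency_map(masks, returns):
    # masks:   (N, H, T) binary array, m_i(lambda) in {0, 1}
    # returns: (N,) array, average return of the policy retrained on D ⊙ m_i
    weighted = (returns[:, None, None] * masks).sum(axis=0)  # sum_i R(pi_{D_i}) * m_i
    expected_mask = masks.mean()                             # empirical estimate of E[M]
    return weighted / (expected_mask * len(masks))           # normalize by E[M] * N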
As the formula shows, R2RISE does not require any information from the IL models, so it can be used as a model-agnostic method to explain IL. To evaluate the effectiveness of R2RISE, we test two diverse IL methods: Behavioural Cloning (BC) [Bain and Sammut, 1999] and Generative Adversarial Imitation Learning (GAIL) [Ho and Ermon, 2016]. BC directly maps the states to actions from the input demonstrations, and the control policy is obtained via supervised learning; GAIL, on the other hand, learns the policy through an iterative adversarial process between a generator G and a discriminator D, where G generates a fake data distribution and D differentiates the fake data distribution from the given expert distribution [Zheng et al., 2022].

(a) Importance maps generated by R2RISE. (b) Important frames in BeamRider and Breakout.
Figure 2: Importance maps and the corresponding extracted sample frames that are recognized as important.
Algorithm 1 R2RISE
Input: demonstrations D, target IL model f
Parameter: degradation level l, number of randomized masks N
Output: an importance map S_{D,f}
1: Initialize masks M based on degradation level l and number of randomized masks N.
2: Initialize a blank importance map S_{D,f} with the same shape as D.
3: for m_i in M do
4:   Randomly initialize the model f.
5:   Obtain masked demonstrations D_n = D ⊙ m_i.
6:   Train model f with the masked demonstrations D_n and obtain policy π_{D_n}.
7:   Evaluate policy π_{D_n} by interacting with the environment and obtain the average return R̄.
8:   Update the importance map via element-wise addition, S_{D,f} ← S_{D,f} + (R̄ · m_i).
9: end for
10: return importance map S_{D,f}
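A compact Python rendering of Algorithm 1 might look as follows; `sample_masks`, `apply_mask`, and `train_il_model` are hypothetical helpers (mask sampling is sketched concretely in Section 4.1), and `estimate_return` is the rollout-averaging sketch given earlier:

import numpy as np

def r2rise(demos, env, degradation_level, n_masks):
    # demos: (H, T, ...) array holding H trajectories of length T.
    masks = sample_masks(n_masks, demos.shape[:2], degradation_level)  # step 1
    importance = np.zeros(demos.shape[:2])                             # step 2
    for m in masks:                                                    # step 3
        model = train_il_model(apply_mask(demos, m))                   # steps 4-6: retrain f from scratch
        avg_return = estimate_return(model.policy, env)                # step 7
        importance += avg_return * m                                   # step 8: element-wise update
    return importance                                                  # step 10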
The ways BC and GAIL learn the policy are very different, and we wish to validate the generality of R2RISE through this diverse model selection.
4 Experiment
In this section, we conduct a series of experiments to address the following questions: (1) Is the importance between frames identical? (2) Can R2RISE distinguish the importance between frames? (3) Are there connections between the importance maps obtained from different IL models?

4.1 Setup
We run our experiments on an NVIDIA Quadro RTX 5000 GPU, and two diverse IL models, BC and GAIL, are evaluated on two OpenAI Gym Atari tasks: Breakout and BeamRider [Brockman et al., 2016].
Similar to recent IL methods, we leverage the proximal policy optimization (PPO) [Schulman et al., 2017] algorithm from the OpenAI baselines [Dhariwal et al., 2017], utilizing the default parameters and reward function, to generate expert demonstrations. The PPO training process is checkpointed every 20 steps, and the observations of size 84 × 84 × 3 and the actions exchanged between the PPO agents and the task environment are recorded as "trajectories." These trajectories, generated from checkpoint 1400, serve as expert demonstrations. To avoid the "causal confusion" problem (models building wrong causal relationships with irrelevant patterns) [De Haan et al., 2019b] and to ensure the fairness of our evaluation, we mask the indicators (such as the scoring board) in the frames and feed the same demonstrations as input to the different IL models.
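The indicator masking itself is a simple pixel-level operation; the sketch below zeroes out a fixed scoreboard region, where the row range is an illustrative assumption rather than the exact crop used:

import numpy as np

def mask_indicators(frames, rows=slice(0, 10)):
    # frames: (N, 84, 84, 3) uint8 observations; `rows` is a hypothetical crop
    # covering the score indicators at the top of each frame.
    frames = frames.copy()
    frames[:, rows, :, :] = 0
    return frames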
Regarding the parameter setting, we generate 20 trajectories with a fixed length of 1000 for each IL model. Five random seeds and five percentage degradation levels l = [10, 30, 50, 70, 90] are pre-defined for evaluation. To assess each given random seed and percentage degradation, we retrain 100 models with 100 randomized masks. Each mask contains 20 × 100 grids, which means that every single trajectory is cut into 100 snippets, and each snippet assigns the same importance to 10 frames. The retrained model is tested over 20 trials, and the average return of the trials, multiplied element-wise with the random mask, is added to the final importance map.
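Concretely, one randomized 20 × 100 grid mask at degradation level l can be sampled and broadcast to frame level as in the sketch below (a minimal version under the settings above; the function name is ours):

import numpy as np

def sample_grid_mask(n_demos=20, n_snippets=100, snippet_len=10, level=0.5, seed=None):
    # `level` is the fraction of grid cells removed (e.g. 0.3 for l = 30).
    rng = np.random.default_rng(seed)
    grid = (rng.random((n_demos, n_snippets)) >= level).astype(np.float32)
    return np.repeat(grid, snippet_len, axis=1)  # (20, 1000) frame-level binary mask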
4.2 Is the importance between frames identical?
Recall that we hypothesize that the importance of frames differs, so we validate this hypothesis by applying several randomized masks to the same demonstrations and comparing the performance of the trained models. If the outcomes present noticeable deviations, then it can be inferred that the contribution of frames varies.
[Figure 2 panels: Importance Map of BC -- BeamRider; Importance Map of BC -- Breakout; Importance Map of GAIL -- BeamRider; Importance Map of GAIL -- Breakout (y-axis: # of demos); Sample Importance Frames of BeamRider; Sample Importance Frames of Breakout.]

Figure 3: Deviations in policy performance of 10 models trained with 10 different randomized masks.
To further this idea, we divide each trajectory into ten segments of equal length and randomly assign either a value of 0 or 1 to each segment. Regions assigned 0 are removed, and the preprocessed demonstrations are then used as input to train the corresponding models. This process iterates ten times, yielding Figure 3.
Here, the x-axis is the number of attempts, the y-axis is the environment return, and the error bars indicate the standard deviation of the policy performance. From Figure 3, we can observe that the performance deviates from model to model. The best model achieves far better performance than the worst one, which indicates that the importance of frames is not identical.
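The segment-level masking used for this check is straightforward to reproduce; a minimal sketch with illustrative names:

import numpy as np

def random_segment_mask(traj_len=1000, n_segments=10, seed=None):
    # Assign 0 or 1 uniformly at random to each equal-length segment.
    rng = np.random.default_rng(seed)
    keep = rng.integers(0, 2, size=n_segments)
    return np.repeat(keep, traj_len // n_segments)  # frame-level 0/1 mask

def drop_masked_frames(trajectory, mask):
    # Remove the state-action pairs whose segment was assigned 0.
    return [pair for pair, keep in zip(trajectory, mask) if keep]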
Similarly, the importance maps generated by R2RISE also suggest disparities between frames (see Figure 2a). The x-axis is the trajectory length, the y-axis represents the trajectories, and the grayscale denotes importance: the whiter a grid is, the more important it is. Figure 2b shows the extracted frames that are recognized as the most important components of the demonstrations. For BeamRider, the models put more weight on destroying enemy flights, whereas for Breakout, the models pay more attention to the rebounding process between the upper blocks, sidewalls, and paddle, which resembles the strategy we might use for these games.
4.3 Can R2RISE distinguish the importance between frames?
This section investigates the effectiveness of R2RISE. Through the hypothesis validation above, we obtain a map indicating the importance of frames. However, the quality of the generated maps needs to be properly evaluated. To this end, we use causal metrics similar to those in [Petsiuk et al., 2018], insertion and deletion, where the availability of the 'cause' significantly influences the model's decision-making and performance. In image classification tasks, deleting the causal pixels leads to a sharp drop in accuracy if the model is well explained.
In our experiment, we leverage a similar intuition and expect that removing the important frames leads to worse performance while the amount of input data is kept the same.

(a) Policy performance changes in BeamRider. (b) Policy performance changes in Breakout.
Figure 4: Validation of the effectiveness of R2RISE. The lines and error bars represent the mean performance and standard deviation of the models trained on a certain percentage of the most important (or least important) frames.
To achieve this, we transform the generated importance map into a mask by setting a threshold and replacing each entry of the map with 1 or 0 depending on the threshold.
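One way to realize this transformation is a quantile threshold that keeps a fixed fraction of the most (or least) important grids; a minimal sketch, where the quantile parameterization is our assumption:

import numpy as np

def importance_to_mask(importance, keep_fraction=0.3, most_important=True):
    # Binarize an importance map so that roughly `keep_fraction` of the grids
    # survive: the top fraction ("with important frames") or the bottom
    # fraction ("without important frames").
    if most_important:
        return (importance >= np.quantile(importance, 1.0 - keep_fraction)).astype(np.float32)
    return (importance <= np.quantile(importance, keep_fraction)).astype(np.float32)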
Figure 4 shows the changes in policy performance using different percentages of the most important (or least important) frames. The x-axis is the percentage of data used to train the model, and the y-axis is the environment return. The solid lines are the average returns over 20 trials using the same transformed mask and demonstrations; the error bars are the standard deviations over the 20 trials. From Figure 4, we can observe that the models trained with the most important frames perform well when the input data is relatively limited for both tasks, which meets our expectations.
For BeamRider (see Figure 4a), the model trained with the important frames always performs better than the model trained with the least important frames. The performance deviation at the beginning of the figure is relatively small; we think this is because the model is more sensitive to the amount of data than to the availability of the important frames. Towards the end of the figure, the performance deviation increases, which indicates that missing the top important frames significantly limits the upper bound of the model's performance. For Breakout (see Figure 4b), the model with important frames significantly outperforms the model without important frames until more than 60% of the total demonstrations are fed. When more than 60% of the data is given, the model's performance starts to fluctuate. We suspect this is because 50% of the input is already sufficient to train the policy, and as more ordinary or redundant frames are added to the dataset, the policy performance is negatively influenced.
[Figure 3 plots: Returns vs. Trials (1-10) for BeamRider and Breakout; legend: BC, GAIL. Figure 4 plots: Environment Returns vs. Degradation Level (10%-90%) for BeamRider and Breakout; legend: with important frames, without important frames.]

(a) Deviation between the importance maps in BeamRider. (b) Deviation between the importance maps in Breakout.
Figure 5: Deviation between the importance maps of BC and GAIL.
4.4 Are there connections between the importance maps obtained from different IL models?
In addition to exploring explainability within a single IL model, we also examine the connections between IL models. In this section, we investigate the question: does the importance map obtained by one model have connections to that of another model? To this end, we propose two approaches to explore the intrinsic connections. The first directly compares the importance maps by projecting the values into the same range and calculating the element-wise deviation (see Figure 5). The larger the deviation is, the whiter the output image should be. From Figure 5, we can observe that most grids are close to black, which indicates that the importance assigned to these grids is similar.
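Under this reading, the first comparison amounts to min-max normalizing both maps and taking an absolute element-wise difference:

import numpy as np

def map_deviation(map_a, map_b):
    # Project both importance maps to [0, 1], then compare element-wise;
    # whiter pixels in the rendered difference mean larger deviation.
    normalize = lambda m: (m - m.min()) / (m.max() - m.min() + 1e-8)
    return np.abs(normalize(map_a) - normalize(map_b))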
The second attempt involves generating a mask from an importance map produced by one model and applying that mask to the same demonstrations to train another model. The underlying assumption is that if there are connections between the models' importance maps, the important frames identified by one model should also work well for another model, leading to improved performance compared to training without those frames. Figure 6 displays the average returns of GAIL using three types of masks: the blue, orange and grey lines correspond to models trained with masks extracted from the importance map obtained using BC, GAIL and random masking, respectively. It can be seen in Figure 6 that the model trained with BC's mask demonstrates policy performance similar to the model trained with GAIL's mask, and both outperform the model using a randomized mask after the same number of training epochs. This confirms our expectation that the important frames recognized by BC can also be employed to train GAIL with considerable performance. This can prove useful when the target model is time-consuming to train: one can use a more time-efficient method like BC to obtain an alternative importance map.
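Composed from the earlier sketches, the transfer experiment is a few lines; `train_gail` is a hypothetical GAIL trainer, and `r2rise` is assumed to be run with BC as the underlying learner:

def transfer_experiment(demos, env, keep_fraction=0.5):
    # Explain with the cheap model (BC), then train the expensive one (GAIL)
    # on only the frames that BC's importance map marks as important.
    bc_importance = r2rise(demos, env, degradation_level=0.5, n_masks=100)
    mask = importance_to_mask(bc_importance, keep_fraction, most_important=True)
    gail = train_gail(apply_mask(demos, mask))
    return estimate_return(gail.policy, env)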
Figure 6: Average reward learning curves of GAIL trained with different masks. The blue, orange and grey lines are trained with the masks extracted from the importance map obtained using BC, GAIL and random, respectively.
5 Limitations
Several intriguing challenges await further exploration. Although we were able to extract promising explanations through a large number of masks and validations of the obtained policies, the robustness of the framework could be further improved through ensemble methods. As the framework currently relies on a single trial to train each model, ensembling IL models could reduce the performance variance for each given mask. In addition to robustness, computational intensiveness is another limitation. The time taken to obtain a satisfactory explanation is closely linked to the time spent training the target model once. If the target model requires days to train, retraining it hundreds of times is impractical. Improving time efficiency while preserving the model-agnostic property remains an open challenge for R2RISE. Investigating the relationship between global explanations and the frames recognized as important is another interesting future direction. Although we observed similar patterns in the extracted frames across different trials and models, it is still unsafe to claim that these patterns constitute global explanations for a specific task. Further research is needed to provide theoretical proof of the connections.
6 Conclusion
This paper introduced a model-agnostic explanation framework for imitation learning called R2RISE. R2RISE distinguishes the frames' importance in relation to the overall policy performance. It iteratively applies numerous randomized masks to the demonstrations and retrains the black-box IL model on the masked demonstrations. Evaluation of the obtained policy is conducted in a manner similar to most IL methods, where the policy is evaluated by the accumulated returns from the environment; we leverage the accumulated returns as coefficients that multiply the masks to obtain the importance map of the frames. Experiments have shown that R2RISE can successfully distinguish important frames from the demonstrations, thus providing insight into which frames contribute to better performance.
+ page_content=' 12 10 8 AverageReturns 4 2 0 1 51 101 151 201251301351401451501551601651701 751 8018519019511001 Epochs BC+GAIL GAIL+GAIL Random+GAILDifference between importance maps # of demos 0 200 400 600 800Difference between importance maps # of demos 0 200 400 600 800References [Bain and Sammut, 1999] Michael Bain and Claude Sam- mut.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
360
+ page_content=' A framework for behavioural cloning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
361
+ page_content=' In Machine Intelligence 15, pages 103–129.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
362
+ page_content=' Oxford University Press, 1999.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
363
+ page_content=' [Bewley et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
364
+ page_content=', 2020] Tom Bewley, Jonathan Lawry, and Arthur Richards.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
365
+ page_content=' Modelling agent policies with inter- pretable imitation learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
366
+ page_content=' In International Workshop on the Foundations of Trustworthy AI Integrating Learning, Optimization and Reasoning, pages 180–186.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
367
+ page_content=' Springer, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
368
+ page_content=' [Brockman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
369
+ page_content=', 2016] Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
370
+ page_content=' Openai gym.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
371
+ page_content=' arXiv preprint arXiv:1606.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
372
+ page_content='01540, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
373
+ page_content=' [Brown et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
374
+ page_content=', 2019] Daniel Brown, Wonjoon Goo, Prabhat Nagarajan, and Scott Niekum.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
375
+ page_content=' Extrapolating beyond sub- optimal demonstrations via inverse reinforcement learning from observations.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
376
+ page_content=' In International conference on ma- chine learning, pages 783–792.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
377
+ page_content=' PMLR, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
378
+ page_content=' [Codevilla et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
379
+ page_content=', 2019] Felipe Codevilla, Eder Santana, An- tonio M L´opez, and Adrien Gaidon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
380
+ page_content=' Exploring the lim- itations of behavior cloning for autonomous driving.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
381
+ page_content=' In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9329–9338, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
382
+ page_content=' [De Haan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
383
+ page_content=', 2019a] Pim De Haan, Dinesh Jayaraman, and Sergey Levine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
384
+ page_content=' Causal confusion in imitation learn- ing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
385
+ page_content=' Advances in Neural Information Processing Systems, 32, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
386
+ page_content=' [De Haan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
387
+ page_content=', 2019b] Pim De Haan, Dinesh Jayaraman, and Sergey Levine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
388
+ page_content=' Causal confusion in imitation learn- ing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
389
+ page_content=' Advances in Neural Information Processing Systems, 32, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
390
+ page_content=' [Dhariwal et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
391
+ page_content=', 2017] Prafulla Dhariwal, Christopher Hesse, Oleg Klimov, Alex Nichol, Matthias Plappert, Alec Radford, John Schulman, Szymon Sidor, Yuhuai Wu, and Peter Zhokhov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
392
+ page_content=' Openai baselines, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
393
+ page_content=' [Ho and Ermon, 2016] Jonathan Ho and Stefano Ermon.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
394
+ page_content=' Generative adversarial imitation learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
395
+ page_content=' Advances in neural information processing systems, 29, 2016.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
396
+ page_content=' [Hooker et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
397
+ page_content=', 2019] Sara Hooker, Dumitru Erhan, Pieter- Jan Kindermans, and Been Kim.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
398
+ page_content=' A benchmark for inter- pretability methods in deep neural networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
399
+ page_content=' Advances in neural information processing systems, 32, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
400
+ page_content=' [Leech, 2019] Thomas Leech.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
401
+ page_content=' Explainable machine learn- ing for task planning in robotics.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
402
+ page_content=' PhD thesis, Mas- sachusetts Institute of Technology, 2019.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
403
+ page_content=' [Pan et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
404
+ page_content=', 2020] Menghai Pan, Weixiao Huang, Yanhua Li, Xun Zhou, and Jun Luo.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
405
+ page_content=' xgail: Explainable gener- ative adversarial imitation learning for explainable hu- man decision analysis.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
406
+ page_content=' In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discov- ery & Data Mining, pages 1334–1343, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
407
+ page_content=' [Petsiuk et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
408
+ page_content=', 2018] Vitali Petsiuk, Abir Das, and Kate Saenko.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
409
+ page_content=' Rise: Randomized input sampling for explanation of black-box models.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
410
+ page_content=' arXiv preprint arXiv:1806.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
411
+ page_content='07421, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
412
+ page_content=' [Scheller et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
413
+ page_content=', 2020] Christian Scheller, Yanick Schraner, and Manfred Vogel.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
414
+ page_content=' Sample efficient reinforcement learning through learning from demonstrations in minecraft.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
415
+ page_content=' In NeurIPS 2019 Competition and Demonstration Track, pages 67–76.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
416
+ page_content=' PMLR, 2020.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
417
+ page_content=' [Schulman et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
418
+ page_content=', 2017] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
419
+ page_content=' Proximal policy optimization algorithms.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
420
+ page_content=' arXiv preprint arXiv:1707.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
421
+ page_content='06347, 2017.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
422
+ page_content=' [Yu et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
423
+ page_content=', 2018] Tianhe Yu, Chelsea Finn, Annie Xie, Sudeep Dasari, Tianhao Zhang, Pieter Abbeel, and Sergey Levine.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
424
+ page_content=' One-shot imitation from observing humans via domain-adaptive meta-learning.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
425
+ page_content=' arXiv preprint arXiv:1802.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
426
+ page_content='01557, 2018.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
427
+ page_content=' [Zhang et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
428
+ page_content=', 2021] Dandan Zhang, Qiang Li, Yu Zheng, Lei Wei, Dongsheng Zhang, and Zhengyou Zhang.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
429
+ page_content=' Explainable hierarchical imitation learning for robotic drink pouring.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
430
+ page_content=' IEEE Transactions on Automation Science and Engineering, 2021.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
431
+ page_content=' [Zheng et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
432
+ page_content=', 2022] Boyuan Zheng, Sunny Verma, Jianlong Zhou, Ivor W.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
433
+ page_content=' Tsang, and Fang Chen.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
434
+ page_content=' Imitation learning: Progress, taxonomies and challenges.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
435
+ page_content=' IEEE Transactions on Neural Networks and Learning Systems, pages 1–16, 2022.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/VNAzT4oBgHgl3EQfJ_sw/content/2301.01088v1.pdf'}
WdAyT4oBgHgl3EQfu_n3/content/tmp_files/2301.00625v1.pdf.txt ADDED
@@ -0,0 +1,990 @@
1
+ On the saturation stress of deformed metals
2
+ S. Queyreau1∗
3
+ 1 Universit´e Sorbonne Paris Nord, LSPM-CNRS,
4
+ UPR 3407, 93430 Villetaneuse, France
5
6
+ arXiv:2301.00625v1 [cond-mat.mtrl-sci] 2 Jan 2023
7
+
8
+ Abstract
9
+ Crystalline materials exhibit a hysteresis behaviour when deformed cyclically. The origins of
10
+ this tension-compression asymmetry have been fully understood only recently as being caused by
11
+ an asymmetry in the junction strength and a reduced mean free path of dislocations inherited
12
+ from the previous deformation stage. Here, we investigate the saturation stress in fcc single- and poly-
13
+ crystals using a Crystal Plasticity framework derived from dislocation dynamics simulations. In
14
+ the absence of plastic localization and damage mechanism, the single-crystal mechanical response
15
+ eventually saturates. We show that the cyclic saturation stress converges asymptotically to the
16
+ monotonic saturation stress as the cycle plastic increment increases, and this convergence can be
17
+ observed for some experimental conditions. The analysis of the experimental literature suggests
18
+ that the mechanisms controlling the saturation in single crystals are the same controlling the cyclic
19
+ response of polycrystals with large grains. We also propose analytical and approximate models to
20
+ predict the saturation stress over the considered loading conditions. The saturation stress appears
21
+ as a fundamental property of dislocations, explaining the consistency observed in the experimental
22
+ literature. This work provides a unified view on the monotonous and cyclic responses of fcc single
23
+ and poly-crystals, which may help in interpreting experimental data.
24
+ I.
25
+ INTRODUCTION
26
+ Deformation of crystalline materials depends upon the deformation history undergone by
27
+ the materials. This is particularly apparent through the existence of hysteresis curves when
28
+ alternating the loading direction in tension and compression as in cyclic deformation [1–3].
29
+ A part of the deformation is reversible and the resulting hardening is much smaller than in
30
+ continuous monotonic deformation, explaining why deformation can be easily accumulated
31
+ this way. Every two cycles, the flow stress usually increases in a continuous manner,
32
+ until reaching a saturation shear stress [4–10].
33
+ In single crystals, this saturation stress
34
+ depends upon the loading direction and the material, as it does not always scale with the shear
35
+ modulus [10]. For a given loading direction and material, the saturation stress typically
36
+ increases with the cycle shear increment γp,cy, and the saturation stress may well be below
37
+ the stress values observed in monotonous deformation. For example, in Cu -certainly the
38
+ ∗ sylvain.queyreau@cnrs.fr
39
41
+ most studied system- deformed in single glide condition, the cyclic saturation stress ranges
+ from 16 to 40 MPa and exhibits a three-stage curve with a central plateau value of 28 MPa.
43
+ In stable multislip conditions, the cyclic saturation stress typically increases monotonously
44
+ with an apparent slope that depends upon the loading direction and the material [7–10]
45
+ and may reach 50 or 60 MPa. In comparison, the maximum stress reached by monotonically
+ deformed single crystals of Cu is in the range of 80-100 MPa.
47
+ Until recently, this tension-compression asymmetry -also known as the Bauschinger Ef-
48
+ fect (BE)- was commonly thought to be related to the building up of Long-Range Internal
49
+ Stresses (LRIS) or backstress associated to the formation of dislocation patterns according
50
+ to the so-called composite model [3, 11, 12]. However, dislocation patterns are rather weak
51
+ at smaller strain, they may well be different from the archetype of the composite model, and
52
+ no LRIS was found in recent large scale mesoscale simulations [13] or by X-ray microdiffrac-
53
+ tion [14, 15] (except at very large strain [16]). Another explanation consisted in the partial
54
+ dissolution of the microstructure [17–20], as a mechanistic way to reduce dislocation den-
55
+ sity and thus flow stress. However, no clear dislocation elementary mechanism was clearly
56
+ identified as there is no in-situ observation of dislocation motion during cyclic deformation.
57
+ Ultimately, these explanations explain neither the transient nature of the Bauschinger effect
58
+ nor its reversibility component.
59
+ The present authors [13, 21] recently proposed a systematic study of cyclic deformations
+ in single crystals by means of Discrete Dislocation Dynamics (DDD) simulations. Despite
+ the presence of a pronounced Bauschinger effect, no LRIS was measured in the
+ simulations. Statistical analysis of the DDD simulations showed that the tension-
63
+ compression asymmetry is caused by two original elementary mechanisms of dislocations.
64
+ The first mechanism i) is related to the easy destruction of binary junctions, whose stability is
+ asymmetric because they form from mobile segments whose curvature is driven by the applied
+ loading. Junctions formed in tension are thus more stable in tension than in compression, and
+ are easily destroyed during the backward motion of dislocations in compression. The second
68
+ elementary mechanism ii) is related to the reduction of the mean free path of dislocations
69
+ as they glide in regions of the crystal that they already explored. In the backward motion,
70
+ the mobile segments unwind stored segments on the edge of the swept area, leading to a
71
+ reduction in the storage rate. These mechanisms naturally explain the transient nature of
72
+ the tension-compression asymmetry and the reversible component of plastic deformation.
73
75
+ From these results, the authors proposed a modified Crystal Plasticity framework that
+ was implemented into an FEM to include the physics highlighted in the DDD. As a result,
+ this multiscale approach was successful in predicting the Bauschinger effect, hysteresis and
78
+ saturation stress reported in most of the experimental literature on cyclically deformed fcc
79
+ single crystals.
80
+ Now that we have a CP framework justified from mesoscale physics and validated by
81
+ confrontation with the existing single-crystal literature, we can now assess the full implica-
82
+ tions of these results and of the model. In particular, when comparing the literature data
83
+ obtained throughout decades on cyclic deformation of single crystals, the saturation stresses
+ observed in cyclic deformation are clearly more reproducible than the Bauschinger effect
85
+ observed on a single tension-compression experiment. This fact suggests that the saturation
86
+ stress in cyclic deformation is the sole result of the average of some basic dislocation mech-
87
+ anisms, while the Bauschinger effect is impacted by the initial state of the material, such as
88
+ the impurity content, the initial dislocation density or the microstructure, but also by the
89
+ loading conditions. In other words, the saturation stress seems to be a basic property of
90
+ the material and dislocations. Simulation results could also be processed in order to provide
91
+ simple equations to help in interpreting and/or fitting the experimental data or to trace
92
+ back to some of the fundamental constants of the CP model e.g. in an inverse approach.
93
+ Finally, the single crystal is an integral part of the more complex polycrystal system. The
94
+ elementary mechanisms described above are general enough to be operative in a polycrystal.
95
+ The insertion of grain boundaries (GB) adds a new lengthscale, and a competition between this lengthscale and
96
+ the ones associated with elementary mechanisms i and ii. These ideas will act as motivations
97
+ of the present paper.
98
+ The objectives of the present work are thus to extend and analyse the CP FEM results
99
+ obtained for the cyclic deformation of fcc single-crystals. We will see that the saturation
100
+ stress of cyclic loading is actually related to the theoretical saturation stress obtained in
101
+ monotonous conditions. Analytical solutions to the constituting differential equations of the
102
+ CP framework will be provided and can be employed to help in interpreting experimental
103
+ data.
104
+ Finally, the implication of the mechanisms i and ii on the cyclic deformation of
105
+ polycrystal will be assessed.
106
108
+ II.
109
+ METHODOLOGY
110
+ This section presents the Crystal Plasticity framework derived from the DDD results in
111
+ [13]. Readers interested in the presentation of the technical details of the DDD technique
112
+ along with a review of recent progress are referred to [22, 23].
113
+ This CP model is an
114
+ extension of the dislocation density based descriptions proposed in the seminal work [24, 25]
115
+ and subsequent works inspired from DDD [26] for monotonous loadings.
116
+ In fcc metals
117
+ at intermediate and high temperature, the flow stress is controlled by the formation and
118
+ destruction of reactions -junctions- between dislocations belonging to different slip systems.
+ The critical shear stress \tau_c^i on the active slip system i is thus related to the forest
+ obstacle densities \rho^j and expressed as:
+ \tau_c^i = \mu b \sqrt{\sum_j a_{ij} \rho^j}    (1)
128
+ where \mu is the isotropic shear modulus, b the norm of the Burgers vector, and a_{ij} the
+ components of the interaction matrix. These interaction coefficients measure the average
+ strength of the two interacting slip systems and can be determined in a straightforward
131
+ manner from DDD [26–29].
132
+ For reasons of symmetry, only six different interactions of
133
+ different nature exist among the twelve a_0/2 ⟨110⟩ {111} slip systems existing in fcc metals.
134
+ The interactions are the self-interaction, the coplanar interaction, three junctions: Lomer,
135
+ Hirth and glissile reactions, and the collinear interaction. The interaction matrix has to
136
+ have non-identical components in order to fully reproduce the anisotropy of the plastic
137
+ deformation of fcc single-crystals.
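+ As an illustration, the forest-hardening law of Eq. (1) reduces to a one-line function. The
+ following Python sketch is not part of the model implementation used in this work; the shear
+ modulus, Burgers vector and uniform interaction coefficient below are assumed, Cu-like values
+ chosen only for the example:
+ import numpy as np
+
+ MU = 42e9     # isotropic shear modulus of Cu (Pa), assumed value
+ B = 2.56e-10  # norm of the Burgers vector of Cu (m), assumed value
+
+ def critical_stress(a_row, rho):
+     # tau_c^i = mu * b * sqrt(sum_j a_ij * rho^j) for one slip system i, Eq. (1)
+     return MU * B * np.sqrt(np.dot(a_row, rho))
+
+ # Example: 12 slip systems at 1e12/12 m^-2 each, with a uniform (assumed)
+ # average interaction coefficient of 0.35 -> tau_c of a few MPa.
+ rho0 = np.full(12, 1e12 / 12.0)
+ print(critical_stress(np.full(12, 0.35), rho0) / 1e6, "MPa")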
138
+ A first extension of the CP framework was introduced to capture the asymmetry of
139
+ dislocation junctions (mechanism ’i’ in the introduction) and to reproduce the tension-
140
+ compression asymmetry observed in DDD [13]. Junctions formed during the forward loading
141
+ from curved segments inherit a mechanical stability asymmetry. The junctions formed during
142
+ prestrain or forward tension loading, are stronger when stressed in tension and statistically
143
+ weaker when now stressed in compression, and this asymmetry in mechanical stability can be
144
+ considered as a line tension effect, with the parent moving segments colliding when they were
145
+ in extension. As a result, while plastic flow is still controlled by junction destruction, the
146
+ interaction coefficients during the backward loading are effectively weakened over a transient
147
+ that depends on the amount of the initial forward deformation. The pool of weak junctions
148
+ gets destroyed as backward deformation proceeds, until mobile segments explore new areas
149
151
+ of the crystal and form junctions polarized in compression. A reversibility function ra was
152
+ introduced to reproduce these effects as:
153
+ a_{ij}^{bck} = (1 - r_a) \, a_{ij}, \quad \text{with} \quad r_a = \exp\!\left( - \frac{\gamma_{bck}^i}{C_a b \sqrt{\Delta\rho_{pr}^i}} \right)    (2)
166
+ with a_{ij} the reference and constant interaction coefficient measured in continued tension.
+ The subscripts 'pr' and 'bck' refer to prestrain and backward loading, respectively. In a
+ cyclic loading, the prestrain becomes the previous cycle, while the backward loading
+ corresponds to the current cycle. The amount of backward strain impacted by this transient
+ on the junction stability is approximated as C_a b \sqrt{\Delta\rho_{pr}^i}. This last
+ expression can provide a lengthscale estimate of this effect. The density increase during
+ the previous deformation cycle is \Delta\rho_{pr}^i = \max(\rho_{pr}^i) - \min(\rho_{pr}^i)
+ and corresponds to the potentially impacted dislocation density polarized according to the
+ initial loading. The constant C_a has been determined as C_a = 0.6 \pm 0.1 through a
+ statistical analysis of the interaction coefficients over a large panel of relevant DDD
+ simulations [13]. The \gamma_{bck}^i / (C_a b \sqrt{\Delta\rho_{pr}^i}) term within the
+ exponential of r_a states the competition between the easy destruction of junctions formed
+ in tension, and the formation of new junctions in never-explored regions of the crystal.
188
+ A second fundamental equation in the CP framework describes the evolution of the
189
+ dislocation density on active slip system ’i’ with the system shear γi.
190
+ The evolution of
191
+ the dislocation density is related to the kinetics of plastic activity that occurs through
192
+ intermittent bursts or avalanches of dislocation motion. In principle, dislocations are stored
+ at the end of an avalanche as dislocation segments left at the edge of the swept area. This
+ being said, the exact theoretical connection between dislocation avalanches at the mesoscale
195
+ and the observed continuous macroscale storage of dislocations has still to be formulated.
196
+ For the present work and in monotonic loading conditions, the density evolution takes the
197
+ following simple form:
198
+ \frac{d\rho^i}{d\gamma^i} = \frac{1}{b} \left[ \frac{1}{L_{hkl}} + \frac{1}{L_I} - y \rho^i \right] = \frac{1}{b} \left[ \frac{\sqrt{\sum_{j \neq i} a_{ij} \rho^j}}{K_{hkl}} + \frac{\sqrt{a'_0 \rho^i}}{K_I} - y \rho^i \right]    (3)
223
+ where the first two terms relate to the dislocation storage and the last term represents the
224
+ dynamic recovery. The storage rate is commonly related to the Mean Free Path (MFP)
225
+ of dislocations L^i, which represents the average distance covered by dislocations before
+ their temporary or permanent storage.
227
+ The MFP of dislocations typically depends upon the
228
230
+ loading axis and thus the number of active slip systems. In [24, 26], Kubin et al. proposed
231
+ a simple decomposition of the MFP into elementary ingredients: i) the rate p_0 of forming
+ a junction that will ultimately store a dislocation segment, ii) the average length ⟨l⟩ of
+ stored dislocation segments, and iii) the average length ⟨l_j⟩ of junction segments, both of
+ which scale with the dislocation density: ⟨l⟩ = k_0 / \sqrt{\bar{a}\rho} and ⟨l_j⟩ = κ⟨l⟩.
+ \bar{a} is the average interaction coefficient for the considered orientation. Statistical
+ analysis of DDD results showed the parameters k_0, p_0 and κ to be constants independent of
+ the loading direction. The MFP can
237
+ thus be written:
238
+ L^i = \frac{\mu b^2}{\tau_c^i} \left[ \frac{\sqrt{\bar{a}} \, n (1+\kappa)^{3/2}}{p_0 k_0 (n - 1 - \kappa)} \right]    (4)
245
+ The third term of Eq. (3) is associated with the dynamic recovery occurring at large
+ dislocation density. y is related to the critical distance at which two dislocations can
+ easily annihilate, which may be measured from atomistic data. Here, y takes different values
+ depending on the loading axis to reproduce the anisotropy of the onset of stage III observed
+ on experimental deformation curves of single-crystals [30, 31].
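+ The storage-recovery balance of Eq. (3) can be integrated numerically in a few lines. The
+ sketch below treats the single glide case only, with a'_0 and K_I taken from Table I and an
+ assumed annihilation distance y; it is an illustration, not the Z-Set implementation used here:
+ import numpy as np
+
+ A0, K_I = 0.122, 90.0   # a'_0 and K_I from Table I
+ Y = 1.0e-9              # annihilation distance y (m), assumed value
+ B = 2.56e-10            # Burgers vector of Cu (m), assumed value
+
+ def drho_dgamma(rho):
+     # single-glide storage minus dynamic recovery, Eq. (3)
+     return (np.sqrt(A0 * rho) / K_I - Y * rho) / B
+
+ gamma, rho, dg = 0.0, 1e12, 1e-4
+ while gamma < 1.0:              # explicit integration up to 100% shear
+     rho += drho_dgamma(rho) * dg
+     gamma += dg
+ print(f"rho after gamma = {gamma:.1f}: {rho:.3e} m^-2")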
250
+ Table I. List of physical parameters employed in the CP simulations of the cyclic deformation
+ of fcc single crystals. Most of these parameters come from DDD simulations for monotonic or
+ cyclic deformation; a few parameters come from the experimental literature, but for monotonic
+ loading only. No parameters or equations were fitted to the cyclic deformation literature. The
+ resulting FEM simulations are therefore truly predictive of cyclic deformation.
+ a'_0 (self) = 0.122 | a_ortho (Hirth) = 0.07 | a_2 (glissile) = 0.137 | a_3 (Lomer) = 0.122 | a_colli (collinear) = 0.625 | ρ_ref = 10^12 m^-2 | ρ_0 = 10^12/n_sys m^-2
+ K_I = 90 | K_112 = 10.42 | K_111 = 7.29 | K_001 = 5 | C_a = 0.6 ± 0.1 | A_p = 2 ± 0.6 | C_p = 2.3 ± 0.3
+ γ̇_app = n_sys × γ̇^i_0 | γ̇^i_0 = 10^-4 s^-1 | m = 35 | y_I (SG, [112]) = 0.5 nm | y_001 = 3.6 nm (Ni), 3.4 nm (Cu) | y_111 = 2 nm (Ni), 1.5 nm (Cu)
295
+ When considering the cyclic deformation, DDD simulations [13, 21] showed that dislocation
+ evolution is still associated with the storage of segments in the wake of avalanches at
+ the fringe of the area swept by dislocations, with some similarities to what was observed
+ in alloys [32]. Equation 3 is thus still valid. However, the reduced storage (mechanism ii
+ in the introduction) observed at the loading reversal, where the stored segments are simply
+ unwound, needs to be taken into account through a modified MFP (through a change of the
+ dislocation locking rate p_0). A second reversibility function r_p is thus introduced
+ [13, 21] as:
305
+ p_0^{bck} = (1 - r_p) \, p_0, \quad \text{with} \quad r_p = A_p \exp\!\left( - \frac{\gamma_{bck}^i}{C_p b \sqrt{\Delta\rho_{pr}^i}} \right)    (5)
319
+ where A_p and C_p are two additional constants measuring the initial MFP drop and the
+ transient length of the reduced MFP, respectively. Statistical analysis over the transient
+ observed in DDD showed that A_p = 2 ± 0.6 and C_p = 2.3 ± 0.3. Interestingly, the transient on
+ the reduced MFP (ii) is much larger than the one on the reduced junction stability (i). These
323
+ different transients explain the non-monotonic response at the beginning of the backward
324
+ deformation that is sometimes observed on experimental deformation curves [33–35].
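+ Both reversibility functions are cheap to evaluate. A minimal sketch, assuming a Cu-like
+ Burgers vector and an example value for the density stored over the previous cycle (the
+ DDD-averaged constants are those of Table I):
+ import math
+
+ C_A, A_P, C_P = 0.6, 2.0, 2.3  # C_a, A_p, C_p from Table I
+ B = 2.56e-10                   # Burgers vector (m), assumed value
+
+ def r_a(gamma_bck, drho_pr):
+     # transient weakening of junction strength after load reversal, Eq. (2)
+     return math.exp(-gamma_bck / (C_A * B * math.sqrt(drho_pr)))
+
+ def r_p(gamma_bck, drho_pr):
+     # transient drop of the locking rate p_0 (reduced storage), Eq. (5)
+     return A_P * math.exp(-gamma_bck / (C_P * B * math.sqrt(drho_pr)))
+
+ drho_pr = 5e11  # density stored over the previous cycle (m^-2), example value
+ for g in (0.0, 1e-4, 1e-3):
+     print(g, r_a(g, drho_pr), r_p(g, drho_pr))
+ Note that r_p > 1 right after the reversal, so the effective storage rate (1 - r_p) p_0 is
+ initially negative: the density first decreases as stored segments are unwound, consistent
+ with the mechanism described above.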
325
+ Finally, the flow rule provides a closed form to the CP framework in connecting the plastic
+ activity \dot{\gamma}^i on the active system to the critical resolved shear stress \tau_c^i
+ and the applied shear stress \tau^i:
329
+ \dot{\gamma}^i = \dot{\gamma}_0^i \left( \frac{\tau^i}{\tau_c^i} \right)^m    (6)
336
+ In the case of fcc metals at room temperature, the strain rate sensitivity is related to the
+ formation and dragging of jogs along dislocation lines. Here, the constant \dot{\gamma}_0^i
+ is taken as \dot{\gamma}_{app}/n_{act}, with \dot{\gamma}_{app} the applied shear rate and
+ n_{act} the number of active slip systems. The sensitivity exponent is chosen as m ≥ 35 to
+ stay as close as possible to the Schmid criterion.
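+ With such a large exponent, Eq. (6) behaves almost like a threshold. A two-line sketch (the
+ reference rate below is an assumed example value):
+ def slip_rate(tau, tau_c, gamma0=2.5e-5, m=35):
+     # gamma_dot^i = gamma0 * (tau / tau_c)^m, Eq. (6)
+     return gamma0 * (tau / tau_c) ** m
+
+ # A 2% overstress roughly doubles the slip rate (1.02**35 ~ 2), i.e. the
+ # response is nearly rate-independent, close to the Schmid criterion.
+ print(slip_rate(1.02, 1.0) / slip_rate(1.0, 1.0))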
341
+ The proposed CP modeling focuses on some fundamental mechanisms of dislocations
+ that are described by average constants obtained from statistical analysis of DDD
+ results. The resulting FEM simulations are expected to be general and representative of
+ pure fcc systems. The simulations do not include plastic localization or finite geometry
+ effects for now.
346
+ Great care has been paid to the numerical resolution of the set of ODEs presented above
+ using the Z-Set Finite Element software and Matlab. We employ a doubly nested Newton-
+ Raphson implicit scheme to solve the corresponding non-linear equations and obtain ρ^i and τ^i
+ for a given time step. Gradients required in the Newton-Raphson are expressed analytically,
+ and a relative convergence criterion was set to 10^-7. The representative volume was set to
+ a single point of integration in order to simulate several thousands of cycles of deformation;
+ finite geometry effects are left for a future work. The parametrization and resolution
353
422
+ Figure 1. a. One-to-one comparison between the CP prediction (thick continuous lines) and the
423
+ reference data from Takeuchi [30] (circles) on the monotonous deformation of Cu single-crystals
424
+ at RT. Key single-crystal orientations are chosen. b. Extension of the CP prediction until true
425
+ saturation of the stress is obtained, in the absence of finite geometry and plastic localization effects.
426
+ c. Corresponding total dislocation density evolutions in log-log scale. Note that the saturation
427
+ density for the [001] and [111] simulations is of the order of 10^15 m^-2, which is still lower than the
428
+ saturation density obtained in recent large scale MD simulations of single-crystals of Al [36].
429
+ strategy were validated in [13] by the one-to-one comparison of CP predictions with the
430
+ reference DDD simulations.
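+ As a rough illustration of the implicit scheme, a backward-Euler step of the density
+ evolution of Eq. (3) can be solved by a scalar Newton-Raphson with an analytic gradient.
+ This is a much-simplified, single-equation sketch of the doubly nested scheme described
+ above, with assumed parameter values:
+ import math
+
+ A0, K_I, Y, B = 0.122, 90.0, 1e-9, 2.56e-10  # y and b are assumed values
+
+ def g(rho):        # storage-recovery rate drho/dgamma (single glide)
+     return (math.sqrt(A0 * rho) / K_I - Y * rho) / B
+
+ def dg_drho(rho):  # analytic derivative of g
+     return (0.5 * math.sqrt(A0 / rho) / K_I - Y) / B
+
+ def implicit_step(rho_n, dgamma, tol=1e-7):
+     # solve rho = rho_n + dgamma * g(rho) with Newton-Raphson
+     rho = rho_n
+     for _ in range(50):
+         f = rho - rho_n - dgamma * g(rho)
+         rho_new = rho - f / (1.0 - dgamma * dg_drho(rho))
+         if abs(rho_new - rho) < tol * rho:
+             return rho_new
+         rho = rho_new
+     raise RuntimeError("Newton-Raphson did not converge")
+
+ print(implicit_step(1e12, 1e-3))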
431
+ III.
432
+ RESULTS
433
+ A.
434
+ Saturation stress in monotonic deformation in absence of damage
435
+ In this first section, we focus on the basic case of the single-crystal deformed monotonically
436
+ in tension. This constitutes a well-posed problem, for which reference experimental data
437
+ exists. The plastic response is mostly homogeneous during most of the deformation, in the
438
+ sense that the slip activity is similar in all parts of the single-crystalline sample [37]. We will
439
+ see later that this basic plastic response can be connected to cyclic deformation.
440
+ First, to demonstrate the validity of the CP framework and its parametrization from
441
+ DDD results, we start with a one-to-one confrontation of the CP prediction with some of
442
+ the reference data from the experimental literature on single crystal deformation [30]. Such
443
+ experiments are rather delicate to perform as the material state and experimental conditions
444
+ can have a dramatic impact on the mechanical response, such as the impurity content, the
445
447
+ precise orientation of the sample, or whether the jaws can rotate to accommodate crystalline
448
+ rotation and ensure uniaxial deformation. Here, we compare our CP simulations with the
449
+ reference work from Takeuchi on Cu single-crystals for selected loading directions: [112],
450
+ [111] and [001] directions. The initial density was slightly adjusted in the simulations, to
+ about 10^12 m^-2, to capture the initial flow stress.
452
+ This comparison is shown in Figure 1.a, where a nice quantitative agreement is found
453
+ between the simulations and the experimental reference data. The classical picture of the
454
+ response of the single crystal is recovered here. The initial hardening rate depends upon
455
+ the orientation and the number of slip systems activated simultaneously, e.g. in an increasing
456
+ order: [112], [111] and [001] having two, three and four slip systems activated simultaneously.
457
+ For [111] and [001] loading directions, these numbers of slip systems correspond to only half
+ of the six or eight possible slip systems, as these form pairs of collinear interactions. The collinear
459
+ interaction is known to be a specific reaction among junctions, as it leads to the annihilation
460
+ of the intersecting segments of dislocations and is associated with a very large hardening due
+ to the shortening of mobile segments. DDD simulations have shown that, when starting
+ with pairs of collinear systems, one of the collinear systems will take over the other for [111] or
463
+ [001] simulations [38]. Another notable aspect of the single-crystal response is the anisotropic
464
+ onset of stage III, when the dynamic recovery becomes noticeable. The dynamic recovery
465
+ of the [111] in particular is weaker, so much so that the [111] curve eventually crosses the
466
+ initially steepest [001] curve at a deformation of about 40% of plastic strain.
467
+ In agreement with the response of ductile materials, prior to the fracture of the sample,
468
+ the experimental flow stress decreases after a maximum corresponding to the striction of
469
+ the sample. This decrease is expectedly absent in the CP simulations as plastic localization
470
+ and fracture mechanics are not included. Fracture mechanics may be included in different
+ manners in FEM, for example using a cohesive zone framework [39, 40], in connection with
472
+ atomistic mechanisms controlling the debonding of matter [41–43]. For now, let us focus on
473
+ the plastic response in the absence of fracture mechanisms. For a deformation corresponding
474
+ to the experimental fracture, the hardening rate in the simulations still has a non-zero value.
475
+ When recalling that equation 3 was proposed initially to saturate, the CP deformation curves
476
+ are thus expected to saturate at a larger deformation.
477
+ We thus expanded the deformation range until saturation of the stress is obtained, and
478
+ this is shown in figure 1.b for Cu. All curves eventually saturate; the corresponding deforma-
479
481
+ tion at saturation is however rather large, even beyond 100% for the [001] and [111] curves.
482
+ The saturation stress in monotonous conditions τ_sat,mo follows a clear hierarchy, with
+ the single glide condition [135] having the smallest saturation stress of 28 MPa, and [112],
+ [001] and [111] having the largest, up to τ_sat,mo = 140 MPa. Since y_c is material independent
+ for the [135] and [112] curves, the τ_sat,mo for these curves scales nicely with the shear
+ modulus of the considered materials. These saturation stress values will be exploited a bit further.
487
+ From a theoretical point of view, this saturation stress observed in the simulation can
+ be predicted from the set of constituting ODEs of the CP framework. The starting point is
+ obviously the dislocation density evolution, which has to be set to zero. In the case of single
+ glide, the resolution is straightforward, leading to:
491
+ \rho_{sat,I} = \frac{a'_0}{K_I^2 \, y_{c,I}^2}    (7)
499
+ The stable multislip conditions are a bit less straightforward to solve, as the interaction
+ coefficients exhibit a dependence upon the dislocation density, inherited from the simplified
+ line tension that is assumed in the critical stress equation [28, 44]. When the dislocation
+ density is large this correction becomes important, which is the case at the large saturation
+ stresses under consideration here. The logarithmic correction is defined as
+ c(\rho^i) = \ln\!\left( 1 / (b \sqrt{a_{ij} \rho^j}) \right) / \ln\!\left( 1 / (b \sqrt{a_{ij} \rho_{ref}}) \right),
+ where \rho_{ref} is the reference dislocation density used to determine the interaction
+ coefficient a_{ij}. The saturation density is now:
517
+ \rho_{sat,hkl} = \left[ \frac{\sqrt{a'_0}}{K_I y_{hkl}} + \frac{\sqrt{(n_{act} - 1) \, \bar{a} \, c(\rho^i)}}{K_{hkl} \, y_{hkl}} \right]^2    (8)
532
+ where n_{act} is the number of activated systems. This is an implicit equation for \rho^i,
+ but since the density is contained in a logarithmic function, it converges after only a few
+ iterations to evaluate \rho_{sat}. Finally, the saturation stress is obtained by inserting
+ \rho_{sat} into the critical stress:
536
+ \tau_{sat} = \mu b \sqrt{\bar{a} \, n_{act} \, \rho_{sat}}    (9)
538
+ In the previous equations, the saturation stresses are solely functions of the interaction coefficients,
539
+ MFP and y. In the model, these quantities are defined from statistical averages over evolv-
540
+ ing dislocation microstructures, and are weakly impacted by the nature of the fcc system
541
+ under consideration (at least to a first order approximation). The initial dislocation
542
+ microstructure or density are absent from these equations. The saturation stresses appear
543
608
+ Figure 2.
609
+ The saturation shear stress τsat,cy for cyclic deformation in fcc metals measured in a
610
+ number of experimental studies [4–10] compared to the prediction of the CP model in [13, 21] and
611
+ to the theoretical saturation stress in monotonous conditions from the previous section. Data is
+ shown as a function of the loading direction - a) initially single glide condition, b) [001] and
+ c) [111] - for Cu, Ni and Ag, and of the plastic strain increment γ_p,cy.
614
+ thus as a fundamental dislocation property representing the balance of storage and recovery
615
+ processes among dislocations in fcc crystals.
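+ The implicit Eq. (8) indeed converges in a few fixed-point iterations. The sketch below
+ evaluates ρ_sat and the corresponding τ_sat of Eq. (9) for an [001]-like case; the shear
+ modulus, Burgers vector and average interaction coefficient are assumed values, so the
+ printed numbers are only illustrative:
+ import math
+
+ MU, B = 42e9, 2.56e-10             # Cu-like mu and b, assumed values
+ A0, ABAR = 0.122, 0.35             # a'_0 (Table I) and an assumed average a-bar
+ K_I, K_HKL, Y = 90.0, 5.0, 3.4e-9  # K_I, K_001 and y_001 (Cu) from Table I
+ N_ACT, RHO_REF = 4, 1e12
+
+ def log_correction(rho):
+     # c(rho) = ln(1/(b sqrt(abar rho))) / ln(1/(b sqrt(abar rho_ref)))
+     return math.log(1.0 / (B * math.sqrt(ABAR * rho))) / \
+            math.log(1.0 / (B * math.sqrt(ABAR * RHO_REF)))
+
+ rho = 1e14                         # initial guess
+ for _ in range(10):                # Eq. (8) converges in a few iterations
+     c = log_correction(rho)
+     rho = (math.sqrt(A0) / (K_I * Y)
+            + math.sqrt((N_ACT - 1) * ABAR * c) / (K_HKL * Y)) ** 2
+
+ tau_sat = MU * B * math.sqrt(ABAR * N_ACT * rho)   # Eq. (9)
+ print(f"rho_sat = {rho:.3e} m^-2, tau_sat = {tau_sat/1e6:.0f} MPa")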
616
+ An approximate solution for the evolution ρ^i(γ^i) is provided in the appendixes for single
+ and stable multislip conditions. These functions can be used to interpret or fit experimental
+ data. In monotonous conditions, the dislocation density increases monotonically until
+ saturation. The strain corresponding to the saturation density can be estimated from the
+ density evolution as an integral:
621
+ \Delta\gamma^i = \int_{\rho_0}^{\rho_{sat}} \left\{ \frac{1}{b} \left[ \frac{\sqrt{\sum_j a_{ij} \rho^j}}{K_{hkl}} + \frac{\sqrt{a'_0 \rho^i}}{K_I} - y \rho^i \right] \right\}^{-1} d\rho^i    (10)
643
+ (10)
644
+ This only provides an estimation as the saturation density is only reached as γi tends to
645
+ +∞. We now recover a dependence upon the initial dislocation density ρ0 that affects the
646
+ amount of deformation required before saturation.
647
+ B.
648
+ Saturation stress in cyclic deformation in single crystals
649
+ The CP framework can be applied to alternating loadings and the simulations capture
650
+ most of the features of cyclic deformation [10, 45]. The maximum stress reached at each
651
+ cycle increases monotonically until spontaneously saturating. For example in single glide
652
+ 12
653
+
654
+ conditions, we recover the so-called three stage curves of the now saturation stress in cyclic
655
+ deformation τsat,cy with respect to the shear increment per cycle γp,cy. This is shown in figure
656
+ 2.a for some fcc metals in comparison with most of the existing experimental literature [4–
657
+ 10]. In the central plateau (or pseudo plateau) the microstructure typically transform from
658
+ no Persistent Slip Band (PSB) to progressively fully constituted of PSB [10, 45]. The plateau
659
+ ends for strain increments of the order of 1% when a secondary slip system activates. In the
660
+ CP simulations, the center region of the curve is not exactly a plateau as it exhibits a slight
661
+ slope.
662
+ Figure 2.b and 2.c show the saturation stress for cyclic deformation of Cu and Ni single
663
+ crystals oriented along [001] and [111] in stable multislip conditions. The saturation stress
664
+ τsat,cy increases here monotonically without the presence of stages. The slope of the exper-
665
+ imental curves seems however to decrease as the strain increment per cycle γp,cy increases.
666
+ This effect is more apparent on the data concerning Ni single crystals. The CP predictions of
667
+ the τsat,cy are in nice qualitative and quantitative agreement with the existing experimental
668
+ data. The CP predictions seem however to overestimate the saturation stress at larger γp,cy,
669
+ and this was suggested to be a consequence of the absence of dislocation microstructure in
670
+ the CP simulations [13].
671
+ The saturation stress τsat,cy can be also obtained from the analytical resolution of the
672
+ set of ODE presented in section 2. The resolution is however a bit more delicate with the
673
+ presence of the two reversible functions ra and rp that are themselves function of γi and the
674
+ dislocation density ∆ρi stored on the previous cycle. In cyclic deformation, we highlight that
675
+ the saturation stress does not mecessary correspond to an horizontal tangent of the shear
676
+ strain-shear stress curves of the considered cycle (except in the large γp,cy limit discussed
677
+ a bit further). At the end of a cycle at saturation, the hardening may well be non zero,
678
+ but wince the cycle starts with a large dislocation density decrease, the increment over the
679
+ entire cycle is null. To obtain the saturation density, one has to solve the integrated density
680
+ evolution function:
681
+ ∆ρi = 0 =
682
+ � γp,cy/nact
683
+ 0
684
+ 1
685
+ b
686
+
687
+
688
+ �� aijρj
689
+ Khkl
690
+ +
691
+
692
+ a′
693
+ 0ρi
694
+ KI
695
+ − yρi
696
+
697
+ � dγi
698
+ (11)
699
+ Therefore, the dislocation evolution ρi(γi) must be solved first contrary to the monotonous
700
+ case. This can be done if the logarithm correction is approximated as a Taylor series where
701
+ the dislocation ρi in the current cycle is in the proximity of ρi
702
+ sat. The details are given in
703
+ 13
704
+
705
+ the Appendixes. The resolution leads once more to an implicit equation to obtain ρi
706
+ sat.
707
+ Next, we connect the saturation stresses obtained in monotonous and cyclic deformation.
708
+ Previous section has shown that the saturation stress in monotonous conditions can be
709
+ obtained for rather large deformation amount. These saturation stress results could thus be
710
+ drawn as well on the figure 2 for one cycle of the corresponding amount γp,cy. One would
711
+ expect that the τsat,mono would act as an asymptotic limit to the τsat,cy curves. This is exactly
712
+ what is obtained when considering the case of single glide condition. The saturation stresses
713
+ obtained in single glide and monotonous conditions in previous section are strikingly close
714
+ to the saturation stress at the plateau in cyclic conditions. The τsat,cy curves obtained from
715
+ experiments or from the CP simulations asymptotically converge towards τsat,mono.
716
+ When now considering the third stage of the curves in figure 2.a or the multislip condi-
717
+ tions in figure 2.b and 2.c, the agreement with experiments is a bit less obvious. The CP
718
+ simulations are carried out past the range of γp,cy considered in the experiments. Similarly
719
+ to the single glide condition, the simulated τsat,cy curves converges towards the τsat,mono limit
720
+ defined in previous section. In figure 2.a, the third stage converges certainly toward the
721
+ τsat,mono in secondary double glide, that is probably out of reach. The agreement with the
722
+ experimental data is rather good for the case of Cu. However, the quantitative agreement
723
+ between with the model is a bit less good for Ni single crystal data, nonetheless the exper-
724
+ imental data seem to enter a plateau for the largest γp,cy considered experimentally, which
725
+ could be well agree with the analysis proposed here, but not the quantitative values of the
726
+ saturation stress. The τsat,mono being here overestimated in the model due maybe because
727
+ of some specific parameters less well documented for Ni, such as yhkl or the stronger impact
728
+ of microstructures that are absent in the model.
729
+ C.
730
+ Saturation stress in polycrystals
731
+ This last section aims at connecting our results on the single crystal to the more complex
732
+ and general polycrystalline system. What comes next is simply an illustration as the plastic
733
+ response of the polycrystals is the average of many different features or elementary mech-
734
+ anisms that are not addressed here, ranging from the crystallographic texture, grain size
735
+ and morphology, and dislocation-grain boundary interactions. Besides, simulating using CP
736
+ FEM the cyclic deformation over more than 10,000 cycles while accounting for the a rep-
737
+ 14
738
+
739
+ 10-6
740
+ 10-5
741
+ 10-4
742
+ 10-3
743
+ 10-2
744
+ 10-1
745
+ 0
746
+ 50
747
+ 100
748
+ 150
749
+ 200
750
+ 250
751
+ σsat,cy (MPa)
752
+ Δϵp,cy/2
753
+ Figure 3.
754
+ compilation of saturation stresses obtained experimentally on polycrystalline Cu with
755
+ large grain size [46] as function of strain increment ∆ϵp,cy. The data covers the work from Sax-
756
+ ena and Antolovich (1975, squares), Figueroa et al. (1981, circles), Mughrabi and Wang (1981,
757
+ triangles) and Rasmussen and Pederson (1980, diamonds)
758
+ resentative polycrystalline microstructure seems computationally out of reach, still to date.
759
+ This being said we reprise a part of the analysis initiated by Magnin et al. in [46], where
760
+ they compiled the cyclic response of polycrystalline Cu with relatively large grains. The
761
+ collection of saturation stresses σsat from the literature is displayed in figure 3. These curves
762
+ typically show an increase in the σsat with the plastic increment per cycle ϵp,cy, some curves
763
+ may suggest a quasi plateau regime starting for ϵp,cy > 10−5 and ending around ϵp,cy ≈ 10−3.
764
+ The deformation microstructures transform from veines to PSB that are typical of the pat-
765
+ terns observed in single glide conditions in single crystals. For ϵp,cy >≈ 10−3, the saturation
766
+ stress increases more rapidly. The deformation microstructures now correspond to mazes,
767
+ that are typical of the patterns observed in single-crystals deformed in multislip conditions.
768
+ The polycrystalline plastic response can be understood in terms of effective single-crystal
769
+ response through the so-called Taylor coefficient M, which depends upon the crystallographic
770
+ texture. The so-called Hall-Petch effect is neglected and this is a valid assumption in the
771
+ case of large grain polycrystals. In the absence of precise M measures, the Taylor or Sachs
772
+ hypothesises are commonly considered as bounding limits. The figure 4 shows theses two
773
+ transformations of the polycrystal saturation stress into a single crystal approximation as
774
+ 15
775
+
776
+ 10-5
777
+ 10-4
778
+ 10-3
779
+ 10-2
780
+ 10-1
781
+ 10
782
+ 20
783
+ 30
784
+ 40
785
+ 50
786
+ 60
787
+ 70
788
+ Cu
789
+ s.g.
790
+ [001]
791
+ [111]
792
+ PolyX Sachs
793
+ PolyX Taylor
794
+ γp,cy
795
+ τsat,cy (MPa)
796
+ Figure 4.
797
+ Comparison of the cyclic saturation shear stress of single crystals and polycrystals in
798
+ pure Cu from Mughrabi [47] and Wang using the Taylor coefficient following the procedure from
799
+ Magnin et al. [46].
800
+ τsat = σsat/M. The Sachs is typically a good approximation at small strain, while the Taylor
801
+ approximation works well at large strain. The average single crystal behaviour is expected
802
+ to follow Sacks approximation first and transition to the Taylor ones at larger deformation.
803
+ These curves will be compared with the saturation stress obtained experimentally on single
804
+ crystals. At small strain, one might expect that grains in the polycrystal deform in sin-
805
+ gle glide condition as suggested by the dislocation microstructures, and we see a sticking
806
+ agreement between the single crystal τsat and the Sachs approximation for ϵp,cy < 5.10−4.
807
+ For very small strain, so grains may remain in the elastic regime. For large deformation,
808
+ one might expect that all grains to deform in multislip conditions, in accordance with the
809
+ maze microstructure observed experimentally, and the [001] curve in particular remains be-
810
+ tween the two approximations. Between these two extreme situations, the polycrystalline
811
+ behaviour is certainly a composite response of the single glide and multislip grain activity,
812
+ with more and more grains transitioning into multislip as γp,cy is increased.
813
+ These correlations may thus explain the shape of the raw σsat curves from the previous
814
+ figures. Most grains of the polycrystals are thus following the single crystal response, which
815
+ will eventually reach a saturation stress corresponding to τsat,mono of 28 MPa. The plateau in
816
+ 16
817
+
818
+ the single-crystal system is transformed in a quasi-plateau in the polycrystalline system as
819
+ more grains transition into multislip conditions. At large ϵp,cy = ϵIII ≈ 10−3 the polycrystal
820
+ enters stage III where most of the grains are deformed in multislip conditions. Interestingly,
821
+ the curves for single slip and [001] obtained for the single crystal cross for a deformation
822
+ ≈ MγIII = M ∗ 3e−3 that corresponds well to the onset of the third stage ϵIII observed
823
+ in the figure 4. The cyclic behaviour of the polycrystal with large grains seems thus to
824
+ correspond simply to the average of the individual single grain behaviour, similar to what
825
+ is accepted for polycrystals in monotonic deformations.
826
+ Finally going back to our CP framework, we showed that its predictions were in quan-
827
+ titative agreement with the single crystal cyclic response, and we proposed expressions to
828
+ predict the saturation stress in these conditions. The previous qualitative analysis means
829
+ that we can use these results to estimate the saturation stress of the polycrystal with large
830
+ grains as well.
831
+ IV.
832
+ CONCLUSION
833
+ In this paper, we employ a physically based CP FE model derived from our recent DDD
834
+ analysis [13, 21] to analyse the saturation stresses obtained in monotonic and cyclic defor-
835
+ mations of single-crystals and polycrystals of fcc metals.
836
+ • First, we compare the model results to reference strain-stress deformation curves from
837
+ the experimental literature on Cu single-crystals. In the absence of plastic localization
838
+ and fracture mechanism, the CP model expectedly saturates at large strain.
839
+ • For cyclic deformations, the CP model reproduces the cyclic saturation over various
840
+ loading conditions and fcc metals. The saturation stress observed in monotonic con-
841
+ ditions acts as an asymptotic behaviour at large cycle increment strain γp,cy to the
842
+ cyclic deformation. This asymptotic behaviour is clearly reached in the plateau stage
843
+ of single-glide conditions for several fcc metals and for Ni single crystals in multi-slip
844
+ conditions at large strain increment.
845
+ • From an analysis of the experimental literature on large grain polycrystals, we have
846
+ shown that the polycrystalline response to cyclic deformation corresponds to the re-
847
+ sponse of an aggregate of effective single-crystals. At small strain increment, grains
848
+ 17
849
+
850
+ are deformed elastically or in single slip, a quasi plateau can thus be sometimes seen
851
+ as in single-crystals. Then at larger strain, grains transition into multislip conditions
852
+ with larger saturation stresses.
853
+ • We proposed analytical or approximated solutions of the CP ODE to predict the
854
+ saturation stresses in single or multislip conditions for single crystals. These models
855
+ can be employed to interpret experimental data.
856
+ • These results were obtained in the absence of plastic localisation and dislocation mi-
857
+ crostructure, which means that these features have only a secondary order impact on
858
+ the macroscopic behaviour.
859
+ • The saturation stress in monotonic or cyclic conditions appear as a fundamental prop-
860
+ erty of dislocations mechanism as it is simply related to averages of dislocations inter-
861
+ actions (through the interaction coefficients), dynamical recovery mechanism (through
862
+ y), and reversibility part of the microstructure.
863
+ • These quantities are phenomenological averages from DDD, where large number of
864
+ binary interactions are occurring in absence of patterns.
865
+ • Experimental data on the saturation stress can thus now be used to define these
866
+ physical quantities and the procedure can be applied to other bcc or hcp crystalline
867
+ systems.
868
+ [1] J. Bauschinger, ¨Uber die ver¨anderung der elastizit¨atsgrenze und der festigkeit des eisens und
869
+ stahls durch strecken und quetschen, durch erw¨armen und abk¨uhlen und durch oftmals wieder-
870
+ holte beanspruchung, Mitteilungen des mechanisch-technischen Laboratoriums der K¨oniglich
871
+ Technischen Hochschule M¨unchen 13 (1886).
872
+ [2] L. Brown and W. Stobbs, The work-hardening of copper-silica, Philosophical Magazine 23,
873
+ 1201 (1971).
874
+ [3] A. S. Argon, Strengthening mechanisms in crystal plasticity, Oxford series on materials mod-
875
+ elling No. 4 (Oxford Univ. Press, Oxford, 2008) oCLC: 255673019.
876
+ 18
877
+
878
+ [4] A. S. Cheng and C. Laird, Mechanisms of fatigue hardening in copper single crystals: The
879
+ effects of strain amplitude and orientation, Materials Science and Engineering 51, 111 (1981).
880
+ [5] H. Mughrabi, The cyclic hardening and saturation behaviour of copper single crystals, Mate-
881
+ rials Science and Engineering 33, 207 (1978).
882
+ [6] T. Lepist¨o, V.-T. Kuokkala, and P. Kettunen, Dislocation arrangements in cyclically deformed
883
+ copper single crystals, Materials Science and Engineering 81, 457 (1986).
884
+ [7] J. Bretschneider, C. Holste, and B. Tippelt, Cyclic plasticity of nickel single crystals at elevated
885
+ temperatures, Acta Materialia 45, 3775 (1997).
886
+ [8] B. Gong, Z. Wang, and Z. Wang, Cyclic deformation behavior and dislocation structures of
887
+ [001] copper single crystals—I Cyclic stress-strain response and surface feature, Acta Materi-
888
+ alia 45, 1365 (1997).
889
+ [9] P. Li, Z. Zhang, X. Li, S. Li, and Z. Wang, Effect of orientation on the cyclic deformation
890
+ behavior of silver single crystals: Comparison with the behavior of copper and nickel single
891
+ crystals, Acta Materialia 57, 4845 (2009).
892
+ [10] P. Li, S. Li, Z. Wang, and Z. Zhang, Fundamental factors on formation mechanism of disloca-
893
+ tion arrangements in cyclically deformed fcc single crystals, Progress in Materials Science 56,
894
+ 328 (2011).
895
+ [11] R. J. Asaro, Elastic-plastic memory and kinematic-type hardening, Acta Metallurgica 23,
896
+ 1255 (1975).
897
+ [12] H. Mughrabi, Dislocation clustering and long-range internal stresses in monotonically and
898
+ cyclically deformed metal crystals, Revue de Physique Appliquee 23, 367 (1988).
899
+ [13] S. Queyreau and B. Devincre, A Multiscale Investigation of the Physical Origins of Ten-
900
+ sion–Compression Asymmetry in Crystals and their Implications for Cyclic Behavior (2021),
901
+ working paper or preprint.
902
+ [14] M. E. Kassner, P. Geantil, L. E. Levine, and B. C. Larson, Mapping mesoscale heterogeneity
903
+ in the plastic deformation of a copper single crystal, Int. J. Mech. Sci. 100, 333 (2009).
904
+ [15] M. E. Kassner, P. Geantil, and L. E. Levine, Long range internal stresses in single-phase
905
+ crystalline materials, In Honor of Rob Wagoner, International Journal of Plasticity 45, 44
906
+ (2013).
907
+ [16] L. E. Levine, M. R. Stoudt, A. Creuziger, T. Q. Phan, R. Xu, and M. E. Kassner, Basis for
908
+ the Bauschinger effect in copper single crystals: changes in the long-range internal stress with
909
+ 19
910
+
911
+ reverse deformation, Journal of Materials Science 54, 6579 (2019).
912
+ [17] S. Buckley and Entwistle, K.M., The bauschinger effect in super-pure aluminum single crystals
913
+ and polycrystals, Acta Metallurgica 4, 352 (1956), publisher: Pergamon.
914
+ [18] A. Sleeswyk, M. James, D. Plantinga, and W. Maathuis, Reversible strain in cyclic plastic
915
+ deformation, Acta Metallurgica 26, 1265 (1978).
916
+ [19] C. D´epr´es, M. Fivel, and L. Tabourot, A dislocation-based model for low-amplitude fatigue
917
+ behaviour of face-centred cubic single crystals, Scripta Materialia 58, 1086 (2008).
918
+ [20] E. Rauch, J. J. Gracio, F. Barlat, and G. Vincze, Modelling the plastic behaviour of met-
919
+ als under complex loading conditions, Modelling and Simulation in Materials Science and
920
+ Engineering 19 (2011).
921
+ [21] S. Queyreau and B. Devincre, On the Origins of Tension–Compression Asymmetry in Crystals
922
+ and Implications for Cyclic Behavior (2020), working paper or preprint.
923
+ [22] B. Devincre, R. Madec, G. Monnet, S. Queyreau, R. Gatti, and L. Kubin, Mechanics of nano-
924
+ objects (Presses de l’Ecole des Mines de Paris, 2011) Chap. Modeling crystal plasticity with
925
+ dislocation dynamics simulations: The ’microMegas’ code.
926
+ [23] S. Queyreau, Dislocation based mechanics: the various contributions of dislocation dynamics
927
+ simulations (2020).
928
+ [24] L. Kubin, B. Devincre, and T. Hoc, Modeling dislocation storage rates and mean free paths
929
+ in face-centered cubic crystals, Acta Materialia 56, 6040 (2008).
930
+ [25] U. Kocks and H. Mecking, Physics and phenomenology of strain hardening: the fcc case,
931
+ Progress in Materials Science 48, 171 (2003).
932
+ [26] B. Devincre, L. Kubin, and T. Hoc, Physical analyses of crystal plasticity by dd simulations,
933
+ Scripta Materialia 54, 741 (2006).
934
+ [27] S. Queyreau and B. Devincre, Bauschinger effect in precipitation-strengthened materials: A
935
+ dislocation dynamics investigation, Philosophical Magazine Letters 89, 419 (2009).
936
+ [28] S. Queyreau, G. Monnet, and B. Devincre, Orowan strengthening and forest hardening super-
937
+ position examined by dislocation dynamics simulations, Acta Materialia 58, 5586 (2010).
938
+ [29] R. Madec and L. P. Kubin, Dislocation strengthening in fcc metals and in bcc metals at high
939
+ temperatures, Acta Materialia 126, 166 (2017).
940
+ [30] T. Takeuchi, Work hardening of copper single crystals with multiple glide orientations, Trans-
941
+ actions of the Japan Institute of Metals 16, 629 (1975).
942
+ 20
943
+
944
+ [31] L. Kubin, Dislocations, mesoscale simulations and plastic flow, in Oxford Series On Materials
945
+ Modelling, Vol. 5, edited by A. Sutton and R. Rudd (Oxford University Press, 2013).
946
+ [32] S. Queyreau, G. Monnet, and B. Devincre, Slip systems interactions in [alpha]-iron determined
947
+ by dislocation dynamics simulations, International Journal of Plasticity 25, 361 (2009).
948
+ [33] H. Ebener, Acoustic emission and the Bauschinger effect in Cu single crystals, Scripta Metal-
949
+ lurgica et Materialia 25, 2035 (1991).
950
+ [34] R. C. Daniel and G. T. Horne, The Bauschinger effect and cyclic hardening in copper, Met-
951
+ allurgical Transactions 2, 1161 (1971).
952
+ [35] Y. Nasu, T. Takeda, T. Tominaga, and O. Kato, The Bauschinger Effect of Aluminum Single
953
+ Crystals Tested under Plane-Strain Compression, Bulletin of JSME 27, 145 (1984).
954
+ [36] L. A. Zepeda-Ruiz, A. Stukowski, T. Oppelstrup, N. Bertin, N. R. Barton, R. Freitas, and
955
+ V. V. Bulatov, Atomistic insights into metal hardening, Nature Materials 20, 315 (2021).
956
+ [37] C. Kahloun, G. Monnet, S. Queyreau, L. Le, and P. Franciosi, A comparison of collective
957
+ dislocation motion from single slip quantitative topographic analysis during in-situ AFM room
958
+ temperature tensile tests on Cu and Fe alpha crystals, International Journal of Plasticity 84,
959
+ 277 (2016).
960
+ [38] B. Devincre, L. Kubin, and T. Hoc, Collinear superjogs and the low-stress response of fcc
961
+ crystals, Scripta Materialia 57, 905 (2007).
962
+ [39] Y. Mishin, M. Asta, and J. Li, Atomistic modeling of interfaces and their impact on mi-
963
+ crostructure and properties, Acta Materialia 58, 1117 (2010).
964
+ [40] J. R. Rice and J.-S. Wang, Embrittlement of interfaces by solute segregation, Materials Science
965
+ and Engineering: A Poceedings of the Symposium on Interfacial Phenomena in Composites:
966
+ Processing Characterization and Mechanical Properties, 107, 23 (1989).
967
+ [41] A. Van der Ven, The thermodynamics of decohesion, Acta Materialia 52, 1223 (2004).
968
+ [42] F. J. H. Ehlers, M. Seydou, D. Tingaud, F. Maurel, Y. Charles, and S. Queyreau, Ab initio
969
+ determination of the traction–separation curve for a metal grain boundary: a critical assess-
970
+ ment of strategies, Modelling and Simulation in Materials Science and Engineering 24, 085014
971
+ (2016).
972
+ [43] F. J. H. Ehlers, M. Seydou, D. Tingaud, F. Maurel, S. Queyreau, and Y. Charles, Ab initio
973
+ studies of two Al grain boundaries subjected to mixed tension/shear mode loading: how shear
974
+ may promote breakage, Modelling and Simulation in Materials Science and Engineering 25,
975
+ 21
976
+
977
+ 064001 (2017).
978
+ [44] B. Devincre, T. Hoc, and L. Kubin, Dislocation mean free paths and strain hardening of
979
+ crystals, Science 320, 1745 (2008).
980
+ [45] H. Mughrabi, Fatigue, an everlasting materials problem - still en vogue, Procedia Engineering
981
+ 2, 3 (2010).
982
+ [46] T. Magnin, J. Driver, J. Lepinoux, and L. Kubin, Aspects microstructuraux de la d´eformation
983
+ cyclique dans les m´etaux et alliages C.C. et C.F.C. II. — Saturation cyclique et localisation
984
+ de la d´eformation, Revue de Physique Appliqu´ee 19, 483 (1984).
985
+ [47] H. Mughrabi and R. Wang, Cyclic deformation of face-centred cubic polycrystals: a compar-
986
+ ison with observations on single crystals, in Deformation of Polycrystals: Mechanisms and
987
+ Microstructures. 2 nd Riso Int. Symposium on Metallurgy and Materials Science (1981) pp.
988
+ 87–98.
989
+ 22
990
+
WdAyT4oBgHgl3EQfu_n3/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
XNE1T4oBgHgl3EQfJQOq/content/tmp_files/2301.02950v1.pdf.txt ADDED
@@ -0,0 +1,1303 @@
+ GEOMETRY OF SET FUNCTIONS IN COOPERATIVE GAME THEORY
+ DYLAN LAPLACE MERMOUD
+ This job market paper will be updated regularly; please check my personal webpage for the latest version.
+ Date: January 7, 2023. 2020 Mathematics Subject Classification: Primary 91A12; Secondary 52C35, 28A10.
+ Abstract. There are many unexplored links between cooperative transferable utility games and convex, discrete or combinatorial geometry. In this paper, we investigate some of these links, such as the ones between cores of convex games and generalised permutohedra, also named polymatroids or base polyhedra. Another link that we investigate is the one between the resonance hyperplane arrangement and the sets of preimputations for which a given coalition is effective. These bridges can give interpretation and intuition to cooperative game theory, as well as bring new results and tools from other fields into the study of cooperative games.
+ 1. Introduction
+ In his pioneering work on game theory, von Neumann (1928) described an n-person game as a set of '2^n − 1 constants', representing what one coalition can obtain by itself during the game. In the next century, Postnikov (2009) characterised his newly defined generalised permutohedra by 'collections {z_I} of the 2^n − 1 coordinates z_I, for nonempty subsets I ⊆ [n], that belong to a certain deformation cone'. Furthermore, the polyhedral complex known as the braid (arrangement) fan is determined by a set of 2^n − 1 n-dimensional (0, 1)-vectors defining its rays (Barvinok, 2002). As a final example, let us mention the resonance arrangements (Kühne, 2020), previously called all-subsets arrangements (Kamiya et al., 2012), which are hyperplane arrangements where each hyperplane is the orthogonal complement of an element of a set of 2^n − 1 n-dimensional (0, 1)-vectors. As we can expect, all of these mathematical concepts are closely related and bring more understanding to one another. For instance, a generalised permutohedron is the core of a convex game (Shapley, 1971; Castillo & Liu, 2022), and a hyperplane of a generic resonance arrangement is the set of preimputations for which a specific coalition is effective. Also, the resonance arrangement cuts the space of preimputations into several polyhedral sets, corresponding to what are called feasible regions (Grabisch & Sudhölter, 2021; Laplace Mermoud et al., 2022). Likewise, the cones of a braid fan correspond to the polar regions of a game, a subset of the set of preimputations that is important in the study of domination between preimputations. Moreover, the braid arrangement and the resonance arrangement are adjoints (Aguiar & Mahajan, 2017).
+ The maximal cones of the standard braid arrangement are in bijection with the set of maximal unbalanced sets (Billera et al., 2012), the dual counterpart of the minimal balanced sets defined by Shapley (1965), which are the cornerstone of the study of the non-emptiness of the core.
+ All these concepts have their standard objects, which are related either to the game with characteristic function v defined for all coalitions S by v(S) = |S|(|S| − 1)/2, or to the game assigning 0 to every coalition. All other games then define deformations of these standard objects. In this paper, I present the formal definitions of the aforementioned concepts, and then I discuss the links between them and cooperative games. The work on projections, together with the study of deformed braid fans and of the facial configuration of cores, can lead to a resolution of the problem of knowing whether a game is convergent, i.e., whether it is balanced and its core is a von Neumann-Morgenstern stable set.
+ 2. Definitions and notation
+ Let N be a finite set of cardinality n, and denote by 2^N its power set, i.e., the set of all subsets of N. A set function on N is a mapping ξ : 2^N → R, assigning a real number ξ(S) to any subset S of N. A set function ξ is called grounded if ξ(∅) = 0.
+ Definition 1 (von Neumann, 1928). A (cooperative transferable utility) game is an ordered pair (N, v), with N a finite set whose elements are called the players, and v a grounded set function called the characteristic function of the game.
+ We denote by 𝒩 the set of nonempty subsets of N, called coalitions.
+ Throughout this paper, we denote by ξ the set functions and by v the characteristic functions of games. The interpretation of the characteristic function v that we follow is that v(S) is the quantity of resources acquired by the players in S ∈ 𝒩 after one unit of time if they cooperate to form the coalition S. One of the two main parts of cooperative game theory is to study how the coalitions share the resources acquired by their players, according to the values of the subcoalitions (including the singletons). Another question in cooperative game theory, which interests us in this paper, is to know at which scale the cooperation may occur. More specifically, we want to know at which level the commonly acquired resources can benefit any subcoalition, and subsequently whether the subcoalitions can threaten to leave the current state of cooperation to ensure a quantity that satisfies them. These two properties, called balancedness and stability, are defined in the sequel.
+ In this paper, we denote by R^N the Euclidean vector space built as the Cartesian product of n copies of R, one for each player in N. We use the same notation R^S for the space which only considers the players belonging to the coalition S. An element x ∈ R^N is called a payoff vector and associates a real value x_i with any player i ∈ N. We denote by x(S) the sum of the values associated with the players of S: x(S) = Σ_{i∈S} x_i.
+ Definition 2. A payoff vector x ∈ R^N is called
+ • a preimputation if N is effective for x, i.e., if x(N) = v(N). We denote the set of preimputations by X(N, v);
+ • an upper vector if, for all S ∈ 𝒩, we have x(S) ≥ v(S). We denote the set of upper vectors by U(N, v).
+ The set X(N, v) forms a hyperplane of R^N, and the set U(N, v) (van Gellekom et al., 1999) is a polyhedral set unbounded from above. A preimputation represents an allocation of the value of the grand coalition N among its players, while an upper vector is an allocation that benefits every coalition. A preimputation is Pareto optimal: increasing some coordinates without changing their total sum implies decreasing some other coordinates. We denote by A_S(N, v) the set of preimputations for which S is effective, i.e.,
+ A_S(N, v) := {x ∈ X(N, v) | x(S) = v(S)}.
+ If there is no risk of confusion, we denote these sets by A_S. Notice that these sets are affine subspaces of codimension 2, and have the same normals for any game (N, v), depending only on S and not on the characteristic function. The latter is only responsible for the relative positions of these affine subspaces, as increasing the value v(S) of a coalition solely shifts the subspace along its normals.
+ Each A_S cuts the hyperplane X(N, v) into two halves, denoted by A≥_S := {x ∈ X(N, v) | x(S) ≥ v(S)} and A≤_S := {x ∈ X(N, v) | x(S) ≤ v(S)}. Each preimputation in A≥_S benefits the coalition S. The preimputations that benefit every coalition, i.e., preimputations which are also upper vectors, are said to be coalitionally rational, and their set, denoted by
+ C(N, v) := ⋂_{S∈𝒩} A≥_S = X(N, v) ∩ U(N, v),
+ is called the core (Gillies, 1959). When nonempty, the core is a bounded convex polyhedral set, i.e., a polytope, lying in the hyperplane X(N, v).
+ A game with a nonempty core, i.e., for which there exists a preimputation benefiting every coalition, is called a balanced game. The reason for this denomination will be discussed in Section 4. A balanced game (N, v) is a game for which there is no better state of cooperation for the players than the grand coalition N. It implies in particular that there exists no partition of N such that the sum of the values of the partition's blocks is strictly greater than the value of N. For illustration, let (N, v) be a game with N = {a, b, c} and the values:
+ S      {a}  {b}  {c}  {a,b}  {a,c}  {b,c}   N
+ v(S)    5    0    0     8      8      8    10
+ This game is not balanced because the players will prefer to be organised as {{a}, {b, c}} and get 13 rather than form the grand coalition N and get 10. But it is not sufficient to look at the partitions of N to know whether the core is nonempty. Players can spend fractions of their time in different coalitions, as we can see in the following example. Let (N, v) be a game with N = {a, b, c} and the values:
+ (1)
+ S      {a}  {b}  {c}  {a,b}  {a,c}  {b,c}   N
+ v(S)    0    0    0     8      8      8    10
+ The only difference between these games is v({a}), but it is sufficient for the players to prefer the grand coalition to the organisation {{a}, {b, c}}. But what if each player spends half of their time with one of the other players and the other half with the remaining one? They obtain
+ (1/2) v({a, b}) + (1/2) v({a, c}) + (1/2) v({b, c}) = (3 · 8)/2 = 12,
+ which is still greater than the 10 they can get by forming the grand coalition. Therefore this game is also not balanced. The set B = {{a, b}, {a, c}, {b, c}} together with the system of weights λ^B = (λ_{a,b} = 1/2, λ_{a,c} = 1/2, λ_{b,c} = 1/2), as well as the set B′ = {{a}, {b, c}} with the weights λ^{B′} = (λ_{a} = 1, λ_{b,c} = 1), are examples of balanced sets of coalitions.
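+ As a concrete check, here is a minimal Python sketch (ours, not from the paper; helper names are hypothetical) that verifies the balancedness of B and evaluates the weighted sum that the Bondareva-Shapley condition, discussed below, compares to v(N):
+ N = {'a', 'b', 'c'}
+ v = {('a',): 0, ('b',): 0, ('c',): 0,
+      ('a', 'b'): 8, ('a', 'c'): 8, ('b', 'c'): 8, ('a', 'b', 'c'): 10}
+ def is_balanced(weights):
+     # Balanced (Definition 3 below): for every player, the weights of the
+     # coalitions containing that player sum to exactly 1.
+     return all(abs(sum(w for S, w in weights.items() if i in S) - 1) < 1e-9 for i in N)
+ B = {('a', 'b'): 0.5, ('a', 'c'): 0.5, ('b', 'c'): 0.5}
+ print(is_balanced(B))                        # True
+ print(sum(w * v[S] for S, w in B.items()))   # 12.0 > v(N) = 10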
+ Definition 3. Let B ⊆ 𝒩 be a set of coalitions. We say that B is a balanced set if there exists a balancing vector λ^B = (λ^B_S)_{S∈B} of positive weights such that, for each player i ∈ N, we have Σ_{S∈B_i} λ^B_S = 1, with B_i = {S ∈ B | i ∈ S}.
+ The balanced sets were defined by Shapley (1965) but were studied a few years before by Bondareva (1963) without being defined explicitly. They both independently gave a characterisation of the set of balanced games based on the balanced sets. The balanced games are exactly the games for which the weighted sum of the values of the coalitions of any balanced set does not exceed the value of the grand coalition, i.e., for any balanced set B with a system of weights λ^B, we have Σ_{S∈B} λ^B_S v(S) ≤ v(N). In the same paper, Shapley identified a subset of the set of balanced sets on which the characterisation of balanced games remains valid. This characterisation is now known as the Bondareva-Shapley Theorem and is maybe the fundamental theorem for the study of cooperative behaviour. The Bondareva-Shapley Theorem will be discussed in Section 4.
+ Now, let us focus on a given coalition S ∈ 𝒩 \ {N}. If the proposed preimputation x ∈ X(N, v) satisfies x(S) < v(S), the coalition S can get a better value by leaving the grand coalition. Let y ∈ X(N, v) be a second preimputation.
+ Definition 4. We say that y dominates x via S, denoted y dom_S x, if
+ • y is affordable, i.e., y(S) ≤ v(S),
+ • y is improving, i.e., for all i ∈ S, we have y_i > x_i.
+ We say that y dominates x, denoted y dom x, if a coalition S exists such that y dom_S x.
+ For illustration, consider the game described in Table (1), and let x = (5, 3, 2), y = (2, 5, 3) and z = (3, 2, 5) be preimputations. We have that x dom_{a,b} z, z dom_{a,c} y and y dom_{b,c} x. We see in particular that the domination relation is not transitive; however, the relation of being dominated via a specific coalition is transitive.
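+ The cycle above can be checked mechanically. A minimal Python sketch (ours; the helper name is hypothetical), using the pair values of the game of Table (1):
+ def dominates_via(y, x, S, v):
+     # y dom_S x (Definition 4): y affordable for S, and strictly better for all of S.
+     return sum(y[i] for i in S) <= v[S] and all(y[i] > x[i] for i in S)
+ v = {('a', 'b'): 8, ('a', 'c'): 8, ('b', 'c'): 8}
+ x, y, z = {'a': 5, 'b': 3, 'c': 2}, {'a': 2, 'b': 5, 'c': 3}, {'a': 3, 'b': 2, 'c': 5}
+ print(dominates_via(x, z, ('a', 'b'), v),
+       dominates_via(z, y, ('a', 'c'), v),
+       dominates_via(y, x, ('b', 'c'), v))    # True True True: a domination cycle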
+ The first condition expresses that the threat of S to leave the grand coalition only exists because its members can get v(S) on their own. The second condition requires that everyone in the coalition S benefits from leaving the grand coalition. According to our interpretation of the characteristic function, this is what v(S) represents: the relative importance of the coalition's threat of leaving the grand coalition, and therefore the relative importance the coalition gets when we seek a preimputation that benefits everyone.
+ In today's literature, we call a solution a map associating with each game a subset of R^N. But von Neumann and Morgenstern (1944), when they made the first proposal for a solution concept in the broad sense, defined a solution as being a subset of R^N that we today call a stable set, or von Neumann-Morgenstern stable set. They are the sets of 'maximal' elements relative to the intransitive domination relation.
+ Definition 5 (von Neumann and Morgenstern, 1944). A subset K of V ⊆ R^N is a V-stable set if it is
+ • internally stable, i.e., for all x ∈ K, there is no y ∈ K such that y dom x,
+ • externally stable, i.e., for all x ∈ V \ K, there exists y ∈ K such that y dom x.
+ We simply call stable sets the {x ∈ R^N | x(N) ≤ v(N)}-stable sets.
+ Originally, von Neumann and Morgenstern defined their solutions of a game as I(N, v)-stable sets, ignoring any payoff vector that was not efficient or individually rational. In this paper, we are interested in the most general stable sets. Shapley (1952) showed that a set is a stable set if and only if it is an X(N, v)-stable set. We see from the definition that any stable set included in I(N, v) is an I(N, v)-stable set. Moreover, Shapley showed that an I(N, v)-stable set K is a stable set if and only if, for any player i ∈ N, there exists an element x ∈ K such that x_i = v({i}).
+ The definition of stable sets is very appealing, but it suffers from several defects: there may be no stable set, several stable sets, or even a continuum of them, and it is difficult to identify them (Lucas, 1969, 1992).
+ By contrast, when nonempty, the core is unique, contains all the coalitionally rational preimputations, and is a polytope. Also, notice that all the core elements are undominated: if y dominated a core element x via S, we would have v(S) ≥ y(S) > x(S) ≥ v(S), a contradiction. Therefore, the core is always internally stable and is contained in every stable set, if any exist. However, the core is not necessarily externally stable: there can exist preimputations that are dominated solely by preimputations not belonging to the core. Nevertheless, if the core is externally stable, it is the unique stable set (Driessen, 2013).
+ By definition, a preimputation x can be dominated via a coalition S only if the coalition can improve upon x by leaving the grand coalition, i.e., x(S) < v(S). In the following, we denote by e(S, x) the additional quantity of resources that coalition S can acquire by itself, i.e., e(S, x) := v(S) − x(S), which we call the excess of S at x. Let 𝒮 ⊆ 𝒩 be a set of coalitions. We denote by X_𝒮(N, v) the set of preimputations upon which a coalition can improve if and only if it belongs to 𝒮, i.e.,
+ X_𝒮(N, v) := {x ∈ X(N, v) | x(S) < v(S) if and only if S ∈ 𝒮}.
+ We say that the set 𝒮 is feasible if its associated region X_𝒮(N, v) is non-empty. The regions correspond to the connected parts of the complement of the union of all the affine subspaces A_S, which form a hyperplane arrangement in X(N, v). However, the regions are slightly bigger than the chambers of the hyperplane arrangement: the chambers are open sets, while the regions include a part of their frontier. If we consider that the core is the region associated with the empty set of coalitions, the regions form a partition of X(N, v).
+ Definition 6. The resonance arrangement A_R is defined by
+ A_R := {H^R_S | S ∈ 𝒩 \ {N}}, where H^R_S := {x ∈ R^N | x(N) = 0 and x(S) = 0}.
+ Let o be the set function assigning 0 to every coalition.
+ Proposition 1. The sets A(N, o) := {A_S(N, o) | S ∈ 𝒩 \ {N}} and A_R define the same hyperplane arrangement.
+ Proof. Let S be a coalition. We have
+ A_S(N, o) = {x ∈ X(N, o) | x(S) = o(S)} = {x ∈ R^N | x(N) = 0 and x(S) = 0} = H^R_S. □
+ From Prop. 1, we see that any game (N, v) induces a deformation of the resonance arrangement, leading to the arrangement A(N, v) := {A_S(N, v) | S ∈ 𝒩 \ {N}}. Also, the regions X_𝒮(N, o) are in bijection with the chambers of A_R, i.e., the connected parts of R^N \ ∪A_R. Another well-known hyperplane arrangement interests us in this paper.
+ Definition 7. The (restricted) braid arrangement A_B is defined by
+ A_B := {H^B_ij | 1 ≤ i < j ≤ n}, where H^B_ij := {x ∈ R^N | x(N) = 0 and x_i = x_j}.
+ The permutohedron is the polytope whose normal fan is the restricted braid arrangement. Also, the chambers of A_B correspond to the polar regions defined in Section 3, in the same way the chambers of A_R correspond to the regions X_𝒮(N, v).
+ Throughout this paper, we equip the Euclidean vector space R^N, and therefore X(N, v), with the usual scalar product, denoted by ⟨·, ·⟩, and the associated norm, denoted by ∥·∥, defined, for all x, y ∈ R^N, by
+ ⟨x, y⟩ = Σ_{i∈N} x_i y_i, and ∥x∥ = √⟨x, x⟩.
+ Definition 8. Let K ⊆ R^N be nonempty, closed and convex, and let x ∈ R^N. We denote by π_K(x) the element of R^N defined by π_K(x) := argmin_{p∈K} ∥x − p∥. The map π_K : x ↦ π_K(x) is called the projector onto K.
+ In the definition above, the existence of the projection comes from the closedness and the nonemptiness of K, and the uniqueness comes from the convexity. The following result, as formulated by Bauschke and Combettes (2011), is well known and characterises the projectors we use in the space of preimputations.
+ Theorem 1 (Projection Theorem). Let K be a nonempty closed convex subset of R^N. For all payoff vectors x and y,
+ y = π_K(x) if and only if [y ∈ K and, for all z ∈ K, ⟨z − y, x − y⟩ ≤ 0].
+ 3. Affine geometry and hyperplane arrangements
+ This section focuses on the hyperplane X(N, v). Recall that the affine subspace A_S and the affine half-subspace A≥_S are defined by
+ A_S = {x ∈ X(N, v) | x(S) = v(S)}, and A≥_S = {x ∈ X(N, v) | x(S) ≥ v(S)}.
+ Proposition 2. Let S be a coalition. The vector η_S, defined by
+ η_S = 1_S − (|S|/n) 1_N, where (1_S)_i = 1 if i ∈ S and 0 otherwise,
+ is a side payment and a normal of A_S.
+ Proof. For any coalition S, we have η_S(N) = 1_S(N) − (|S|/n) 1_N(N) = |S| − |S| = 0, hence η_S is a side payment. Let x and y be two elements of A_S. We have
+ ⟨η_S, x − y⟩ = ⟨η_S, x⟩ − ⟨η_S, y⟩ = x(S) − (|S|/n) x(N) − y(S) + (|S|/n) y(N).
+ The coalitions S and N are effective for both x and y, hence ⟨η_S, x − y⟩ = 0. □
+ For convenience, we write η_i to denote η_{i}. The set of vectors {η_S | S ∈ 𝒩} does not depend on the game we are considering, and the characteristic function only defines the position of A_S along the line generated by η_S. We now define the projectors used in this paper.
+ Proposition 3. Let S be a coalition and x be a preimputation. Then
+ π_{A_S}(x) = x + γ_S(x) η_S,
+ with γ_S(x) := e(S, x)/∥η_S∥², is a projector onto A_S. Moreover, if π_{A≥_S}(x) differs from x, then π_{A_S}(x) dominates x via S.
+ Proof. First, notice that ∥η_S∥² = ⟨η_S, η_S⟩ = η_S(S). Following the Projection Theorem, we first prove that π_{A_S}(x) belongs to A_S:
+ π_{A_S}(x)(S) = x(S) + γ_S(x) η_S(S) = x(S) + (e(S, x)/∥η_S∥²) ∥η_S∥² = v(S).
+ Let z be an element of A_S. We have
+ ⟨z − π_{A_S}(x), x − π_{A_S}(x)⟩ = ⟨z − x − γ_S(x) η_S, −γ_S(x) η_S⟩
+ = −γ_S(x) (⟨z, η_S⟩ − ⟨x, η_S⟩ − γ_S(x) ⟨η_S, η_S⟩)
+ = −γ_S(x) (z(S) − x(S) − γ_S(x) ∥η_S∥²)
+ = −γ_S(x) (e(S, x) − e(S, x)) = 0.
+ We use the Projection Theorem to conclude the construction of the projector. To end this proof, notice that, for all i ∈ S, we have (η_S)_i = 1 − |S|/n > 0. □
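+ Propositions 2 and 3 translate directly into a few lines of code. A minimal Python sketch (ours, assuming NumPy is available; function names are hypothetical):
+ import numpy as np
+ def eta(mask, n):
+     # eta_S = 1_S - (|S|/n) 1_N, the side-payment normal of Prop. 2.
+     ind = np.asarray(mask, dtype=float)
+     return ind - ind.sum() / n
+ def project_onto_A_S(x, mask, v_S):
+     # pi_{A_S}(x) = x + gamma_S(x) eta_S, gamma_S(x) = e(S, x)/||eta_S||^2 (Prop. 3).
+     x = np.asarray(x, dtype=float)
+     h = eta(mask, x.size)
+     excess = v_S - x[np.asarray(mask, dtype=bool)].sum()
+     return x + (excess / h.dot(h)) * h
+ x = np.array([5.0, 3.0, 2.0])              # a preimputation of the game of Table (1)
+ y = project_onto_A_S(x, [0, 1, 1], 8.0)    # S = {b, c}, v(S) = 8
+ print(y, y[1] + y[2])                      # [2. 4.5 3.5] 8.0, and y dominates x via S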
+ If a coalition S can improve upon a preimputation x ∈ X(N, v), the projection π_{A_S}(x) shares the excess of S at x equally among the players in S, and therefore the projection dominates x via S. This fact motivates the use of these projectors in this work to study the domination relations between payoff vectors and, more specifically, between preimputations and core elements. Moreover, γ_S(x) η_S is the shortest side payment able to map the preimputation x onto a preimputation that benefits the coalition S. It is the solution to the problem consisting of satisfying coalition S with the smallest value transfer between players.
+ The idea of projections between preimputations already existed in the transfer scheme defined by Cesco (1998). A transfer scheme, first defined and used by Stearns (1968), is a sequence of preimputations defined by a sequence of transfers, i.e., translations by a side payment. The sequence of preimputations defined by Cesco is defined recursively by projecting the last preimputation produced onto A_S. His projector is defined, for every preimputation x, by
+ π_{A_S}(x) = x + e(S, x) (1_S/|S| − 1_{N\S}/|N \ S|).
+ Cesco's projectors are identical to ours, but the choice of normals is different. He chose a normal of a different norm to obtain a formulation depending explicitly on the excess e(S, x). Our choice of normals is motivated by the following theorem.
+ Theorem 2. The map η : S ↦ η_S is modular, and for all S, T ∈ 𝒩, we have ⟨η_S, η_T⟩ = η_S(T).
+ Proof. We start by proving that η : S ↦ η_S is modular. Notice that if S and T are disjoint, we have
+ η_S + η_T = 1_S − (|S|/n) 1_N + 1_T − (|T|/n) 1_N = 1_{S∪T} − ((|S| + |T|)/n) 1_N = η_{S∪T}.
+ Now we decompose η_S = η_{S\T} + η_{S∩T}, and similarly for η_T, and we have
+ η_S + η_T = η_{S\T} + η_{S∩T} + η_{T\S} + η_{S∩T} = η_{S∪T} + η_{S∩T}.
+ For the second property, the fact that η_S is a side payment leads to
+ ⟨η_S, η_T⟩ = ⟨η_S, 1_T⟩ − (|T|/n) ⟨η_S, 1_N⟩ = η_S(T) − (|T|/n) η_S(N) = η_S(T). □
+ From this theorem, we can derive several properties of the set {η_S | S ∈ 𝒩}; in particular, the repartition of the η_S is balanced around the origin of the vector space parallel to X(N, v).
+ Corollary 3. A set B of coalitions is balanced if and only if there exists a set of positive scalars {λ_S | S ∈ B} such that
+ Σ_{S∈B} λ_S η_S = ⃗o.
+ Proof. First, remark that
+ Σ_{S∈B} λ_S |S| = Σ_{S∈B} λ_S 1_S(N) = (Σ_{S∈B} λ_S 1_S)(N) = 1_N(N) = n.
+ Assume that B is balanced with balancing weights {λ_S}_{S∈B}. Then
+ Σ_{S∈B} λ_S η_S = Σ_{S∈B} λ_S 1_S − (1/n) (Σ_{S∈B} λ_S |S|) 1_N = 1_N − (1/n) n 1_N = ⃗o.
+ Assume now that Σ_{S∈B} λ_S η_S = ⃗o. Then
+ Σ_{S∈B} λ_S 1_S = Σ_{S∈B} λ_S (η_S + (|S|/n) 1_N) = Σ_{S∈B} λ_S η_S + (1/n) (Σ_{S∈B} λ_S |S|) 1_N = (1/n) n 1_N = 1_N. □
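+ Corollary 3 can be illustrated numerically; a one-off check (ours, assuming NumPy) on the balanced set B = {{a, b}, {a, c}, {b, c}} with weights 1/2 and n = 3:
+ import numpy as np
+ masks = np.array([[1, 1, 0], [1, 0, 1], [0, 1, 1]], dtype=float)   # {a,b}, {a,c}, {b,c}
+ etas = masks - masks.sum(axis=1, keepdims=True) / 3.0              # rows eta_S for n = 3
+ print(0.5 * etas.sum(axis=0))                                      # [0. 0. 0.], as predicted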
+ Another surprising result following from the theorem is that, for any coalitions S and T, if n is a prime number, η_S and η_T cannot be orthogonal, because
+ ⟨η_S, η_T⟩ = η_S(T) = 1_S(T) − (|S|/n) 1_N(T) = |S ∩ T| − |S| · |T| / n,
+ and |S| · |T| cannot be a multiple of n.
+ We have seen earlier that projecting onto A≥_S generates a preimputation dominating the initial one if they differ. For any preimputation x, the relative position of π_{A_S}(x) with respect to A_T depends on the excesses e(S, x) and e(T, x), but also on the scalar product η_S(T) = ⟨η_S, η_T⟩. The value and the sign of η_S(T) indicate how correlated the fluctuations of coalitions S and T are. To summarise all this information, we define
+ χ_S(T, x) := e(S, x) η_S(T) − e(T, x) η_S(S).
+ Proposition 4. Let S and T be two coalitions, and let x ∈ X(N, v). Then π_{A_S}(x) ∈ A≥_T if and only if χ_S(T, x) ≥ 0. In this case, π_{A_S}(x) ∈ A≥_S ∩ A≥_T and π_{A_S}(x) dom x.
+ Proof. First, we study the excess of T at the projection onto A_S:
+ e(T, π_{A_S}(x)) = v(T) − x(T) − γ_S(x) η_S(T)
+ = e(T, x) − (e(S, x)/η_S(S)) η_S(T)
+ = (1/η_S(S)) (e(T, x) η_S(S) − e(S, x) η_S(T))
+ = (−1/η_S(S)) χ_S(T, x).
+ Then, the projection lies in A≥_T if and only if e(T, π_{A_S}(x)) is nonpositive, by definition, and therefore if and only if χ_S(T, x) is nonnegative. For domination, we apply Prop. 3. □
+ There are intuitive implications of this result. If ⟨η_S, η_T⟩ ≥ 0, an increase in the value of S will also generate an increase for T. Therefore, assuming that e(S, x) ≥ e(T, x), projecting onto A_S will also provide, if needed, what coalition T requires to be satisfied with the preimputation. This is implied by the previous result, knowing that η_S(T) ≤ η_S(S) = ∥η_S∥².
+ For most preimputations, projecting onto a single affine subspace A_S is not sufficient to reach the core. To do so, we need to be able to define projectors onto any intersection of affine subspaces, to simultaneously improve the value of all the coalitions that can improve upon the considered preimputation. In the following, we provide a closed formula that defines any needed projector, without having to use sequential algorithmic methods such as Dykstra's projection algorithm.
+ Definition 9. Let 𝒮 ⊆ 𝒩 be a set of coalitions. We say that 𝒮 is an independent set if {η_S}_{S∈𝒮} forms a linearly independent set of vectors.
+ A set which is not independent is said to be dependent. Denote by A_𝒮 the set A_𝒮 := ⋂_{S∈𝒮} A_S, and by L_𝒮 the matrix with {η_S | S ∈ 𝒮} as rows. This unusual definition of a matrix by its rows is motivated by an easier proof of a forthcoming result. Define Γ_𝒮 := L_𝒮 L⊤_𝒮, the Gram matrix associated with the set 𝒮. If we write 𝒮 = {S_1, . . . , S_k}, the Gram matrix is a square matrix of size k whose coefficients are (Γ_𝒮)_ij = ⟨η_{S_i}, η_{S_j}⟩. The symmetry of the scalar product implies the symmetry of Γ_𝒮. In addition, Γ_𝒮 is positive semidefinite: for every vector x ∈ R^𝒮,
+ x⊤ Γ_𝒮 x = Σ_{S∈𝒮} Σ_{T∈𝒮} ⟨η_S, η_T⟩ x_S x_T = ⟨Σ_{S∈𝒮} x_S η_S, Σ_{T∈𝒮} x_T η_T⟩ = ∥Σ_{S∈𝒮} x_S η_S∥² ≥ 0.
+ Proposition 5. A coalition set 𝒮 is independent if and only if Γ_𝒮 is nonsingular.
+ Proof. Using the above formula, we have that, for all x ∈ R^𝒮,
+ x⊤ Γ_𝒮 x = ∥Σ_{S∈𝒮} x_S η_S∥² ≥ 0.
+ It shows that Γ_𝒮 is positive semidefinite, i.e., that all its eigenvalues are nonnegative. To have Γ_𝒮 nonsingular, we need to have only positive eigenvalues, i.e., to have Γ_𝒮 positive definite. Then Γ_𝒮 is nonsingular if and only if, for all x ∈ R^𝒮 \ {⃗o},
+ x⊤ Γ_𝒮 x = ∥Σ_{S∈𝒮} x_S η_S∥² > 0,
+ i.e., if and only if, for all x ∈ R^𝒮 \ {⃗o}, we have Σ_{S∈𝒮} x_S η_S ≠ ⃗o; that is the definition of {η_S | S ∈ 𝒮} being a linearly independent set of vectors. □
+ Another implication of the linear independence of the set {η_S | S ∈ 𝒮} is that A_𝒮 is nonempty. Therefore, if a coalition set 𝒮 is independent, it is possible to satisfy all its coalitions at once, thanks to a translation of the initial preimputation by a well-chosen side payment. For a coalition set 𝒮, a coalition S ∈ 𝒮, and a preimputation x ∈ X(N, v), denote by Γ^{𝒮,x}_S the matrix formed from Γ_𝒮 by replacing each coefficient ⟨η_T, η_S⟩ of the column of scalar products with η_S by e(T, x).
+ Theorem 4. Let 𝒮 be an independent coalition set. Then A_𝒮 is nonempty and, for every preimputation x,
+ π_{A_𝒮}(x) = x + (1/det Γ_𝒮) Σ_{S∈𝒮} det(Γ^{𝒮,x}_S) η_S.
+ Proof. The proof relies on the following result.
+ Proposition 6 (Bauschke and Combettes, 2011, Example 29.17). Let L : R^N → R^𝒮 be linear and let y ∈ ran L. Then, for all x ∈ R^N,
+ π_{L^{−1}({y})}(x) = x − L⊤ Γ^{−1}_𝒮 (Lx − y).
+ We apply this formula to the linear map L_𝒮 that we defined earlier, with the vector y ∈ R^𝒮 defined, for all S ∈ 𝒮, by
+ y_S = v(S) − (|S|/n) v(N).
+ Therefore the image of a preimputation x under L_𝒮 is, for all S ∈ 𝒮,
+ (L_𝒮 x)_S = ⟨η_S, x⟩ = ⟨1_S, x⟩ − (|S|/n) ⟨1_N, x⟩ = x(S) − (|S|/n) v(N).
+ Because 𝒮 is independent, A_𝒮 is nonempty; therefore we have y ∈ ran L_𝒮 and L^{−1}_𝒮({y}) = A_𝒮. We consider the singleton {y} to emphasise the set-theoretical notation for L^{−1}_𝒮. Implementing these in the formula yields
+ π_{A_𝒮}(x) = x − L⊤_𝒮 Γ^{−1}_𝒮 (L_𝒮 x − y) = x + L⊤_𝒮 Γ^{−1}_𝒮 e(𝒮, x),
+ with e(𝒮, x) being the column vector composed of the excesses (e(S, x))_{S∈𝒮}. Using Cramer's rule, the solution to the linear system Γ_𝒮 β = e(𝒮, x) can be expressed, for each S ∈ 𝒮, as
+ β_S = det(Γ^{𝒮,x}_S) / det Γ_𝒮.
+ Therefore the projection formula becomes
+ π_{A_𝒮}(x) = x + L⊤_𝒮 β = x + (1/det Γ_𝒮) Σ_{S∈𝒮} det(Γ^{𝒮,x}_S) η_S. □
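+ Theorem 4 is also convenient computationally. A minimal sketch (ours, assuming NumPy; the Cramer determinants are replaced by an equivalent linear solve of Γ_𝒮 β = e(𝒮, x)):
+ import numpy as np
+ def project_onto_intersection(x, masks, values):
+     # pi_{A_S}(x) = x + L^T Gamma^{-1} e(S, x): the closed form of Theorem 4,
+     # with the Cramer determinants replaced by a linear solve.
+     x = np.asarray(x, dtype=float)
+     masks = np.asarray(masks, dtype=float)
+     L = masks - masks.sum(axis=1, keepdims=True) / x.size   # rows eta_S
+     excesses = np.asarray(values, dtype=float) - masks @ x  # e(S, x) for each S
+     beta = np.linalg.solve(L @ L.T, excesses)               # Gamma beta = e(S, x)
+     return x + L.T @ beta
+ # Independent coalition set {{a,b}, {a,c}} of the game of Table (1):
+ y = project_onto_intersection([2.0, 4.0, 4.0], [[1, 1, 0], [1, 0, 1]], [8.0, 8.0])
+ print(y)   # [6. 2. 2.]: both excesses vanish, and y(N) is still 10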
+ In general, the set A_𝒮 is empty if 𝒮 is dependent, making this result sharp. Here we understand 'in general' in the sense that an overdetermined system of linear equations has, in general, no solution. We say that a set function v acts generally on a coalition set 𝒮 if, for all dependent subsets 𝒯 ⊆ 𝒮, we have A_𝒯 = ∅. There are two main implications of this result: for a preimputation upon which a set of coalitions can improve, and a game acting generally on 𝒩, we have a necessary and sufficient condition to know whether we can find another preimputation putting every excess to 0, together with the explicit formula of the side payment between the two preimputations that minimises the transfers of value between the players. This projector is especially useful for a feasible coalition set which is independent.
+ Theorem 5. Let (N, v) be a balanced game and let 𝒮 be an independent and feasible coalition set. Then cl(X_𝒮(N, v)) ∩ C(N, v) ≠ ∅ and, for all x ∈ X_𝒮(N, v),
+ π_{A_𝒮}(x) ∈ C(N, v).
+ Proof. Let x ∈ X_𝒮(N, v). By Theorem 4, the independence of 𝒮 implies the nonemptiness of A_𝒮. Therefore, set y = π_{A_𝒮}(x). Define
+ c_t = t x + (1 − t) y, for t ∈ [0, 1].
+ Because cl(X_𝒮(N, v)) is a closed polyhedron defined by inequalities, it is convex, and c_t ∈ cl(X_𝒮(N, v)), with c_t ∈ X_𝒮(N, v) if t ≠ 0. By contradiction, assume that y ∉ C(N, v). Then there exists a coalition T ∉ 𝒮 such that e(T, y) > 0. Because T ∉ 𝒮, we have e(T, x) ≤ 0, and then there exists t∗ ∈ ]0, 1] such that e(T, c_{t∗}) = 0. Set τ = t∗/2. Because t∗ ≠ 0, we have τ ≠ 0 and
+ c_τ ∈ X_𝒮(N, v), e(T, c_τ) > 0,
+ which is impossible. Then y ∈ C(N, v) ∩ cl(X_𝒮(N, v)). □
+ 4. Polyhedral geometry
+ In this section, we discuss the core, its properties, what it represents in economics or any cooperative environment, and how this particular object can be found elsewhere in mathematics, in particular in the theory of set functions. First, recall the modern definition of the core from Gillies (1959). For a game (N, v), the core, denoted by C(N, v), is the intersection between the set of preimputations and the set of upper vectors, i.e., the set of preimputations benefiting every coalition:
+ C(N, v) = X(N, v) ∩ U(N, v) = ⋂_{S∈𝒩} A≥_S.
+ Let v and w be two set functions on the same domain 2^N. We write w ≥ v if, for every S ∈ 2^N, we have w(S) ≥ v(S). Then, the core of v can be seen as the set of additive set functions w such that w ≥ v but still with w(N) = v(N).
+ Figure 1. Graphical illustrations of cores from Shapley (1971).
+ In this paper, we interpret the nonemptiness of the core as a necessary condition for the grand coalition N to form, but not as a sufficient condition. It is necessary because, if the core is empty, there exists no preimputation that benefits every coalition, and then some players will prefer to form coalitions other than N. However, even if the core is nonempty, there may exist no mechanism for the players to reach these coalitionally rational preimputations; nothing indicates that all the players and coalitions will succeed in defending their interests. Such a mechanism can be the result of external intervention, for instance a planner that has the power to choose which preimputations will be used to share the value v(N) among the players, in which case the core's nonemptiness is a sufficient condition.
+ Nevertheless, if there is no external mediator between the players, a coalition able to improve upon a current preimputation can still threaten to leave the grand coalition N, and then require a preimputation that dominates the current one. To end this interlude on the game-theoretical interpretation, we can conclude that, if there is no external mediator, the core's stability, especially its external stability, is a sufficient condition for the grand coalition to form and cooperative behaviour to emerge.
+ 4.1. The balanced sets. Let (N, v) be a game. Recall that a balanced set on N is a set B of subsets of N together with a set of positive weights {λ_S}_{S∈B}, called the balancing weights, such that, for every player i ∈ N, the sum Σ_{S∈B_i} λ_S is equal to one, with B_i being the set of coalitions of B containing i. In other words, we have
+ Σ_{S∈B} λ_S 1_S = 1_N,
+ with 1_S being the n-dimensional (0, 1)-vector such that (1_S)_i = 1 if and only if i ∈ S. From a geometrical point of view, the set B is balanced if
+ 1_N ∈ relint(cone({1_S}_{S∈B})),
+ where relint(X) denotes the relative interior of a set X. Notice that B is minimal if the cone is simplicial, i.e., if its number of rays exceeds by 1 the dimension of its affine span. Using the balanced sets, Bondareva (1963), and then Shapley (1965), found a characterisation of games with nonempty cores.
+ Theorem 6 (Bondareva-Shapley Theorem, first version). A game (N, v) has a nonempty core if and only if, for every balanced set B on N together with its balancing weights {λ_S}_{S∈B}, we have
+ Σ_{S∈B} λ_S v(S) ≤ v(N).
+ This well-known theorem provides the adjective 'balanced' to name games having a nonempty core. The interpretation of the result is very natural: a balanced set is a possible organisation for the players, possibly spending fractions of their time among different coalitions. The weighted sum is the total amount of resources gathered by the players in N at the end of one unit of time. Then, if the biggest weighted sum is the one corresponding to the balanced set {N}, the players should be organised as one unique grand coalition, and therefore there should exist preimputations allocating v(N) among the players, with each subcoalition of N acquiring at least the same amount of resources as if it were working on its own.
+ One way to study the existence of a preimputation which is also an upper vector is to study the linear program min_{x∈U(N,v)} x(N) and to use duality theory. The program can be rewritten as
+ (P)   min x(N)   s.t.   x(S) ≥ v(S) for all S ∈ 𝒩.
+ Remark that this program is always feasible and that the game's core is nonempty if and only if the program's value is v(N). The dual program is:
+ (D)   max Σ_{S∈𝒩} λ_S v(S)   s.t.   Σ_{S∈𝒩} λ_S 1_S = 1_N and λ_S ≥ 0 for all S ∈ 𝒩.
+ The dual program's constraints define the aforementioned balanced sets. Because the vector λ∗ defined by λ∗_S = 0 for all S ∈ 𝒩 \ {N} and λ∗_N = 1 satisfies the constraints, the program has a solution. Furthermore, the value of the objective function at λ∗ is v(N). It follows from the duality theorem that the game's core is nonempty if and only if the value of (D) is v(N), i.e., for all balanced sets B on N with balancing weights {λ_S}_{S∈B}, we have Σ_{S∈B} λ_S v(S) ≤ v(N).
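+ The primal program (P) can be checked directly with an off-the-shelf LP solver. A minimal sketch (ours, assuming SciPy is available), run on the game of Table (1):
+ import numpy as np
+ from itertools import combinations
+ from scipy.optimize import linprog
+ players = ['a', 'b', 'c']
+ v = {('a',): 0, ('b',): 0, ('c',): 0,
+      ('a', 'b'): 8, ('a', 'c'): 8, ('b', 'c'): 8, ('a', 'b', 'c'): 10}
+ coalitions = [S for k in range(1, 4) for S in combinations(players, k)]
+ # (P): min x(N) s.t. x(S) >= v(S); linprog uses <= constraints, hence the sign flips.
+ A_ub = -np.array([[1.0 if p in S else 0.0 for p in players] for S in coalitions])
+ b_ub = -np.array([float(v[S]) for S in coalitions])
+ res = linprog(np.ones(3), A_ub=A_ub, b_ub=b_ub, bounds=[(None, None)] * 3)
+ print(res.fun)   # 12.0 > v(N) = 10, so the core of this game is empty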
+ In practice, this characterisation cannot be used for algorithmic purposes because most balanced sets can have an infinity of balancing weights. Notice that the balancing weights form a polytope in R^𝒩, described by
+ F = {λ ∈ R^𝒩_+ | Σ_{S∈𝒩} λ_S 1_S = 1_N}.
+ Each point λ ∈ F represents a balanced set B corresponding to the support of λ, and the balancing weights of B are the corresponding positive coefficients of λ. Similarly, we can identify each characteristic function v with a (2^n − 1)-dimensional vector; therefore, the vector space R^𝒩 can be seen as the space of games. Then, the ambient space of the polytope of balancing weights and the vector space of games, together with the usual scalar product, form a dual system, or dual pair, studied in functional analysis. The set of balanced normalised (i.e., v(N) = 1) games is the polar set of F, because the weighted sum in the Bondareva-Shapley Theorem is the aforementioned scalar product.
+ Because the balanced sets and their weights are associated with a polytope, they are determined by convex combinations of the balanced sets represented by the vertices.
+ Definition 10. A balanced set B on N is minimal if there is no balanced proper subset of B.
+ Shapley proved that the set of minimal balanced sets corresponds to the set of extremal points of F; therefore a minimal balanced set B has a unique set of balancing weights λ^B. From this, he stated the following sharp version of the Bondareva-Shapley Theorem. For more details, see the monograph of Grabisch (2016).
+ Theorem 7 (Bondareva-Shapley, sharp version). A game (N, v) has a nonempty core if and only if, for every minimal balanced set B on N, we have
+ Σ_{S∈B} λ^B_S v(S) ≤ v(N).
+ Furthermore, none of these inequalities is redundant, except for B = {N}.
+ The sharpness of the result comes from the fact that, if we choose a minimal balanced set B, we can find a game that satisfies all the inequalities for minimal balanced sets different from B, but not the one corresponding to B. This new characterisation only requires a finite number of inequalities to check, provided that we know the minimal balanced sets on N. As a consequence, it is possible to design an algorithm checking the nonemptiness of the core significantly faster than the usual algorithms used in linear programming. The only requirement to use a Bondareva-Shapley-like algorithm is to know the set of minimal balanced sets, which is very complex (see Laplace Mermoud et al., 2022).
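+ For |N| = 3 the minimal balanced sets are known explicitly: the five partitions of N and the set of the three pairs with weights (1/2, 1/2, 1/2). The sharp test of Theorem 7 can therefore be hard-coded; a minimal Python sketch (ours):
+ MINIMAL_BALANCED = [
+     {('a',): 1, ('b',): 1, ('c',): 1},
+     {('a',): 1, ('b', 'c'): 1},
+     {('b',): 1, ('a', 'c'): 1},
+     {('c',): 1, ('a', 'b'): 1},
+     {('a', 'b', 'c'): 1},
+     {('a', 'b'): 0.5, ('a', 'c'): 0.5, ('b', 'c'): 0.5},
+ ]
+ def core_is_nonempty(v):
+     # Sharp Bondareva-Shapley test (Theorem 7), specialised to three players.
+     return all(sum(w * v[S] for S, w in B.items()) <= v[('a', 'b', 'c')] + 1e-9
+                for B in MINIMAL_BALANCED)
+ v = {('a',): 0, ('b',): 0, ('c',): 0,
+      ('a', 'b'): 8, ('a', 'c'): 8, ('b', 'c'): 8, ('a', 'b', 'c'): 10}
+ print(core_is_nonempty(v))   # False: the three pairs with weights 1/2 give 12 > 10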
+ The balanced collections and the Bondareva-Shapley Theorem thus define which deformations of the permutohedron are acceptable to keep it non-empty, even if, in general, the deformation of the permutohedron induced by a game does not give a generalised permutohedron. Only the deformations induced by convex games satisfy this property.
+ 4.2. Cores of convex games. So far, we have studied games in a very general framework, coming from arbitrary grounded set functions. In this paper, we are mainly focused on the core and the domination relation between the preimputations. Moreover, a lot of economic problems modelled with the game-theoretic framework described here have some common and handy properties.
+ Definition 11. Let ξ : 2^N → R be a set function. We say that ξ is
+ • supermodular if, for all S, T ∈ 𝒩, we have ξ(S) + ξ(T) ≤ ξ(S ∪ T) + ξ(S ∩ T),
+ • submodular if, for all S, T ∈ 𝒩, we have ξ(S) + ξ(T) ≥ ξ(S ∪ T) + ξ(S ∩ T).
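+ Definition 11 is easy to test by brute force for small N. A minimal Python sketch (ours; the function name is hypothetical):
+ from itertools import combinations
+ def is_supermodular(v, players):
+     # Brute-force check of Definition 11 over all pairs of subsets.
+     subsets = [frozenset(c) for k in range(len(players) + 1)
+                for c in combinations(sorted(players), k)]
+     return all(v[S] + v[T] <= v[S | T] + v[S & T] + 1e-9
+                for S in subsets for T in subsets)
+ v = {frozenset(S): len(S) * (len(S) - 1) / 2
+      for k in range(4) for S in combinations('abc', k)}
+ print(is_supermodular(v, 'abc'))   # True: v(S) = |S|(|S|-1)/2 is supermodular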
+ Games with supermodular characteristic functions are called convex (Shapley, 1971). Many interaction situations can be modelled by these, for instance: production economies with landowners (Shapley & Shubik, 1967; Driessen, 2013), bankruptcy games (Aumann & Maschler, 1985; Driessen, 2013), common pool games with linear cost functions (O'Neill, 1982; Driessen, 2013), etc. Shapley (1971) studied in great depth the properties of convex games, thanks to the extensive study of submodular set functions, and more specifically polymatroids, by Edmonds (1970). In particular, convex games have nonempty cores, and their cores are (externally) stable. The nonemptiness of the core comes from the theory of submodular set functions. For an exposition of this theory, see the monograph of Fujishige (2005).
+ Let ξ : 2^N → R be a grounded submodular set function. The submodular polyhedron P(ξ) of ξ is defined by P(ξ) := {x ∈ R^N | x(A) ≤ ξ(A), for all A ∈ 𝒩}, and the base polyhedron of ξ by B(ξ) := {x ∈ P(ξ) | x(N) = ξ(N)}.
+ Figure 2. Example of a submodular/base polyhedron from Fujishige (2005).
+ Denote by ξ# the conjugate set function of ξ, defined, for all S ∈ 𝒩, by
+ ξ#(N \ S) = ξ(N) − ξ(S).
+ Lemma 8. Let ξ : 2^N → R be a grounded submodular set function and let (N, v) be a game such that v ≡ ξ#. Then, (N, v) is convex and B(ξ) = C(N, v).
+ Proof. This result is based on Lemma 2.4 from Fujishige (2005). Let x ∈ B(ξ). Then, for all S ∈ 𝒩, we have x(S) ≤ ξ(S). Taking the opposite values and adding ξ(N) = x(N) to each side yields x(N) − x(S) ≥ ξ(N) − ξ(S); then we have, for all S ∈ 𝒩,
+ x(N \ S) ≥ ξ#(N \ S) = v(N \ S).
+ Therefore, B(ξ) and C(N, v) are the same set. Let us now prove that (N, v) is convex. For all S and T ∈ 𝒩, we have
+ v(S) + v(T) = ξ#(S) + ξ#(T) = 2ξ(N) − (ξ(N \ S) + ξ(N \ T)).
+ Notice that (N \ S) ∪ (N \ T) = N \ (S ∩ T) and (N \ S) ∩ (N \ T) = N \ (S ∪ T). Because ξ is submodular, we have
+ ξ(N \ S) + ξ(N \ T) ≥ ξ(N \ (S ∩ T)) + ξ(N \ (S ∪ T)).
+ Then, we finish the calculation with
+ v(S) + v(T) ≤ 2ξ(N) − (ξ(N \ (S ∩ T)) + ξ(N \ (S ∪ T))) = ξ#(S ∩ T) + ξ#(S ∪ T) = v(S ∩ T) + v(S ∪ T). □
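+ The conjugation of Lemma 8 is a one-line transformation. A minimal Python sketch (ours; ξ(S) = min(|S|, 2) is used as a small grounded, non-decreasing, submodular example):
+ from itertools import chain, combinations
+ def conjugate(xi, players):
+     # xi#(N \ S) = xi(N) - xi(S), the conjugate set function of Lemma 8.
+     full = frozenset(players)
+     subsets = chain.from_iterable(combinations(sorted(players), k)
+                                   for k in range(len(players) + 1))
+     return {full - frozenset(S): xi[full] - xi[frozenset(S)] for S in subsets}
+ xi = {frozenset(S): min(len(S), 2)
+       for k in range(4) for S in combinations('abc', k)}
+ v = conjugate(xi, 'abc')
+ print(v[frozenset('abc')], v[frozenset('ab')], v[frozenset('a')])   # 2 1 0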
+ Figure 3. Example of submodular and base polyhedra for a set function f and its conjugate f# from Fujishige (2005).
+ The theory of submodular functions (Edmonds, 1970; Fujishige, 2005, etc.) is thus, to some extent, dual to Shapley's theory of convex games. There remains an important difference between these two theories: the concept of external stability of the core/base polyhedron has no analogue in set function theory.
+ Originally, Edmonds studied what he called a polymatroid, which is a polytope. His definition differs from the one of Fujishige, who defined a submodular set function under the same name of polymatroid. These two definitions are however deeply connected. For two vectors x, y ∈ R^N, we write x ≤ y if, for all i ∈ N, we have x_i ≤ y_i.
+ Definition 12 (Edmonds, 1970; Schrijver, 2003). A polymatroid P in R^N_+ is a compact nonempty subset of R^N_+ such that
+ (1) if ⃗o ≤ y ≤ x ∈ P, then y ∈ P,
+ (2) for each z ∈ R^N_+ there exists a number ρ(z) such that each maximal vector x of P ∩ {x | x ≤ z} satisfies x(N) = ρ(z).
+ This definition resembles a polytopal equivalent of the following definition of a matroid: a matroid is a pair M = (N, B) with N a finite set and B a nonempty set of so-called independent subsets of N such that
+ (1) every subset of an independent set is an independent set,
+ (2) for every A ⊆ N, every maximal independent subset of A has the same cardinality, called the rank r(A) of A (w.r.t. M).
+ Definition 13 (Fujishige, 2005). Let ρ : 2^N → R be a grounded set function satisfying
+ (1) A ⊆ B ⊆ N implies ρ(A) ≤ ρ(B),
+ (2) for all A, B ⊆ N, we have ρ(A) + ρ(B) ≥ ρ(A ∪ B) + ρ(A ∩ B).
+ The pair (N, ρ) is called a polymatroid and ρ is called the rank function of the polymatroid.
+ The two definitions are connected thanks to the following theorem.
+ Theorem 9 (Edmonds, 1970). Let ξ : 2^N → R be a grounded, non-decreasing, submodular set function. Then the following polyhedron is a polymatroid:
+ P(ξ) = {x ∈ R^N_+ | x(A) ≤ ξ(A), for all A ∈ 𝒩}.
+ Another interesting polytope in the polyhedral combinatorics literature linked to the core is the (generalised) permutohedron. It is generated by an 'admissible' deformation of a permutohedron, a polytope extensively studied in polyhedral combinatorics.
+ Definition 14 (Postnikov, 2009). Let x ∈ R^N be such that, for all distinct i, j ∈ N, x_i ≠ x_j. The permutohedron Π_N(x) is the convex polytope in R^N defined as the convex hull of all vectors obtained from x by permutation of the coordinates:
+ Π_N(x) := conv{x_σ | σ ∈ S_N},
+ where S_N is the group of permutations of N and x_σ = (x_{σ(1)}, . . . , x_{σ(n)}).
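+ For small n, the permutohedron can be generated explicitly; a minimal Python sketch (ours):
+ from itertools import permutations
+ # The six vertices of Pi_N((3, 2, 1)) for n = 3: a hexagon in the plane x(N) = 6.
+ vertices = sorted(permutations((3, 2, 1)))
+ print(len(vertices), vertices[0])   # 6 (1, 2, 3)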
+ We see that cores of convex games, polymatroids, base polyhedra of submodular set functions and generalised permutohedra are the same objects, seen from different perspectives. However, the definitions of permutohedra are not yet consistent in the literature. For some authors, the standard permutohedron is defined as
+ Π_N((n, n − 1, . . . , 2, 1)).
+ It is the core of the strictly convex game (N, v) defined, for all S ∈ 𝒩, by
+ v(S) = |S|(|S| − 1)/2.
+ In polyhedral combinatorics, since Postnikov (2009), people have been studying deformations of permutohedra to deal with a more general type of polytope. A generalised permutohedron is a polytope obtained from a permutohedron by moving vertices so that the directions of all edges are preserved, while some of the edges may accidentally degenerate into a single point. More formally, we have the following characterisation.
+ Lemma 10 (Castillo and Liu, 2022). A polytope P ⊆ R^N is a generalised permutohedron if and only if all of its edge directions are of the form 1_{i} − 1_{j}, for some i, j ∈ N such that i ≠ j.
+ Figure 4. A permutohedron and two of its generalised permutohedra from Postnikov (2009).
+ According to Postnikov, generalised permutohedra are parametrised by sets of 2n−1
997
+ coordinates {zS}S∈N indexed by the nonempty subsets S ⊆ N.
998
+ Each generalised
999
+ permutohedron is of the form
1000
+
1001
+ x ∈ RN | x(N) = zN, and x(S) ≥ zS, for all S ∈ N
1002
+
1003
+ .
1004
+ Here, we can recognise the core’s definition, where the “parameters” {zS | S ∈ N}
1005
+ are the value v(S). Castillo and Liu (2022) called these sets of parameters deforming
1006
+ vectors. A game can then be viewed as a deformation of the standard permutohedron.
1007
+ This point of view joins the one we had earlier with the hyperplane arrangement
1008
+ {AS | S ∈ N} in X(N, v). Moreover, generalised permutohedra are deeply connected
1009
+ to submodular functions by the Subdmodulartiy Theorem (Castillo & Liu, 2022).
+ Theorem 11 (Submodularity Theorem).
+ There exists a bijection between generalised permutohedra P with dim(P) ≤ n − 1 and grounded submodular set functions on 2^N.
+ Another proof of the Submodularity Theorem can be found in Rehberg (2021). To conclude this part on convex games, let us introduce the convex cover of a balanced game.
+ Definition 15. Let (N, v) be a balanced game. A convex cover of (N, v) is a convex game (N, v∗) such that C(N, v) = C(N, v∗).
+ Unlike the exact cover and the totally balanced cover, which will be defined later, not all games have a convex cover.
+ Proposition 7. A balanced game (N, v) has a convex cover if and only if its core is a generalised permutohedron. Moreover, its convex cover is unique.
+ Proof. It is a corollary of the Submodularity Theorem. For each generalised permutohedron P, a unique submodular function ξ exists for which B(ξ) = P. Then, we use Lemma 8 to go from the submodular function to the convex game. □
+ The relations between the polytopes presented in this subsection are summarised in the following table.
+ Set functions         | Polymatroids         | Permutohedra              | Convex games
+ --------------------- | -------------------- | ------------------------- | ----------------------------------
+ Submodular polyhedron | Extended polymatroid | -                         | Upper vectors
+ -                     | Polymatroid          | -                         | -
+ Base polyhedron       | Base polytope        | Generalised permutohedron | Core
+ -                     | -                    | Permutohedron             | C(N, v) with v : S ↦ |S|(|S|−1)/2
+ Schrijver (2003),     | Edmonds (1970),      | Postnikov (2009),         | Shapley (1971),
+ Fujishige (2005)      | Schrijver (2003)     | Castillo and Liu (2022)   | Grabisch (2016)
+ Figure 5. Similarly defined polyhedra in different theories.
+ As we have seen, the cores of convex games are very well understood, from at least four different perspectives. But a supermodular characteristic function is a restrictive condition, and many social interactions or economic situations cannot be modelled by convex games. To model these problems, we need a more general theory.
+ 4.3. Totally balanced games.
+ Definition 16. Let (N, v) be a game, and let S ∈ N be a coalition. The subgame (S, v) is the game on S whose characteristic function is v restricted to the subcoalitions of S. A game is totally balanced if all of its subgames are balanced.
+ These totally balanced games arise in various common situations.
+ Market games. The definition of a mathematical market comes from Shapley and Shubik (1969). Let N be a finite set of players, or traders, let G be the nonnegative orthant of a finite-dimensional vector space, called the commodity space, let A = {a_i | i ∈ N} ∈ G^N be an indexed set of elements of G, called the initial endowments, and let U = {u_i | i ∈ N} be an indexed set of continuous, concave functions u_i : G → R, called the utility functions. For any coalition S ∈ N, a set of endowments {x_i}_{i∈S} ⊆ G such that Σ_{i∈S} x_i = Σ_{i∈S} a_i is called a feasible S-allocation of the market (N, G, A, U), and we denote their set by X_S. The market game generated by this market is a game (N, v) such that, for all S ∈ N,
+ v(S) = max_{x ∈ X_S} Σ_{i∈S} u_i(x_i).
+ Theorem 12 (Shapley and Shubik, 1969).
+ A game is a market game if and only if it is totally balanced.
+ For a specific subclass of market games, called assignment games, Solymosi and Raghavan (2001) found the set of games with a stable core.
+ Flow games. Another example of a class of games with a wide range of applications is the class of flow games. These games are described by Kalai and Zemel (1982) as being useful for modelling problems of profit sharing in an integrated production system with alternative production routes. Let G = (V, E) be a directed graph, with V the set of vertices and E the set of edges, and let N be a finite set of players. Define two functions u : e ∈ E ↦ u(e) ∈ R and p : e ∈ E ↦ p(e) ∈ N, with u describing the “capacity” of an edge and p pointing to the owner of the edge. For a coalition S ∈ N, let G_S be the subgraph restricted to the edges owned by a player in S. Then the characteristic function v of the flow game (N, v) associated with G maps each coalition S to the maximal amount of flow that can be carried through G_S.
+ Theorem 13 (Kalai and Zemel, 1982).
+ A game is a flow game if and only if it is totally balanced.
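+ To illustrate the construction, here is a small Python sketch of ours (not from the original text). The source node s, sink node t, capacities and edge owners are all hypothetical toy data; v(S) is computed as the maximum s–t flow in the subgraph of edges owned by members of S, using networkx.
+ ```python
+ from itertools import chain, combinations
+ import networkx as nx
+ 
+ # A toy network: each edge carries a capacity and an owner (a player in N).
+ N = {1, 2}
+ edges = [  # (tail, head, capacity, owner) -- hypothetical data
+     ("s", "a", 2.0, 1),
+     ("a", "t", 2.0, 2),
+     ("s", "t", 1.0, 1),
+ ]
+ 
+ def v(S):
+     """Max amount of s-t flow through the edges owned by coalition S."""
+     G_S = nx.DiGraph()
+     G_S.add_nodes_from(["s", "t"])  # keep endpoints even if S owns no edge
+     for tail, head, cap, owner in edges:
+         if owner in S:
+             G_S.add_edge(tail, head, capacity=cap)
+     value, _ = nx.maximum_flow(G_S, "s", "t")
+     return value
+ 
+ # Characteristic function on all nonempty coalitions:
+ for S in chain.from_iterable(combinations(N, k) for k in (1, 2)):
+     print(set(S), v(set(S)))  # v({1}) = 1.0, v({2}) = 0.0, v({1,2}) = 3.0
+ ```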
+ These games represent a much wider class of economic situations and are defined solely as games having nonempty cores, no matter at which level we study the emergence of cooperation. Any totally balanced game, i.e., any market game or any flow game, is balanced, and this does not depend on the choice of a grand coalition. We can say that these games naturally lead the players to cooperate: at least the necessary condition of having a nonempty core is always satisfied, by definition. There are at least two other strong assets of totally balanced games.
+ Definition 17. Two games are called d-equivalent (domination-equivalent) if they have the same imputation sets and the same domination relations on them.
+ Two d-equivalent games then have precisely the same stable sets, or both have none. Also, if they have cores, they have the same cores. The notion of d-equivalence comes from Gillies (1959), and the definition above is the reformulation by Shapley and Shubik (1969). From the same authors, we also have the concept of a (totally balanced) cover of a game.
+ Lemma 14. Let (N, v) be a game. The game (N, v̄) defined, for all S ∈ N, by
+ v̄(S) = max_B Σ_{T∈B} λ^B_T v(T),
+ where the maximum is taken over the minimal balanced collections B on S, is the cover of (N, v). If (N, v) is balanced, then it is d-equivalent to its cover (N, v̄).
+ This result reduces the study of the external stability of cores of balanced games to the study of the external stability of cores of totally balanced games. Therefore, studying totally balanced games allows us to study not only a very broad class of games but also, through d-equivalence, all the balanced games. This is why, in this paper, we always consider totally balanced games when needed. Unlike for convex games, it is not clear which type of set function characterises the totally balanced games, but Kalai and Zemel (1982) found a very interesting and useful characterisation while studying flow games. The games which have an additive characteristic function are called inessential.
+ Theorem 15 (Kalai and Zemel, 1982).
+ A game (N, v) is totally balanced if and only if it is the minimum of a finite set of inessential games.
+ Let {a_i | i ∈ I} be a set of additive set functions indexed by a finite set I. When we say that a game (N, v) is the minimum of the inessential games {(N, a_i)}_{i∈I}, we mean that, for all S ∈ N,
+ v(S) = min_{i∈I} a_i(S).
+ The set {a_i | i ∈ I} is called a representation of the game (see Rosenmüller (2013)).
+ Definition 18. We say that a set function ξ : 2^N → R is min-additive if there exists a finite set of additive set functions {a_i : 2^N → R}_{i∈I} such that, for all S ∈ 2^N,
+ ξ(S) = min_{i∈I} a_i(S).
+ The min-additive set functions are then to the totally balanced games what the supermodular/submodular set functions were to the convex games. Let a be an additive set function. It is completely determined by its values on singletons, and we can therefore identify a with an n-dimensional vector a = (a({i}))_{i∈N}. With this vector, we can associate a unique linear form ϕ on R^N, defined, for all x ∈ R^N, by
+ ϕ(x) := ⟨a, x⟩.
+ Definition 19. A tropical polynomial is a map defined, for all x ∈ R^N, by
+ φ(x) = min_{i∈I} (c_i + ⟨z_i, x⟩),
+ with I a finite set of indices, and for all i ∈ I, c_i ∈ R ∪ {−∞} and z_i ∈ R^N.
+ Theorem 16. Let (N, v) be a totally balanced game. There exists a tropical polynomial φ : R^N → R such that, for all S ∈ N, we have φ(1_S) = v(S). Conversely, if φ is a monic tropical polynomial, there exists a totally balanced game that can be extended to φ.
+ Proof. Let (N, v) be a totally balanced game. Then, there exists a finite set of additive set functions {a_i}_{i∈I} such that, for all S ∈ N, we have v(S) = min_{i∈I} a_i(S). Denote by a_i the n-dimensional vector corresponding to the additive set function a_i. Now, denote by φ the map φ : R^N → R defined, for all x ∈ R^N, by
+ φ(x) = min_{i∈I} ⟨a_i, x⟩.
+ Therefore, for every coalition S ∈ N, we have
+ φ(1_S) = min_{i∈I} ⟨a_i, 1_S⟩ = min_{i∈I} a_i(S) = v(S).
+ Now, let φ be a tropical polynomial defined as the minimum of a finite set of linear forms ϕ_i, i.e., for all x ∈ R^N,
+ φ(x) = min_{i∈I} ϕ_i(x).
+ For each linear form ϕ_i, there exists a unique vector a_i such that, for all x ∈ R^N, ϕ_i(x) = ⟨a_i, x⟩. For each vector a_i, we define an additive set function a_i by a_i(S) = ⟨a_i, 1_S⟩. The game (N, v) defined by v(S) = min_{i∈I} a_i(S) is totally balanced. □
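+ As a small numerical illustration (our own sketch, not from the original text), the following Python snippet builds a min-additive game from two hypothetical additive set functions, identified with vectors of R^n, and checks that its tropical extension φ agrees with v on the indicator vectors of coalitions.
+ ```python
+ from itertools import chain, combinations
+ import numpy as np
+ 
+ n = 3
+ # Two hypothetical additive set functions, identified with vectors of R^n.
+ a = np.array([[1.0, 2.0, 4.0],
+               [3.0, 1.0, 2.0]])
+ 
+ def v(S):
+     """Min-additive (totally balanced) game: v(S) = min_i a_i(S)."""
+     one_S = np.zeros(n)
+     one_S[list(S)] = 1.0
+     return min(a @ one_S)
+ 
+ def phi(x):
+     """Tropical extension: phi(x) = min_i <a_i, x>."""
+     return min(a @ x)
+ 
+ # phi coincides with v on indicator vectors of coalitions.
+ for S in chain.from_iterable(combinations(range(n), k) for k in range(1, n + 1)):
+     one_S = np.zeros(n)
+     one_S[list(S)] = 1.0
+     assert phi(one_S) == v(set(S))
+ ```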
+ In the game-theoretical literature, there already exist different concepts of extensions of games. Back in 1972, Owen (1972) defined a multilinear extension for any game (N, v). Algaba et al. (2004) studied the Choquet integral (also called the Lovász extension, see Grabisch (2016)) in the context of cooperative game theory. The Choquet and the Sugeno integrals are also used as extensions of capacities in decision theory (Grabisch, 2016). But our extension aims to build deeper connections with combinatorial optimisation and discrete geometry.
+ 5. Concluding remarks
+ We investigated many links between cooperative game theory and various parts of discrete mathematics. Seeing cooperative games as deformations of the resonance hyperplane arrangement, and cores as deformations of generalised permutohedra, may help to import new results and tools from other fields into the study of cooperative games, and the other way around. Furthermore, the study of totally balanced games with the help of tropical polynomials and tropical hypersurfaces has to be continued, in the same way that set functions, polymatroids and generalised permutohedra have contributed to the study of convex games.
+ References
+ Aguiar, M., & Mahajan, S. (2017). Topics in hyperplane arrangements (Vol. 226). American Mathematical Society.
+ Algaba, E., Bilbao, J. M., Fernández, J. R., & Jimenez, A. (2004). The Lovász extension of market games. Essays in cooperative games (pp. 229–238). Springer.
+ Aumann, R. J., & Maschler, M. (1985). Game theoretic analysis of a bankruptcy problem from the Talmud. Journal of Economic Theory, 36(2), 195–213.
+ Barvinok, A. (2002). A course in convexity (Vol. 54). American Mathematical Society.
+ Bauschke, H. H., & Combettes, P. L. (2011). Convex analysis and monotone operator theory in Hilbert spaces (Vol. 408). Springer.
+ Billera, L. J., Moore, J. T., Moraites, C. D., Wang, Y., & Williams, K. (2012). Maximal unbalanced families. arXiv preprint arXiv:1209.2309.
+ Bondareva, O. N. (1963). Some applications of linear programming methods to the theory of cooperative games. Problemy Kibernetiki, 10, 119–139.
+ Castillo, F., & Liu, F. (2022). Deformation cones of nested braid fans. International Mathematics Research Notices, 2022(3), 1973–2026.
+ Cesco, J. C. (1998). A convergent transfer scheme to the core of a TU-game. Revista de Matemáticas Aplicadas, 19(1-2), 23–35.
+ Driessen, T. S. (2013). Cooperative games, solutions and applications (Vol. 3). Springer Science & Business Media.
+ Edmonds, J. (1970). Submodular functions, matroids and certain polyhedra. Combinatorial Structures and Their Applications.
+ Fujishige, S. (2005). Submodular functions and optimization. Elsevier.
+ Gillies, D. B. (1959). Solutions to general non-zero-sum games. Contributions to the Theory of Games, 4, 47–85.
+ Grabisch, M. (2016). Set functions, games and capacities in decision making (Vol. 46). Springer.
+ Grabisch, M., & Sudhölter, P. (2021). Characterization of TU games with stable cores by nested balancedness. Mathematical Programming, 1–26.
+ Kalai, E., & Zemel, E. (1982). Totally balanced games and games of flow. Mathematics of Operations Research, 7(3), 476–478.
+ Kamiya, H., Takemura, A., & Terao, H. (2012). Arrangements stable under the Coxeter groups. Configuration spaces (pp. 327–354). Springer.
+ Kühne, L. (2020). The universality of the resonance arrangement and its Betti numbers. arXiv preprint arXiv:2008.10553.
+ Laplace Mermoud, D., Grabisch, M., & Sudhölter, P. (2022). Core stability and other applications of minimal balanced collections (No. 4/2022). University of Southern Denmark, Department of Economics.
+ Lucas, W. F. (1969). The proof that a game may not have a solution. Transactions of the American Mathematical Society, 137, 219–229.
+ Lucas, W. F. (1992). Von Neumann–Morgenstern stable sets. Handbook of Game Theory with Economic Applications, 1, 543–590.
+ O’Neill, B. (1982). A problem of rights arbitration from the Talmud. Mathematical Social Sciences, 2(4), 345–371.
+ Owen, G. (1972). Multilinear extensions of games. Management Science, 18(5-part-2), 64–79.
+ Postnikov, A. (2009). Permutohedra, associahedra, and beyond. International Mathematics Research Notices, 2009(6), 1026–1106.
+ Rehberg, S. (2021). Combinatorial reciprocity theorems for generalized permutahedra, hypergraphs, and pruned inside-out polytopes. arXiv preprint arXiv:2103.09073.
+ Rosenmüller, J. (2013). Game theory: Stochastics, information, strategies and cooperation (Vol. 25). Springer Science & Business Media.
+ Schrijver, A. (2003). Combinatorial optimization: Polyhedra and efficiency (Vol. 24). Springer.
+ Shapley, L. S. (1952). Notes on the n-person game, III: Some variants of the von Neumann–Morgenstern definition of solution. RAND Corporation. https://doi.org/10.7249/RM817
+ Shapley, L. S. (1965). On balanced sets and cores (tech. rep.). Santa Monica, CA, RAND Corporation.
+ Shapley, L. S. (1971). Cores of convex games. International Journal of Game Theory, 1(1), 11–26.
+ Shapley, L. S., & Shubik, M. (1967). Ownership and the production function. The Quarterly Journal of Economics, 81(1), 88–111.
+ Shapley, L. S., & Shubik, M. (1969). On market games. Journal of Economic Theory, 1(1), 9–25.
+ Solymosi, T., & Raghavan, T. (2001). Assignment games with stable core. International Journal of Game Theory, 30(2), 177–185.
+ Stearns, R. E. (1968). Convergent transfer schemes for n-person games. Transactions of the American Mathematical Society, 134(3), 449–459.
+ van Gellekom, J., Potters, J. A., & Reijnierse, J. (1999). Prosperity properties of TU-games. International Journal of Game Theory, 28(2), 211–227.
+ von Neumann, J. (1928). Zur Theorie der Gesellschaftsspiele [On the theory of games of strategy] (S. Bargmann, Trans.). Mathematische Annalen, 100(1), 295–320.
+ von Neumann, J., & Morgenstern, O. (1944). Theory of games and economic behavior. Princeton University Press.
+ Centre d’Économie de la Sorbonne, Université Paris I Panthéon-Sorbonne, 106-112 boulevard de l’Hôpital, 75013, Paris, France
+ Email address: dylan.laplace.mermoud@gmail.com
XNE1T4oBgHgl3EQfJQOq/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
a9AyT4oBgHgl3EQfW_fi/content/tmp_files/2301.00176v1.pdf.txt ADDED
@@ -0,0 +1,1072 @@
+ arXiv:2301.00176v1 [math.NA] 31 Dec 2022
+ RANDOMIZED KACZMARZ METHOD WITH ADAPTIVE STEPSIZES FOR INCONSISTENT LINEAR SYSTEMS
+ YUN ZENG, DEREN HAN, YANSHENG SU, AND JIAXIN XIE
+ Abstract. We investigate the randomized Kaczmarz method that adaptively updates the stepsize using readily available information for solving inconsistent linear systems. A novel geometric interpretation is provided which shows that the proposed method can be viewed as an orthogonal projection method in some sense. We prove that this method converges linearly in expectation to the unique minimum Euclidean norm least-squares solution of the linear system. Numerical experiments are given to illustrate the theoretical results.
+ 1. Introduction
+ Solving systems of linear equations is a fundamental problem in scientific computing and engineering. It comes up in many real-world applications such as signal processing [6], optimal control [41], machine learning [8], and partial differential equations [40]. The Kaczmarz method [24], also known as the algebraic reconstruction technique (ART) [15,22], is a classic yet effective row-action iterative solver for large-scale linear systems of equations
+ (1)    Ax = b,  A ∈ R^{m×n},  b ∈ R^m.
+ At each step of the original Kaczmarz method, a row of the system is sampled and the previous iterate is orthogonally projected onto the hyperplane defined by that row.
+ In the literature, there was empirical evidence that using the rows of the matrix A in random order rather than in their given order can often accelerate the convergence of the Kaczmarz method [13, 22, 32]. In the seminal paper [47], Strohmer and Vershynin studied the randomized Kaczmarz (RK) method and proved its linear convergence in expectation provided that the linear system (1) is consistent. Subsequently, there has been a large amount of work on the development of Kaczmarz-type methods, including accelerated randomized Kaczmarz methods [19,27,28], block Kaczmarz methods [17,31,33,36], greedy randomized Kaczmarz methods [2,16], randomized sparse Kaczmarz methods [9,45], etc. Nevertheless, none of these methods converges if the linear system (1) is not consistent. Indeed, Needell [34] showed that RK applied to inconsistent linear systems converges only to within a radius (convergence horizon) of the least-squares solution (see Theorem 2.1); see also [4] for some further comments.
+ Key words: system of linear equations, inconsistency, Kaczmarz, adaptive stepsize, minimum Euclidean norm least-squares solution
+ Mathematics subject classification (2020): 65F10, 65F20, 90C25, 15A06, 68W20
+ It is well-known that the so-called relaxation parameters or stepsizes λ_1, . . . , λ_k are important for the Kaczmarz method in practice. The original Kaczmarz method with decreasing stepsizes for solving inconsistent systems has been investigated in [7,20]. It has been shown that, with stepsizes close to zero and an appropriate initial point, the Kaczmarz method converges inside the convergence horizon to the minimum Euclidean norm least-squares solution. However, its convergence rate is difficult to obtain. Hence, for the RK method, a natural and interesting question is: is it possible that, with carefully designed stepsizes, the RK method converges for inconsistent systems? Furthermore, can the convergence rate of the proposed method be obtained easily?
+ In this paper, we answer the above questions positively. We investigate the RK method with adaptive stepsizes for solving inconsistent systems. To the best of our knowledge, this is the first time that dynamic stepsizes have been identified as a key ingredient for proving the convergence of the RK method for inconsistent systems. We show that our strategy is effective in simplifying the analysis and endows the proposed method with a linear convergence rate.
+ Finally, we mention that, in a fruitful line of research, extended randomized methods have been proposed for solving inconsistent systems, which include the randomized extended Kaczmarz (REK) method [10, 51], its block or deterministic variants [3, 11, 12, 37, 42, 43, 48–50], the greedy randomized augmented Kaczmarz (GRAK) method [4], the randomized extended Gauss-Seidel (REGS) method [10, 30], etc. These methods make use of both rows and columns of A in each step (see (8)) and work for general linear systems (consistent or inconsistent, full-rank or rank-deficient). Another randomized method that can be used to solve inconsistent systems with a full column-rank coefficient matrix is the randomized coordinate descent (RCD) method [26]; we refer to [1] for more discussion of the RCD method.
+ Table 1. The pseudoinverse solution A†b of Ax = b.
+ Ax = b       | rank(A) | A†b
+ ------------ | ------- | ------------------------------------------
+ consistent   | = n     | unique solution
+ consistent   | < n     | unique minimum Euclidean norm solution
+ inconsistent | = n     | unique least-squares (LS) solution
+ inconsistent | < n     | unique minimum Euclidean norm LS solution
+ The remainder of the paper is organized as follows. After introducing some preliminaries in Section 2, we present and analyze the RK method with adaptive stepsizes in Section 3. In Section 4, we perform some numerical experiments to show the effectiveness of the proposed method. Finally, we conclude the paper in Section 5.
+ 2. Preliminaries
+ 2.1. Notations. Throughout the paper, for any random variables ξ and ζ, we use E[ξ] and E[ξ|ζ] to denote the expectation of ξ and the conditional expectation of ξ given ζ. For an integer m ≥ 1, let [m] := {1, . . . , m}. For any vector x ∈ R^n, we use x_i, x^⊤, and ∥x∥_2 to denote the i-th entry, the transpose and the Euclidean norm of x, respectively. For any matrix A ∈ R^{m×n}, we use A_{i,:}, A_{:,j}, A^⊤, A†, ∥A∥_2, ∥A∥_F, Range(A), and Null(A) to denote the i-th row, the j-th column, the transpose, the Moore-Penrose pseudoinverse, the spectral norm, the Frobenius norm, the column space, and the null space of A, respectively. The nonzero singular values of a matrix A are σ_1(A) ≥ σ_2(A) ≥ . . . ≥ σ_r(A) := σ_min(A) > 0, where r is the rank of A and σ_min(A) denotes the smallest nonzero singular value of A. We have ∥A∥_2 = σ_1(A) and ∥A∥_F = (Σ_{i=1}^{r} σ_i(A)^2)^{1/2}.
+ 2.2. The pseudoinverse solution. In this paper, we are interested in the pseudoinverse solution A†b of the linear system (1). Here we would like to make clear what A†b represents in different cases of linear systems [5,11,14]. Table 1 summarizes the results.
+ 2.3. The RK method. The RK method for solving the linear system (1) begins with an arbitrary vector x^0, and in the k-th iteration computes
+ (2)    x^{k+1} = x^k − λ (A_{i_k,:} x^k − b_{i_k}) / ∥A_{i_k,:}∥_2^2 · A_{i_k,:}^⊤,
+ where the index i_k is i.i.d. selected from [m] and λ ∈ (0, 2) is the stepsize. When λ = 1, it reduces to the classical RK method. In the seminal paper [47], Strohmer and Vershynin proved the first linear convergence rate of the RK method for consistent systems. Later, Needell [34,35] studied the RK method for inconsistent cases. The result is precisely restated below.
+ Theorem 2.1 ([35], Corollary 5.1). Starting from any initial vector x^0 ∈ Range(A^⊤), the expected error of the RK method (2) in the k-th iteration satisfies
+ (3)    E[∥x^k − A†b∥_2^2] ≤ (1 − 2λ(1 − λ) σ_min^2(A) / ∥A∥_F^2)^k ∥x^0 − A†b∥_2^2 + λ a_max^2 ∥e∥_2^2 / ((1 − λ) a_min^2 σ_min^2(A)),
+ where the index i is selected with probability ∥A_{i,:}∥_2^2 / ∥A∥_F^2, λ ∈ (0, 1), e = AA†b − b, a_min^2 = min_{i∈[m]} ∥A_{i,:}∥_2^2, and a_max^2 = max_{i∈[m]} ∥A_{i,:}∥_2^2.
+ It can be seen that, for arbitrary λ, the above result implies a tradeoff between a smaller convergence horizon and slower convergence. In this paper, we study the RK method with adaptive stepsizes λ_k to eliminate the last term in (3) (see Corollary 3.2).
+ 3. Adaptive stepsizes for RK
+ In this section, we introduce RK with adaptive stepsizes (RKAS) for solving the linear system (1). The method is formally described in Algorithm 1.
+ Algorithm 1 RK with adaptive stepsizes (RKAS)
+ Input: A ∈ R^{m×n}, b ∈ R^m, k = 0, and initial points x^0 = 0, r^0 = Ax^0 − b = −b.
+ 1: Select i_k ∈ [m] with probability Pr(i_k = i) = ∥A_{i,:}∥_2^2 / ∥A∥_F^2.
+ 2: Compute α_k = ⟨AA_{i_k,:}^⊤, r^k⟩ / ∥AA_{i_k,:}^⊤∥_2^2.
+ 3: Update x^{k+1} = x^k − α_k A_{i_k,:}^⊤ and r^{k+1} = r^k − α_k AA_{i_k,:}^⊤.
+ 4: If the stopping rule is satisfied, stop and go to output. Otherwise, set k = k + 1 and return to Step 1.
+ Output: The approximate solution.
+ is to compute AA⊤
195
+ ik,:.
196
+ Let B := AA⊤, then B:,ik = AA⊤
197
+ ik,:.
198
+ Thus, if it is possible to
199
+ store B = AA⊤ at the initialization, Algorithm 1 could be faster in practice. In fact, this
200
+ strategy is also adopted by the greedy randomized Kaczmarz method [2,4] and the weighted
201
+ randomized Kaczmarz method [46].
202
+ 3.1. A geometric interpretation. We present an intuitive geometric explanation of Al-
+ 3.1. A geometric interpretation. We present an intuitive geometric explanation of Algorithm 1 in this subsection. Consider the following least-squares problem
+ min_{x∈R^n} f(x) := (1/2)∥Ax − b∥_2^2.
+ Since
+ f(x) = (1/2) x^⊤A^⊤Ax − x^⊤A^⊤b + (1/2)∥b∥_2^2 = (1/2) x^⊤A^⊤Ax − x^⊤A^⊤AA†b + (1/2)∥b∥_2^2 = (1/2)∥Ax − AA†b∥_2^2 − (1/2)∥AA†b∥_2^2 + (1/2)∥b∥_2^2,
+ where the second equality follows from the fact that A^⊤AA†b = A^⊤b, the least-squares problem can be equivalently reformulated as
+ (4)    min_{x∈R^n} (1/2)∥Ax − AA†b∥_2^2.
+ When using RK (2) to solve the least-squares problem (4), in the k-th iteration, we may expect the distance between Ax^{k+1} and AA†b to be as small as possible. This leads to the following optimization problem:
+ (5)    x^{k+1} = arg min_{x∈R^n} ∥Ax − AA†b∥_2^2 subject to x = x^k − λ (A_{i_k,:} x^k − b_{i_k}) / ∥A_{i_k,:}∥_2^2 · A_{i_k,:}^⊤, λ ∈ R.
+ Note that r^{k+1} in Step 3 is actually obtained by an incremental update:
+ r^{k+1} = r^k − α_k AA_{i_k,:}^⊤ = Ax^k − b − α_k AA_{i_k,:}^⊤ = Ax^{k+1} − b.
+ Using the fact that A^⊤AA†b = A^⊤b and letting γ_k = (A_{i_k,:} x^k − b_{i_k}) / ∥A_{i_k,:}∥_2^2, the minimizer of (5) is achieved when
+ λ_k^* = ⟨AA_{i_k,:}^⊤, Ax^k − AA†b⟩ / (∥AA_{i_k,:}^⊤∥_2^2 γ_k) = ⟨AA_{i_k,:}^⊤, Ax^k − b⟩ / (∥AA_{i_k,:}^⊤∥_2^2 γ_k) = ⟨AA_{i_k,:}^⊤, r^k⟩ / (∥AA_{i_k,:}^⊤∥_2^2 γ_k).
+ Hence
+ x^{k+1} = x^k − λ_k^* γ_k A_{i_k,:}^⊤ = x^k − (⟨AA_{i_k,:}^⊤, r^k⟩ / ∥AA_{i_k,:}^⊤∥_2^2) A_{i_k,:}^⊤ = x^k − α_k A_{i_k,:}^⊤,
+ which is exactly the iteration in Step 3 of Algorithm 1. It follows from (5) that the iterate x^{k+1} produced by Algorithm 1 is such that Ax^{k+1} is the orthogonal projection of AA†b onto Ax^k + Span{AA_{i_k,:}^⊤}. The geometric interpretation of Algorithm 1 is presented in Figure 1.
+ [Figure 1: sketch of the projection step, showing the lines Ax^k + Span{AA_{i_k,:}^⊤} and Ax^{k+1} + Span{AA_{i_{k+1},:}^⊤} and the points AA†b, Ax^k, Ax^{k+1}, Ax^{k+2}.]
+ Figure 1. A geometric interpretation of Algorithm 1. The next iterate x^{k+1} arises such that Ax^{k+1} is the projection of AA†b onto Ax^k + Span{AA_{i_k,:}^⊤}.
+ 3.2. Convergence analysis. We now state our convergence results. The following result concerns the convergence of E[∥Ax^k − AA†b∥_2^2].
+ Theorem 3.1. For any given linear system Ax = b, let {x^k}_{k=0}^∞ be the iteration sequence generated by Algorithm 1 with x^0 = 0. Then
+ E[∥Ax^k − AA†b∥_2^2] ≤ (1 − σ_min^4(A) / (∥A∥_2^2 ∥A∥_F^2))^k ∥Ax^0 − AA†b∥_2^2.
+ Proof. Since Ax^{k+1} − AA†b and AA_{i_k,:}^⊤ are orthogonal, we have
+ ∥Ax^{k+1} − AA†b∥_2^2 = ∥Ax^k − AA†b∥_2^2 − ∥Ax^{k+1} − Ax^k∥_2^2
+                       = ∥Ax^k − AA†b∥_2^2 − ⟨AA_{i_k,:}^⊤, r^k⟩^2 / ∥AA_{i_k,:}^⊤∥_2^2
+                       = ∥Ax^k − AA†b∥_2^2 − ⟨AA_{i_k,:}^⊤, Ax^k − b⟩^2 / ∥AA_{i_k,:}^⊤∥_2^2
+                       = ∥Ax^k − AA†b∥_2^2 − ⟨AA_{i_k,:}^⊤, Ax^k − AA†b⟩^2 / ∥AA_{i_k,:}^⊤∥_2^2,
+ where the last equality follows from A^⊤b = A^⊤AA†b. Hence
+ E[∥Ax^{k+1} − AA†b∥_2^2 | x^k] = ∥Ax^k − AA†b∥_2^2 − Σ_{i=1}^{m} (∥A_{i,:}∥_2^2 / ∥A∥_F^2) ⟨AA_{i,:}^⊤, Ax^k − AA†b⟩^2 / ∥AA_{i,:}^⊤∥_2^2
+                               ≤ ∥Ax^k − AA†b∥_2^2 − Σ_{i=1}^{m} (∥A_{i,:}∥_2^2 / ∥A∥_F^2) ⟨AA_{i,:}^⊤, Ax^k − AA†b⟩^2 / (∥A∥_2^2 ∥A_{i,:}∥_2^2)
+                               = ∥Ax^k − AA†b∥_2^2 − Σ_{i=1}^{m} ⟨AA_{i,:}^⊤, Ax^k − AA†b⟩^2 / (∥A∥_2^2 ∥A∥_F^2)
+                               = ∥Ax^k − AA†b∥_2^2 − ∥AA^⊤(Ax^k − AA†b)∥_2^2 / (∥A∥_2^2 ∥A∥_F^2)
+                               ≤ (1 − σ_min^4(A) / (∥A∥_2^2 ∥A∥_F^2)) ∥Ax^k − AA†b∥_2^2,
+ where the first inequality follows from ∥AA_{i,:}^⊤∥_2 ≤ ∥A∥_2 ∥A_{i,:}∥_2 and the last inequality follows from the fact that A(x^k − A†b) ∈ Range(AA^⊤). Taking expectation over the entire history, we have
+ E[∥Ax^{k+1} − AA†b∥_2^2] ≤ (1 − σ_min^4(A) / (∥A∥_2^2 ∥A∥_F^2)) E[∥Ax^k − AA†b∥_2^2].
+ By induction on the iteration index k, we obtain the desired result. □
+ By Theorem 3.1, we can obtain the following linear convergence for the expected norm of the error.
+ Corollary 3.2. For any given linear system Ax = b, let {x^k}_{k=0}^∞ be the iteration sequence generated by Algorithm 1 with x^0 = 0. Then
+ E[∥x^k − A†b∥_2^2] ≤ σ_min^{−2}(A) (1 − σ_min^4(A) / (∥A∥_2^2 ∥A∥_F^2))^k ∥Ax^0 − AA†b∥_2^2.
+ Proof. According to the iteration of Algorithm 1 and x^0 = 0, we know that x^k ∈ Range(A^⊤). As A†b ∈ Range(A^⊤), we have x^k − A†b ∈ Range(A^⊤). This implies
+ ∥Ax^k − AA†b∥_2^2 ≥ σ_min^2(A) ∥x^k − A†b∥_2^2.
+ Then, by Theorem 3.1, we arrive at this corollary. □
+ Remark 3.3. We note that one can choose any x^0 ∈ R^n as the initial vector. In this case, we can prove that
+ E[∥x^k − x^0_*∥_2^2] ≤ σ_min^{−2}(A) (1 − σ_min^4(A) / (∥A∥_2^2 ∥A∥_F^2))^k ∥Ax^0 − Ax^0_*∥_2^2,
+ where x^0_* := A†b + (I − A†A)x^0. We refer the reader to [12,18,19] for more details.
+ Remark 3.4. For the RK method (2), from (3) and with an analysis analogous to [35, Corollary 2.2], for any desired ε, using a stepsize
+ λ = ε σ_min^2(A) a_min^2 / (2ε σ_min^2(A) a_min^2 + 2∥e∥_2^2 a_max^2),
+ one has that after
+ (6)    k = 2 log(2ε_0/ε) (∥A∥_F^2 / σ_min^2(A) + ∥A∥_F^2 ∥e∥_2^2 a_max^2 / (ε σ_min^4(A) a_min^2))
+ iterations, E[∥x^k − A†b∥_2^2] ≤ ε, where ε_0 = ∥x^0 − A†b∥_2^2. For Algorithm 1, according to Corollary 3.2, we have that after
+ (7)    k = log(ε_1 / (ε σ_min^2(A))) ∥A∥_F^2 ∥A∥_2^2 / σ_min^4(A)
+ iterations, E[∥x^k − A†b∥_2^2] ≤ ε, where ε_1 = ∥Ax^0 − AA†b∥_2^2. From (6) and (7), we see that the RKAS method needs fewer iterations than the RK method to obtain an approximate solution with accuracy ε < O(1/∥A∥_2^2).
+ Remark 3.5. Recently, the randomized extended Kaczmarz (REK) method [10,51] has attracted much attention for solving inconsistent systems. As shown by Du [10, Theorem 2], the convergence factor for REK is 1 − σ_min^2(A)/∥A∥_F^2, which is better than the factor 1 − σ_min^4(A)/(∥A∥_2^2 ∥A∥_F^2) established in Corollary 3.2. However, we note that RKAS is a row-action method, while REK uses both a row and a column of the matrix A in each step. Therefore, RKAS would be particularly suitable for handling large-scale and sparse systems, which is also confirmed by our numerical experiments.
+ 4. Numerical experiments
+ In this section, we describe some numerical results for the RKAS method for inconsistent systems. We also compare RKAS with REK [10,51] on a variety of test problems. Recall that REK generates two sequences {z^k}_{k=0}^∞ and {x^k}_{k=0}^∞ via
+ (8)    z^{k+1} = z^k − (A_{:,j_k}^⊤ z^k / ∥A_{:,j_k}∥_2^2) A_{:,j_k},    x^{k+1} = x^k − ((A_{i_k,:} x^k − b_{i_k} + (z^{k+1})_{i_k}) / ∥A_{i_k,:}∥_2^2) A_{i_k,:}^⊤,
+ where the column A_{:,j_k} is chosen with probability ∥A_{:,j_k}∥_2^2 / ∥A∥_F^2 and the row A_{i_k,:} is chosen with probability ∥A_{i_k,:}∥_2^2 / ∥A∥_F^2; see [10]. All methods are implemented in Matlab R2022a for Windows 10 on a desktop PC with the Intel(R) Core(TM) i7-10710U CPU @ 1.10GHz and 16 GB memory.
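+ For completeness, here is a minimal NumPy sketch of the REK iteration (8) under the sampling probabilities above (again our own illustration, not the authors' Matlab code; a fixed iteration budget replaces the stopping rule):
+ ```python
+ import numpy as np
+ 
+ def rek(A, b, n_iters=100_000, seed=0):
+     """Minimal sketch of the REK iteration (8)."""
+     rng = np.random.default_rng(seed)
+     m, n = A.shape
+     row_norms = np.einsum('ij,ij->i', A, A)   # ||A_{i,:}||_2^2
+     col_norms = np.einsum('ij,ij->j', A, A)   # ||A_{:,j}||_2^2
+     p_row = row_norms / row_norms.sum()
+     p_col = col_norms / col_norms.sum()
+     x, z = np.zeros(n), b.copy()              # z^0 = b, x^0 = 0
+     for _ in range(n_iters):
+         j = rng.choice(n, p=p_col)            # column step on z
+         z -= (A[:, j] @ z) / col_norms[j] * A[:, j]
+         i = rng.choice(m, p=p_row)            # row step on x
+         x -= (A[i] @ x - b[i] + z[i]) / row_norms[i] * A[i]
+     return x
+ ```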
+ As in Du et al. [11], to construct an inconsistent linear system, we set b = Ax + r, where x is a vector with entries generated from a standard normal distribution and the residual r ∈ Null(A^⊤). Note that one can obtain such a vector r by the Matlab function null. For RKAS, we set x^0 = 0, and for REK, we set z^0 = b and x^0 = 0. We stop the algorithms if the relative solution error (RSE) ∥x^k − A†b∥_2^2 / ∥A†b∥_2^2 ≤ 10^{−12}. We report the average number of iterations (denoted as Iter) and the average computing time in seconds (denoted as CPU) of RKAS and REK.
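+ In Python, the same construction can be sketched as follows (our own analogue of the Matlab setup; scipy.linalg.null_space plays the role of Matlab's null, and the dense Gaussian A is a placeholder for the test matrices described below):
+ ```python
+ import numpy as np
+ from scipy.linalg import null_space  # analogue of Matlab's null
+ 
+ def inconsistent_system(m, n, seed=0):
+     """Build b = A x + r with r in Null(A^T), as in the experiments."""
+     rng = np.random.default_rng(seed)
+     A = rng.standard_normal((m, n))   # placeholder coefficient matrix
+     x = rng.standard_normal(n)
+     Z = null_space(A.T)               # columns span Null(A^T); nonempty for m > n
+     r = Z @ rng.standard_normal(Z.shape[1])
+     return A, A @ x + r
+ ```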
+ 4.1. Synthetic data. We use the following two types of coefficient matrices.
+ • For given m, n, r, and κ > 1, we construct a dense matrix A by A = UDV^⊤, where U ∈ R^{m×r}, D ∈ R^{r×r}, and V ∈ R^{n×r}. Using Matlab colon notation, these matrices are generated by [U,~]=qr(randn(m,r),0), [V,~]=qr(randn(n,r),0), and D=diag(1+(κ-1).*rand(r,1)). So the condition number of A is upper bounded by κ.
+ • We construct a random sparse matrix by using the Matlab sparse random matrix function sprandn(m,n,density,rc), where density is the percentage of nonzero entries and rc is the reciprocal of the condition number.
+ Figures 2 and 3 illustrate our experimental results with a fixed n. In Figure 2, we plot the computing time of REK and RKAS for inconsistent linear systems with coefficient matrices A = UDV^⊤, where m = 1000, 2000, . . . , 10000, n = 100, r = 80, and κ(A) = 2 (left) or κ(A) = 10 (right). It can be observed from Figure 2 that REK is more efficient than RKAS for solving the dense problem, and changing the parameter κ(A) affects the performance of RKAS more than that of REK.
+ In Figure 3, we plot the computing time of REK and RKAS with random sparse matrices A, where m = 1000, 2000, . . . , 10000, n = 100, κ(A) = 2 (left) or κ(A) = 10 (right), and the sparsity of the coefficient matrices A is 0.1. Note that now κ(A) is exactly the condition number of A. It can be seen that RKAS performs better than REK. Indeed, this is because the computational cost of RKAS is much lower at each step. It can also be observed that the more rows there are relative to columns, the better RKAS performs compared with REK. This is because REK uses both a row and a column at each step, while RKAS is a row-action method where only a single row is used at each step. To illustrate this observation more clearly, in Figure 4, we plot the computing times of REK and RKAS with a fixed m = 10000 and n = 100, 200, . . . , 1000. It is clear that RKAS performs better than REK when n is small, and REK is better than RKAS when n is large.
+ [Figure 2: two panels plotting CPU time against the number of rows m = 1000, . . . , 10000, with curves for REK and RKAS.]
+ Figure 2. Figures depict the CPU time (in seconds) vs increasing number of rows for the case of the random dense matrix. The title of each plot indicates the values of n, r, and κ. All plots are averaged over 50 trials.
+ [Figure 3: two panels plotting CPU time against the number of rows m = 1000, . . . , 10000, with curves for REK and RKAS.]
+ Figure 3. Figures depict the CPU time (in seconds) vs increasing number of rows for the case of the random sparse matrix. The title of each plot indicates the values of n, κ, and sparsity. All plots are averaged over 50 trials.
+ 4.2. Real-world data. The real-world data are available via the SuiteSparse Matrix Collection [25]. The five matrices are nemsafm, df2177, ch8-8-b1, bibd_16_8, and ash958. Each dataset consists of a matrix A ∈ R^{m×n} and a vector b ∈ R^m. In our experiments, we only use the matrices A of the datasets and ignore the vector b. In Table 2, we report the number of iterations and the computing times for REK and RKAS. It can be observed that RKAS is comparable with REK for solving inconsistent linear systems.
+ [Figure 4: two panels plotting CPU time against the number of columns n = 100, . . . , 1000, with curves for REK and RKAS.]
+ Figure 4. Figures depict the CPU time (in seconds) vs increasing number of columns for the case of the random sparse matrix. The title of each plot indicates the values of m, κ, and sparsity. All plots are averaged over 50 trials.
+ Table 2. The average (50 trials of each algorithm) Iter and CPU of REK and RKAS for inconsistent linear systems with coefficient matrices from [25].
+ Matrix    | m × n       | rank | σmax(A)/σmin(A) | REK Iter | REK CPU | RKAS Iter | RKAS CPU
+ --------- | ----------- | ---- | --------------- | -------- | ------- | --------- | --------
+ nemsafm   | 334 × 2348  | 334  | 4.77            | 41308.70 | 1.3104  | 120565.48 | 2.3087
+ df2177    | 630 × 10358 | 630  | 2.01            | 20192.62 | 5.1010  | 21480.34  | 2.9148
+ ch8-8-b1  | 1568 × 64   | 63   | 3.48e+14        | 1800.96  | 0.0186  | 1686.84   | 0.0136
+ bibd_16_8 | 120 × 12870 | 120  | 9.54            | 7859.60  | 3.3143  | 151632.30 | 32.4403
+ ash958    | 958 × 292   | 292  | 3.20            | 15711.02 | 0.1037  | 42197.00  | 0.1924
+ 5. Concluding remarks
+ Consider the following least-squares problem
+ (9)    min_{x∈R^n} f(x) := (1/(2m))∥Ax − b∥_2^2 = (1/m) Σ_{i=1}^{m} f_i(x),
+ where f_i(x) = (1/2)(A_{i,:}x − b_i)^2. For convenience, we assume that A ∈ R^{m×n} is normalized so that ∥A_{i,:}∥_2^2 = 1 for each row of A. The RK method (2) can be seen as stochastic gradient descent (SGD) [21,29,44] applied to the least-squares problem (9). Indeed, SGD solves (9) using unbiased estimates of the gradient of the objective function, i.e., ∇f_i(x) such that E[∇f_i(x)] = ∇f(x). At each iteration, a random unbiased estimate ∇f_i(x) is drawn and SGD uses the following update formula
+ (10)    x^{k+1} = x^k − λ_k ∇f_i(x^k),
+ where λ_k is an appropriately chosen stepsize. Note that if a random row of the matrix A is selected and (10) is computed with ∇f_i(x^k) = (A_{i,:}x^k − b_i) A_{i,:}^⊤, then one recovers the RK method.
+ It is well-known that SGD suffers from slow convergence since the variance of the gradient estimate ∇f_i(x) does not naturally diminish, i.e., lim_{k→∞} E[∥∇f_{i_k}(x^k) − ∇f(x^k)∥_2^2] ≠ 0. Let x_* be an optimal point of (9) and consider the variance of its gradient estimate:
+ σ^2 = E[∥∇f_i(x_*) − ∇f(x_*)∥_2^2] = E[∥∇f_i(x_*)∥_2^2] = Σ_{i=1}^{m} (A_{i,:}x_* − b_i)^2 / m = ∥e∥_2^2 / m,
+ where e = Ax_* − b is the residual at x_*. When the system is consistent, as the iterate approaches x_*, the residual gradually drops to zero and thus so does the variance, which ensures the convergence of SGD with a constant stepsize. When the system is inconsistent, however, e ≠ 0 and the variance does not decrease to zero. In this case, variance reduction techniques are introduced [23,39]; otherwise a decreasing stepsize is required. Nevertheless, a decreasing stepsize has an adverse effect on the convergence of SGD, which is sublinear even if the objective function is strongly convex [38]. In this paper, we have shown that RK, i.e., SGD for (9), with our adaptive stepsize strategy enjoys a linear rate without any variance reduction procedure. A natural extension of our results is the design and analysis of adaptive stepsizes for SGD in the case of general convex or strongly convex functions. This should be an interesting and valuable topic that deserves in-depth study in the future.
+ Finally, we note that a number of advanced probability criteria have been investigated in the literature for the RK method, such as the greedy selection rule [2] and the weighted version [46]. These criteria can conveniently be adapted to the RKAS context for further improvement in efficiency.
+ References
+ [1] Zhong-Zhi Bai, Lu Wang, and Wen-Ting Wu. On convergence rate of the randomized Gauss–Seidel method. Linear Algebra Appl., 611:237–252, 2021.
+ [2] Zhong-Zhi Bai and Wen-Ting Wu. On greedy randomized Kaczmarz method for solving large sparse linear systems. SIAM J. Sci. Comput., 40(1):A592–A606, 2018.
+ [3] Zhong-Zhi Bai and Wen-Ting Wu. On partially randomized extended Kaczmarz method for solving large sparse overdetermined inconsistent linear systems. Linear Algebra Appl., 578:225–250, 2019.
+ [4] Zhong-Zhi Bai and Wen-Ting Wu. On greedy randomized augmented Kaczmarz method for solving large sparse inconsistent linear systems. SIAM J. Sci. Comput., 43(6):A3892–A3911, 2021.
+ [5] Adi Ben-Israel and Thomas N. E. Greville. Generalized inverses: theory and applications, volume 15. Springer Science & Business Media, 2003.
+ [6] Charles Byrne. A unified treatment of some iterative algorithms in signal processing and image reconstruction. Inverse Problems, 20(1):103–120, 2003.
+ [7] Yair Censor, Paul P. B. Eggermont, and Dan Gordon. Strong underrelaxation in Kaczmarz's method for inconsistent systems. Numer. Math., 41(1):83–92, 1983.
+ [8] Kai-Wei Chang, Cho-Jui Hsieh, and Chih-Jen Lin. Coordinate descent method for large-scale L2-loss linear support vector machines. J. Mach. Learn. Res., 9(7):1369–1398, 2008.
+ [9] Xuemei Chen and Jing Qin. Regularized Kaczmarz algorithms for tensor recovery. SIAM J. Imaging Sci., 14(4):1439–1471, 2021.
+ [10] Kui Du. Tight upper bounds for the convergence of the randomized extended Kaczmarz and Gauss-Seidel algorithms. Numer. Linear Algebra Appl., 26(3):e2233, 2019.
+ [11] Kui Du, Wu-Tao Si, and Xiao-Hui Sun. Randomized extended average block Kaczmarz for solving least squares. SIAM J. Sci. Comput., 42(6):A3541–A3559, 2020.
+ [12] Kui Du and Xiao-Hui Sun. Pseudoinverse-free randomized block iterative algorithms for consistent and inconsistent linear systems. arXiv preprint arXiv:2011.10353, 2020.
+ [13] Hans Georg Feichtinger, C. Cenker, M. Mayer, H. Steier, and Thomas Strohmer. New variants of the POCS method using affine subspaces of finite codimension with applications to irregular sampling. In Visual Communications and Image Processing '92, volume 1818, pages 299–310. SPIE, 1992.
+ [14] Gene H. Golub and Charles F. Van Loan. Matrix computations. JHU Press, 2013.
+ [15] Richard Gordon, Robert Bender, and Gabor T. Herman. Algebraic reconstruction techniques (ART) for three-dimensional electron microscopy and X-ray photography. J. Theor. Biol., 29(3):471–481, 1970.
+ [16] Robert M. Gower, Denali Molitor, Jacob Moorman, and Deanna Needell. On adaptive sketch-and-project for solving linear systems. SIAM J. Matrix Anal. Appl., 42(2):954–989, 2021.
+ [17] Robert M. Gower and Peter Richtárik. Randomized iterative methods for linear systems. SIAM J. Matrix Anal. Appl., 36(4):1660–1690, 2015.
+ [18] Deren Han, Yansheng Su, and Jiaxin Xie. Randomized Douglas-Rachford method for linear systems: Improved accuracy and efficiency. arXiv preprint arXiv:2207.04291, 2022.
+ [19] Deren Han and Jiaxin Xie. On pseudoinverse-free randomized methods for linear systems: Unified framework and acceleration. arXiv preprint arXiv:2208.05437, 2022.
+ [20] Martin Hanke and Wilhelm Niethammer. On the acceleration of Kaczmarz's method for inconsistent linear systems. Linear Algebra Appl., 130:83–98, 1990.
+ [21] Moritz Hardt, Ben Recht, and Yoram Singer. Train faster, generalize better: Stability of stochastic gradient descent. In Proc. 33rd Int. Conf. Machine Learning, pages 1225–1234. PMLR, 2016.
+ [22] Gabor T. Herman and Lorraine B. Meyer. Algebraic reconstruction techniques can be made computationally efficient (positron emission tomography application). IEEE Trans. Medical Imaging, 12(3):600–609, 1993.
+ [23] Rie Johnson and Tong Zhang. Accelerating stochastic gradient descent using predictive variance reduction. In Proc. Adv. Neural Inf. Process. Syst., pages 315–323, 2013.
+ [24] S. Karczmarz. Angenäherte Auflösung von Systemen linearer Gleichungen. Bull. Int. Acad. Pol. Sci. Lett., Cl. Sci. Math. Nat., pages 355–357, 1937.
+ [25] Scott P. Kolodziej, Mohsen Aznaveh, Matthew Bullock, Jarrett David, Timothy A. Davis, Matthew Henderson, Yifan Hu, and Read Sandstrom. The SuiteSparse matrix collection website interface. J. Open Source Softw., 4(35):1244, 2019.
+ [26] Dennis Leventhal and Adrian S. Lewis. Randomized methods for linear constraints: convergence rates and conditioning. Math. Oper. Res., 35(3):641–654, 2010.
+ [27] Ji Liu and Stephen Wright. An accelerated randomized Kaczmarz algorithm. Math. Comp., 85(297):153–178, 2016.
+ [28] Nicolas Loizou and Peter Richtárik. Momentum and stochastic momentum for stochastic gradient, Newton, proximal point and subspace descent methods. Comput. Optim. Appl., 77(3):653–710, 2020.
+ [29] Anna Ma and Deanna Needell. Stochastic gradient descent for linear systems with missing data. Numer. Math. Theory Methods Appl., 12(1):1–20, 2019.
+ [30] Anna Ma, Deanna Needell, and Aaditya Ramdas. Convergence properties of the randomized extended Gauss–Seidel and Kaczmarz methods. SIAM J. Matrix Anal. Appl., 36(4):1590–1604, 2015.
+ [31] Jacob D. Moorman, Thomas K. Tu, Denali Molitor, and Deanna Needell. Randomized Kaczmarz with averaging. BIT, 61(1):337–359, 2021.
+ [32] Frank Natterer. The mathematics of computerized tomography. SIAM, 2001.
+ [33] Ion Necoara. Faster randomized block Kaczmarz algorithms. SIAM J. Matrix Anal. Appl., 40(4):1425–1452, 2019.
+ [34] Deanna Needell. Randomized Kaczmarz solver for noisy linear systems. BIT, 50(2):395–403, 2010.
+ [35] Deanna Needell, Nathan Srebro, and Rachel Ward. Stochastic gradient descent, weighted sampling, and the randomized Kaczmarz algorithm. Math. Program., 155:549–573, 2016.
+ [36] Deanna Needell and Joel A. Tropp. Paved with good intentions: analysis of a randomized block Kaczmarz method. Linear Algebra Appl., 441:199–221, 2014.
+ [37] Deanna Needell and Rachel Ward. Two-subspace projection method for coherent overdetermined systems. J. Fourier Anal. Appl., 19(2):256–269, 2013.
+ [38] Arkadi Nemirovski, Anatoli Juditsky, Guanghui Lan, and Alexander Shapiro. Robust stochastic approximation approach to stochastic programming. SIAM J. Optim., 19(4):1574–1609, 2009.
+ [39] Lam M. Nguyen, Jie Liu, Katya Scheinberg, and Martin Takáč. SARAH: A novel method for machine learning problems using stochastic recursive gradient. In Proc. 34th Int. Conf. Machine Learning, pages 2613–2621. PMLR, 2017.
+ [40] Maxim A. Olshanskii and Eugene E. Tyrtyshnikov. Iterative methods for linear systems: theory and applications. SIAM, 2014.
+ [41] Andrei Patrascu and Ion Necoara. Nonasymptotic convergence of stochastic proximal point methods for constrained convex optimization. J. Mach. Learn. Res., 18(1):7204–7245, 2017.
+ [42] Constantin Popa. Extensions of block-projections methods with relaxation parameters to inconsistent and rank-deficient least-squares problems. BIT, 38(1):151–176, 1998.
+ [43] Constantin Popa. Characterization of the solutions set of inconsistent least-squares problems by an extended Kaczmarz algorithm. Korean J. Comput. Appl. Math., 6(1):51–64, 1999.
+ [44] Herbert Robbins and Sutton Monro. A stochastic approximation method. Ann. Math. Statistics, pages 400–407, 1951.
+ [45] Frank Schöpfer and Dirk A. Lorenz. Linear convergence of the randomized sparse Kaczmarz method. Math. Program., 173(1):509–536, 2019.
+ [46] Stefan Steinerberger. A weighted randomized Kaczmarz method for solving linear systems. Math. Comp., 90:2815–2826, 2021.
+ [47] Thomas Strohmer and Roman Vershynin. A randomized Kaczmarz algorithm with exponential convergence. J. Fourier Anal. Appl., 15(2):262–278, 2009.
+ [48] Nian-Ci Wu, Chengzhi Liu, Yatian Wang, and Qian Zuo. On the extended randomized multiple row method for solving linear least-squares problems. arXiv preprint arXiv:2210.03478, 2022.
+ [49] Nian-Ci Wu and Hua Xiang. Semiconvergence analysis of the randomized row iterative method and its extended variants. Numer. Linear Algebra Appl., 28(1):e2334, 2021.
+ [50] Wen-Ting Wu. On two-subspace randomized extended Kaczmarz method for solving large linear least-squares problems. Numer. Algorithms, 89(1):1–31, 2022.
+ [51] Anastasios Zouzias and Nikolaos M. Freris. Randomized extended Kaczmarz for solving least squares. SIAM J. Matrix Anal. Appl., 34(2):773–793, 2013.
+ School of Mathematical Sciences, Beihang University, Beijing, 100191, China. Email address: zengyun@buaa.edu.cn
+ LMIB of the Ministry of Education, School of Mathematical Sciences, Beihang University, Beijing, 100191, China. Email address: handr@buaa.edu.cn
+ School of Mathematical Sciences, Beihang University, Beijing, 100191, China. Email address: suyansheng@buaa.edu.cn
+ LMIB of the Ministry of Education, School of Mathematical Sciences, Beihang University, Beijing, 100191, China. Email address: xiejx@buaa.edu.cn
a9AyT4oBgHgl3EQfW_fi/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
d9FRT4oBgHgl3EQfUTfv/content/tmp_files/2301.13536v1.pdf.txt ADDED
@@ -0,0 +1,759 @@
+ LOW COMPLEXITY ADAPTIVE MACHINE LEARNING APPROACHES FOR END-TO-END LATENCY PREDICTION ∗
+ Pierre Larrenie, Thales SIX & LIGM, Université Gustave Eiffel, CNRS, Marne-la-Vallée, France, pierre.larrenie@esiee.fr
+ Jean-François Bercher, LIGM, Université Gustave Eiffel, CNRS, Marne-la-Vallée, France, jean-francois.bercher@esiee.fr
+ Olivier Venard, ESYCOM, Université Gustave Eiffel, CNRS, Marne-la-Vallée, France, olivier.venard@esiee.fr
+ Iyad Lahsen-Cherif, Institut National des Postes et Télécommunications (INPT), Rabat, Morocco, lahsencherif@inpt.ac.ma
+ ABSTRACT
+ Software Defined Networks have opened the door to statistical and AI-based techniques to improve the efficiency of networking, in particular to ensure a given Quality of Service (QoS) for specific applications by routing packets with awareness of the content's nature (VoIP, video, files, etc.) and needs (latency, bandwidth, etc.), so as to use the resources of a network efficiently.
+ Monitoring and predicting various Key Performance Indicators (KPIs) at any level can address such problems while preserving network bandwidth.
+ The question addressed in this work is the design of efficient, low-cost adaptive algorithms for KPI estimation, monitoring and prediction. We focus on end-to-end latency prediction, for which we illustrate our approaches and results on data obtained from a public generator provided after the recent international challenge on GNN [12].
+ In this paper, we improve our previously proposed low-cost estimators [6] by adding the adaptive dimension, and show that the performances are minimally modified while gaining the ability to track varying networks.
+ Keywords KPI Prediction · Machine Learning · Adaptivity · General Regression · SDN · Networking
+ 1 Introduction
+ Routing while ensuring quality of service (QoS) remains a significant challenge in all networks. Whatever the resources, their use must be optimized to satisfy both throughput and QoS for users. This is true for static wide-area networks, but even more so for mobile networks with dynamic topology.
+ The emergence of software-defined networks (SDNs) [11, 1] has enabled data to be shared more efficiently across communication layers. Services can provide network requirements to routers; routers acquire data about network performance and allocate resources to meet those requirements as well as possible. However, acquiring overall network performance can result in high network bandwidth consumption for signaling, degrading the available resources; this is particularly limiting for resource-constrained networks such as mobile ad hoc networks (MANETs).
+ We consider a network for which we wish to reduce signaling and perform intelligent routing. In order to limit the amount of signaling, the first axis is to estimate some key performance indicators (KPIs) from other KPIs. A second axis is to perform this prediction locally, at the node level, rather than as a global estimation in the network. Finally, if predictions are to be performed locally, the complexity of the algorithms will need to be low while preserving good prediction quality. The last point is to be able to detect and track changes in the state of the network, which implies that the predictors will have to use only a small number of the previous states of the network and be able to readapt continuously.
+ ∗Note: Paper accepted at the 5th International Conference on Machine Learning for Networking (MLN'2022) and will be published as a post-proceedings in Springer's LNCS.
+ arXiv:2301.13536v1 [cs.NI] 31 Jan 2023
+
+ The question addressed in this work is the design of efficient, low-cost adaptive algorithms for KPI estimation, monitoring and prediction. In the present paper, we improve our previously proposed low-cost estimators [6] by adding the adaptive dimension, and show that the performances are minimally modified while gaining the ability to track varying networks. We focus on end-to-end latency prediction, for which we illustrate our approaches and results on data we generated using a public generator made available after the recent international challenge [12]. The best performances of the state-of-the-art are obtained with Graph Neural Networks (GNNs) [10, 3, 12]. Although this is a global method while we favor local and adaptive methods, we used these performances as a benchmark.
+ We present related works in Section 2. Then, in Section 3, we present our main results from [6]. Instead of using high-performance but high-cost global methods based on Graph Neural Networks (GNNs) [10, 3, 12], we proposed to use standard machine learning regression methods. We showed that careful feature engineering and feature selection (based on queueing theory and the approach in [2]), as well as the use of a single feature with curve-fitting methods, allow us to obtain near state-of-the-art performances with a very low number of parameters, significantly lower learning and inference times compared to GNNs, and the ability to operate at the link level instead of the whole-graph level. In Section 4, we show how these block algorithms can be transformed into versions implementable in an iterative way (i.e., by taking into account the data one by one as they become available), with the originality of using a regularization term. Then, time-dependent estimation, or the addition of a forgetting factor, gives them an adaptive character. In Section 5 we describe the validation dataset we built from a public generator, and then the results of our experiments. Finally, we conclude, discuss the overall results and draw some perspectives.
+ 2 Related work
+ [4] present a heuristic and a Mixed Integer Programming approach to optimize Service Function Chain provisioning when using Network Function Virtualization for a service provider. Their approach relies on minimizing a trade-off between the expected latency and infrastructure resources.
+ Such routing-flow optimization in SDN may require additional information to be exchanged between the nodes of a network. This results in an increase in the volume of signaling, e.g. by performing measurements as in [7]. This is not a significant problem in unconstrained networks, i.e. static wired networks with near-infinite bandwidth, but it may decrease the performance of wireless networks with limited capacity. An interesting solution to save bandwidth would be to predict some of the KPIs from other KPIs and data exchanged globally between nodes.
+ In [8, 9], the authors proposed an application of SDN to MANETs in the domain of tactical networks. They proposed a multi-level SDN controller architecture to build both secure and resilient networking, while orchestrating communication efficiently under military constraints such as a high level of dynamism, frequent network failures, and resource-limited devices. The proposed architecture is a trade-off between the traditional centralized SDN architecture and a decentralized architecture, so as to meet dynamic in-network constraints.
+ [5] proposed a Quality of Experience (QoE) management strategy in an SDN to optimize the loading time of all the tiles of a mapping application. They have shown the impact of several KPIs on their application using a Generalized Linear Model (GLM). This mechanism makes the application aware of the current network state.
+ [10] used GNNs for predicting KPIs such as latency, error rate and jitter. They relied on the RouteNet architecture of Figure 1. The idea is to model the problem as a bipartite hypergraph mapping flows to links, as depicted in Figure 2. Aggregating messages in such a graph allows predicting the KPIs of the input network. The model needs to know the routing scheme, the traffic and the link properties. Their result is very promising and has been the subject of two ITU Challenges, in 2020 and 2021 [3, 12]. These challenges produced very good results, the top-3 teams being around 2% error in delay prediction in the sense of the Mean Absolute Percentage Error (MAPE).
+ In [2], very promising results were obtained, with a near-1% GNN model error (in the sense of MAPE) on the test set. The model mixes analytical M/M/1/K queueing theory, used to create extra features, with a GNN model. In order to satisfy the scalability constraint proposed by the challenge, the first part of the model operates at the link level.
+ Figure 1: RouteNet architecture [10].
+ [Figure 1 depicts the RouteNet pipeline: the topology, traffic matrix and routing scheme are fed to RouteNet, which outputs per-flow performance metrics (e.g., delay, jitter, loss).]
+ Figure 2: RouteNet [10] paths-links hypergraph transformation applied on a simple topology graph carrying 3 flows. (a) Simple topology: black circles represent communication nodes, double-headed arrows between them denote available symmetric communication links, and dotted arrows show flow paths. (b) Paths-links hypergraph of (a): circles (resp. dotted nodes) represent the link (resp. flow) entities defined in the first graph (Lij is the symmetric link between nodes i and j). Unidirectional arrows encode the relation "<flow> is carried by <link>".
+ 3 Simple machine-learning approaches for latency prediction
+ Our first problem is to define an estimator ŷ of the occupancy y as a function of the different available "features" of the system, with a joint objective of low complexity and good performance. To do so, we look for an approximation function f_θ(u) that estimates y from the features u and parameters θ:
+ \hat{y} = f_\theta(u) \qquad (1)
+ Here u and θ are vectors that collect the different features or parameters. Once an estimate of the occupancy is obtained, it is possible to get the latency prediction \hat{d}_n for a specific link n by the simple relation
+ \hat{d}_n = \hat{y}_n \, \frac{E[|P_n|]}{c_n} \qquad (2)
+ where E[|P_n|] is the observed average packet size on link n and c_n the capacity of this link.
+ For analytical simplicity, the parameters θ are sought by minimizing the mean square error
+ E\big[(y - \hat{y})^2\big] = E\big[(y - f_\theta(u))^2\big], \qquad (3)
+ although the performances are also often evaluated in the MAPE sense,
+ \mathcal{L}(\hat{y}, y) = \frac{100\%}{N} \sum_{n=1}^{N} \left| \frac{\hat{y}_n - y_n}{y_n} \right| \qquad (4)
+ which is preferred to the Mean Squared Error (MSE) because of its scale-invariance property.
182
+ We will focus here on two very simple models, although other machine learning models have also been
183
+ considered in [6]. Indeed, these two models lend themselves very easily to an adaptive formulation. In
184
+ this section, we will first describe these two approaches and their performances, before giving the general
185
+ adaptive formulation, which we will particularize in both cases.
186
+ 3.1
187
+ Feature Engineering and Linear Regression
188
+ Based on the assumption that the system may be approximated by a model whose essential features come
189
+ from M/M/1/K and M/G/1/K queue theory, we took essential parameters characterizing queueing
190
+ systems, such as: ρ, ρe, π0, πK, etc. and built further features by applying interactions and various
191
+ non-linearities (powers, log, exponential, square root). Then, we selected features in this set by a forward
192
+ step-wise selection method; i.e. by adding in turn each feature to potential models and keeping the feature
193
+ with best performance. Finally, we selected the model with best MAPE error. For a linear regression
194
+ model, this led us to select and keep a set of 4 simple features, which interestingly enough, have simple
195
+ interpretations:
196
+
197
+
198
+
199
+
200
+
201
+
202
+
203
+
204
+
205
+ π0 =
206
+ 1−ρ
207
+ 1−ρK+1
208
+ L = ρ + π0
209
+
210
+ k kρk
211
+ ρe = λe
212
+ λ ρ = λe
213
+ µ
214
+ Se = �
215
+ k kρk
216
+ e
217
+ (5)
218
+ where L is the expected number of packets in the queue according to M/M/1/K, π0 the probability
219
+ that the queue is empty according to M/M/1/K theory, ρe the effective queue utilization, and Se the
220
+ 3
221
+
222
+ unnormalized expected value of the effective number of packet in the queue buffer. These features can
223
+ be thought as a kind of data preprocessing, before applying ML algorithms, and this turns out to be a key
224
+ to achieving good performances. The 4 previous features have been used as input for several machine
225
+ learning models like Multi-Layer Perceptron model (MLP), Linear Regression, SVM, Random Forest,
226
+ Gradient Boosting Regression Tree. We only describe here the case of linear regression, since it is a
227
+ method for which an adaptive version is readily obtained. In this case, model (1) is simply
228
+ \hat{y} = \theta_0 + \theta_1\pi_0 + \theta_2 L + \theta_3\rho_e + \theta_4 S_e = \theta^T u \qquad (6)
+ with \theta^T = [\theta_0, \dots, \theta_4] and u^T = [1, \pi_0, L, \rho_e, S_e]. For the linear regression model in (6), it is well known that the regularized minimum mean squared error
+ J(\theta) = E\big[(y - \theta^T u)^2\big] + \alpha\,\|\theta\|^2 \qquad (7)
+ is minimized for
+ \theta : \; (R_{uu} + \alpha\mathbf{1})\,\theta = R_{yu} \qquad (8)
+ where we denoted R_{uu} = E[u u^T] the correlation matrix of u, R_{yu} = E[y u] the correlation vector of y and u, \mathbf{1} the identity matrix, and \alpha the regularization parameter.
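+ As an illustration, a block (batch) version of this estimator is only a few lines of code; the sketch below builds the features of (5) for one link and solves the empirical form of the regularized normal equations (8). Variable names and the default value of alpha are assumptions for illustration, not the paper's tuned settings.
+ import numpy as np
+
+ def queue_features(lam, lam_e, mu, K):
+     # Features of (5) from arrival rate lam, effective arrival rate lam_e,
+     # service rate mu and buffer size K; the leading 1 is the intercept term.
+     rho = lam / mu
+     rho_e = lam_e / mu                       # = (lam_e / lam) * rho
+     k = np.arange(K + 1)
+     pi0 = (1.0 - rho) / (1.0 - rho ** (K + 1))
+     L = rho + pi0 * np.sum(k * rho ** k)
+     Se = np.sum(k * rho_e ** k)
+     return np.array([1.0, pi0, L, rho_e, Se])
+
+ def ridge_fit(U, y, alpha=0.08):
+     # Solve (U^T U + alpha*I) theta = U^T y, the empirical counterpart of (8).
+     d = U.shape[1]
+     return np.linalg.solve(U.T @ U + alpha * np.eye(d), U.T @ y)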
+ As far as performance is concerned, this approach was evaluated using static data from the GNN ITU Challenge 2021 [12]. Compared to the state-of-the-art, our linear regression with carefully selected features shows a very slight performance degradation: 1.74% in MAPE while the best state-of-the-art method is at 1.27%. One strong advantage is in terms of training and inference time: training takes less than a second whereas the GNN requires more than 8 hours, and the inference time for the complete network is also much lower, by a factor of almost 1000 (0.296 s vs 214 s).
+ 3.2 Curve Regression by Bernstein polynomials
+ There is a high interdependence between the features selected in Equation (5), since all of them can be expressed in terms of ρe. Furthermore, data exploration confirms that ρe is the prominent feature for occupancy prediction (and in turn latency prediction), as exemplified in Figure 3.
+ It is then tempting to further simplify our feature space and estimate the occupancy from a non-linear transformation of the single feature ρe, as
+ \hat{y} = g(\rho_e) \qquad (9)
+ where \hat{y} is the estimate of the occupancy y. The concerns are of course to define simple and efficient functions g, with a low number of parameters, that can model the kind of growth shown in Figure 3, and to check that the performance remains satisfactory.
+ Figure 3: Data of ITU Challenge 2021 [12], ρe vs queue occupancy. The color scale indicates the density of the point cloud.
+ The estimator g is defined as a linear combination of simple functions f_n:
+ \hat{y} = g(\rho_e) = \sum_n \theta_n\, f_n(\rho_e) \qquad (10)
+ which is also a linear model in terms of the functions f_n(\rho_e).
+ Several solutions were considered in [6] to define or choose the functions f_n. Since the Bernstein polynomials form a basis of the set of polynomials on the interval [0, 1], and the approximation of any continuous function on [0, 1] by a Bernstein polynomial converges uniformly, we were led to these polynomials:
+ f_n^K(x) = \binom{K}{n}\, x^n (1-x)^{K-n} \qquad (11)
+ where K is the maximum order of the polynomials.
+ As mentioned, (10) can then be rewritten as the linear model
+ \hat{y} = g(\rho_e) = \sum_n \theta_n\, f_n(\rho_e) = \theta^T u \qquad (12)
+ with \theta^T = [\theta_0, \dots, \theta_K] and u^T = [f_0^K(\rho_e), f_1^K(\rho_e), \dots, f_K^K(\rho_e)]. Hence, we have the same form as in (8) for the solution.
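+ A sketch of the corresponding design matrix, assuming NumPy/SciPy; the degree K = 8 matches the experiments reported below, and the fit can reuse the same regularized solve as in (8).
+ import numpy as np
+ from scipy.special import comb
+
+ def bernstein_design(rho_e, K=8):
+     # Columns f_n^K(x) = C(K,n) x^n (1-x)^(K-n), cf. (11), for x = rho_e in [0, 1].
+     x = np.asarray(rho_e, dtype=float)
+     return np.column_stack([comb(K, n) * x**n * (1.0 - x)**(K - n)
+                             for n in range(K + 1)])
+
+ # e.g. theta = ridge_fit(bernstein_design(rho_e), y), with ridge_fit as sketched in Section 3.1.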
+ In terms of performance, we again obtained a minor degradation in MAPE (1.68%) compared to the state-of-the-art (1.29%), while improving the wall-clock training and inference times by several orders of magnitude (2 min/3.14 s vs 8 hrs/214 s), though a bit less than the simple linear regression.
+ 4 Adaptive versions
+ We place ourselves in the context where we have regular snapshots of the state of the network, which allows us both to monitor the quality of predictions and to track changes in the network. For the n-th series of measurements, let us denote by y(n) the measured latency and by u(n) the features. We can also group several snapshots or several links into a vector of latencies y(n) and a matrix U(n). In the following we derive the equations for this block case, which immediately includes the scalar case.
+ The minimum mean square error (7), which has the explicit solution (8), can also be minimized by a gradient algorithm as
+ \theta_{k+1} = \theta_k - \mu\, \nabla J(\theta)|_{\theta=\theta_k} \qquad (13)
+ \phantom{\theta_{k+1}} = \theta_k - \mu\,\big( (R_{uu} + \alpha\mathbf{1})\,\theta_k - R_{yu} \big). \qquad (14)
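+ A minimal sketch of the iteration (13)-(14), assuming the correlation estimates are available as NumPy arrays; the step size mu, the regularization alpha and the iteration count are illustrative values only.
+ import numpy as np
+
+ def gradient_solve(R_uu, r_yu, mu=0.01, alpha=0.08, n_iter=1000):
+     # Iterate (14): theta <- theta - mu * ((R_uu + alpha*I) theta - r_yu)
+     d = R_uu.shape[0]
+     theta = np.zeros(d)
+     A = R_uu + alpha * np.eye(d)
+     for _ in range(n_iter):
+         theta -= mu * (A @ theta - r_yu)
+     return theta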
+ In (14), we can substitute the true values with estimated ones. In order to introduce adaptivity to context changes in the network, these estimates must preserve the temporal dimension. We thus use either a sliding average
+ \hat{R}_{uu}(n) = \sum_{l=0}^{L} U(n-l)U(n-l)^T, \qquad \hat{R}_{yu}(n) = \sum_{l=0}^{L} U(n-l)\,y(n-l) \qquad (15)
+ or an exponential mean
+ \hat{R}_{uu}(n) = \sum_{l=0}^{n} \lambda^{n-l}\, U(l)U(l)^T = \lambda\,\hat{R}_{uu}(n-1) + U(n)U(n)^T, \qquad \hat{R}_{yu}(n) = \lambda\,\hat{R}_{yu}(n-1) + U(n)\,y(n) \qquad (16)
+ where \lambda \le 1 is the forgetting factor.
+ In the limit case where we take either L = 0 or \lambda = 0 in the previous formulas, we get the 'instantaneous estimates'
+ \hat{R}_{uu}(n) = U(n)U(n)^T, \qquad \hat{R}_{yu}(n) = U(n)\,y(n). \qquad (17)
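+ The exponential mean (16) maps directly to a one-line update per snapshot; a minimal sketch for scalar observations (feature vector u(n), measurement y(n)), where the class name and the zero initialization are assumptions.
+ import numpy as np
+
+ class CorrelationTracker:
+     # Running estimates of R_uu and R_yu with a forgetting factor, cf. (16).
+     def __init__(self, dim, lam=0.9):
+         self.lam = lam
+         self.R_uu = np.zeros((dim, dim))
+         self.r_yu = np.zeros(dim)
+
+     def update(self, u, y):
+         self.R_uu = self.lam * self.R_uu + np.outer(u, u)
+         self.r_yu = self.lam * self.r_yu + y * u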
+ Substituting the instantaneous estimates (17) into (14), we obtain
+ \theta(n+1) = (1 - \mu\alpha)\,\theta(n) - \mu\, U(n)\big( U(n)^T \theta(n) - y(n) \big) \qquad (18)
+ which reduces to the well-known LMS algorithm [14] in the scalar case with no regularization (\alpha = 0).
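+ In code, one step of (18) for a scalar observation is as follows, assuming NumPy arrays; mu and alpha are illustrative values.
+ def lms_step(theta, u, y, mu=0.01, alpha=0.08):
+     # One regularized ('leaky') LMS update, cf. (18).
+     return (1.0 - mu * alpha) * theta - mu * (u @ theta - y) * u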
+ Alternatively, one can try to solve the normal equation (8) directly, using time-dependent estimates such as the exponential mean (16). The difficulty with the solution
+ \hat{\theta}(n+1) = \big( \hat{R}_{uu}(n+1) + \alpha\mathbf{1} \big)^{-1} \hat{R}_{yu}(n+1) \qquad (19)
+ is the inversion, for each n, of the correlation matrix. Let us denote
+ K(n+1) = \big( \hat{R}_{uu}(n+1) + \alpha\mathbf{1} \big)^{-1}. \qquad (20)
+ Using (16), we have
+ K(n+1)^{-1} = \lambda\,\hat{R}_{uu}(n) + U(n+1)U(n+1)^T + \alpha\mathbf{1} \qquad (21)
+ \phantom{K(n+1)^{-1}} = \lambda\big( \hat{R}_{uu}(n) + \alpha\mathbf{1} \big) + U(n+1)U(n+1)^T + \alpha(1-\lambda)\mathbf{1} \qquad (22)
+ \phantom{K(n+1)^{-1}} = \lambda\, K(n)^{-1} + U(n+1)U(n+1)^T + \alpha(1-\lambda)\mathbf{1} \qquad (23)
+
+ and
+ K(n+1) = \big[ \lambda K(n)^{-1} + U(n+1)U(n+1)^T + \alpha(1-\lambda)\mathbf{1} \big]^{-1} \qquad (24)
+ \phantom{K(n+1)} = \big[ Q(n+1) + \delta\mathbf{1} \big]^{-1} \qquad (25)
+ with
+ Q(n+1) = \lambda K(n)^{-1} + U(n+1)U(n+1)^T \qquad (26)
+ and \delta = \alpha(1-\lambda). The matrix inversion lemma reduces the inversion of Q(n+1) to
+ Q(n+1)^{-1} = \frac{1}{\lambda}K(n) - \frac{1}{\lambda^2}\, K(n)U(n+1) \Big[ \mathbf{1} + \frac{1}{\lambda}\, U(n+1)^T K(n) U(n+1) \Big]^{-1} U(n+1)^T K(n), \qquad (27)
+ which simplifies to
+ Q(n+1)^{-1} = \frac{1}{\lambda}K(n) - \frac{1}{\lambda^2}\, \frac{K(n)\,u(n+1)\,u(n+1)^T K(n)}{1 + \frac{1}{\lambda}\, u(n+1)^T K(n)\, u(n+1)} \qquad (28)
+ for scalar observations. Now, we can use a Taylor expansion to get
+ K(n+1) = \big[ Q(n+1) + \delta\mathbf{1} \big]^{-1} = Q(n+1)^{-1} - \delta\, Q(n+1)^{-2} + \delta^2\, Q(n+1)^{-3} + \dots \qquad (29)
+ This gives us a way to compute recursively the inverse of the regularized estimate of the correlation matrix, by combining (27) and (29) into
+ K(n+1) \approx Q(n+1)^{-1} - \delta\, Q(n+1)^{-2} \qquad (30)
+ which, by (27), does not require the inversion of K(n).
+ In both cases, we have the updating formula
+ \theta(n+1) = \theta(n) + K(n+1)\, U(n+1) \big[ y(n+1) - \theta(n)^T U(n+1) \big]. \qquad (31)
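+ Putting (27), (30) and (31) together gives the following per-sample recursion for scalar observations. This is a sketch in the paper's notation; the initialization K(0) = (1/alpha)·I is a common choice and an assumption here.
+ import numpy as np
+
+ class RegularizedRLS:
+     # Recursive solver for (19) via (27), (30) and (31), scalar observations.
+     def __init__(self, dim, lam=0.9, alpha=0.08):
+         self.lam = lam
+         self.delta = alpha * (1.0 - lam)        # delta = alpha * (1 - lambda)
+         self.K = np.eye(dim) / alpha            # assumed initialization
+         self.theta = np.zeros(dim)
+
+     def update(self, u, y):
+         Ku = self.K @ u
+         # (28): matrix inversion lemma for Q(n+1)^{-1}
+         Q_inv = (self.K - np.outer(Ku, Ku) / (self.lam + u @ Ku)) / self.lam
+         # (30): first-order correction accounting for the regularization term
+         self.K = Q_inv - self.delta * (Q_inv @ Q_inv)
+         # (31): coefficient update
+         self.theta = self.theta + (self.K @ u) * (y - self.theta @ u)
+         return self.theta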
+ 5 Experiments and results
+ 5.1 Dataset
+ We generate a dataset thanks to a public challenge data generator [12]. This data generator is based on the well-known OMNeT++ discrete-event network simulator [13]. The published simulator is available as a docker image. However, due to the rules of the 2022 edition of the challenge, it is not possible to generate large topologies (no more than 10 nodes). Since our models are link-based, the use of small topologies does not seem to be a problem. The simulator is parameterized by a traffic matrix and a topological graph that are easy to generate thanks to the provided API.
+ Our generated dataset, used in this paper, is the result of 11900 simulations of the same topology graph of 10 nodes and 30 links, subject to 100 different traffic matrices. In order to get complex simulation results at low cost, we chose to model a network with small queue buffers (8000 bits), possibly subject to high traffic intensities (maximum traffic rate set to 4000 bits/s for each flow). Then, for each traffic matrix, we alter the capacity of the network according to a sigmoid, in order to model a network subject to jamming, with 2 stationary states. The proposed jamming may cause a decrease in the capacity of the network links by up to a factor of 5, as depicted in Figure 4a. For simplification purposes, we have considered that each link of the network has the same capacity. This results in a U-shaped distribution of our link data samples according to the link capacity, as shown in Figure 4b.
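+ As an illustration of the capacity alteration (the exact generator settings are not reproduced here, so the center, slope and nominal capacity below are assumptions chosen to mimic Figure 4a), a sigmoid profile between two stationary states can be generated as:
+ import numpy as np
+
+ def capacity_profile(n_samples, c_max=50000.0, factor=5.0, slope=0.01, center=None):
+     # Sigmoid decrease of the (common) link capacity, in bits/s,
+     # between two stationary states, modeling a jamming episode.
+     t = np.arange(n_samples)
+     if center is None:
+         center = n_samples / 2.0
+     c_min = c_max / factor
+     return c_min + (c_max - c_min) / (1.0 + np.exp(slope * (t - center)))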
+ 5.2 Results
+ From the generated data, we validate our approach along several axes.
+ 5.2.1 Global performances
+ First, we establish the benchmark performances based on the global methods presented in the paper [6] and in Sections 3.1 and 3.2.
+ For the linear regression method described in Section 3.1, we obtain an MSE of 5.86e-4 and a MAPE of 9.58% for the queue occupancy prediction, and an MSE of 1.10e-3 and a MAPE of 10.26% for the end-to-end latency prediction.
+ Concerning the curve regression using Bernstein polynomials (of degree 8) described in Section 3.2, we obtain an MSE of 4.52e-4 and a MAPE of 8.72% for the queue occupancy prediction, and an MSE of 9.35e-4 and a MAPE of 9.95% for the end-to-end latency prediction.
+ Note that these benchmark performances are below those obtained in [6], but the dataset we use here is probably more severe, since using the ground-truth value of the occupancy results in an MSE of 6.03e-4 and a MAPE of 9.34% for the flow delay prediction, which is indeed very close to the obtained results.
+ (a) Capacity alteration to model jamming, with a decrease of the capacity by up to a factor of 5.
+ (b) Link capacity distribution of the generated dataset.
+ Figure 4: Overview of the generated dataset.
+ 5.2.2 Behavior of iterative algorithms
+ In a second step, we verify that the algorithms presented in Section 4 converge and allow us to recover these performances. With a forgetting factor of 1 (use of all data with the same weight) and a block size of 10, we observe, for example in Figure 5, that the model coefficients converge towards a stable value, and that the MAPE recovers the value obtained with the global method using all data. Convergence is obtained in less than 10,000 operations. It is thus possible to replace the global method, which is already low-cost, with an approach where the calculations are carried out recursively.
+ 5.2.3 Adaptivity
+ In a third step, we confront the algorithms with network modifications. We consider an abrupt change in the network capacity, which could correspond to a jamming scenario. We then examine how the two adaptive algorithms presented (linear regression with judiciously chosen features, and the Bernstein polynomial model) can detect and adapt to these modifications. In this context, we examine the role of the forgetting factor and the regularization parameter. Figures 6 and 7 present the results for the case of a capacity change. We observe that (i) the square of the residual error, smoothed over 100 points, is a remarkable indicator of a change in the network; and (ii) the model coefficients readjust over the iterations after this change.
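+ The change indicator used above, the squared residual smoothed over 100 points, can be maintained online with a short sliding window; a minimal sketch, where the window length follows the figures and the rest is an implementation assumption.
+ import numpy as np
+ from collections import deque
+
+ class ResidualMonitor:
+     # Sliding mean of squared prediction residuals, used as a change indicator.
+     def __init__(self, window=100):
+         self.buf = deque(maxlen=window)
+
+     def update(self, y_hat, y):
+         self.buf.append((y_hat - y) ** 2)
+         return float(np.sqrt(np.mean(self.buf)))   # smoothed RMSE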
+ 5.2.4 Discussion
+ These experiments show the effectiveness and relevance of our iterative and adaptive versions of the end-to-end latency estimation procedures. The iterative versions have the same performance as their global counterparts, and an even lower cost since they can be implemented iteratively as the data are received or made available. The convergence time for the model coefficients is a few thousand samples, whereas the global model used around 350,000 samples for training and the GNN models require several million samples. Moreover, we observe that the residual error converges very quickly, in some tens of samples, which means that although the convergence of the models' coefficients does not seem to be complete, they are equivalent from the point of view of occupancy prediction performance. From an operational point of view, the model can be refreshed regularly, and the KPIs predicted between these updates can be used for intelligent routing. As we have observed, residual error monitoring is an excellent indicator of changes in the network state.
+ 6 Conclusion
+ In this paper, we considered the problem of designing efficient and low-cost algorithms for KPI prediction that are locally implementable and adaptive to network changes. Based on a previous work, we have argued for and developed adaptive solutions, introducing in addition a regularization term in order to stabilize the results. We used a public-domain simulator to simulate networks and generate relevant data. The experiments demonstrate the effectiveness and relevance of these algorithms. Thus, we now have low-complexity models that can be implemented iteratively at the level of local links. We have the possibility to predict the occupancy of the different links and the end-to-end latencies (the models predict the occupancy of the queues, then compute analytically the delay for each link, and finally aggregate along the path). Moreover, the adaptability of the solution allows us to follow changes in the network state, always at a minimal cost, by re-adapting from the current solution and new data. Future work will focus on criteria for choosing the forgetting factor and on the impact of the regularization factor, in order to find automatic selection methods. Of course, the approaches considered here will also have to be adapted to other types of KPIs, such as error rate or jitter.
+ (a) Iterative curve-fitting based on Bernstein polynomials of degree 8.
+ (b) Iterative linear regression.
+ Figure 5: Evolution of weights for our iterative methods without forgetting (non-adaptive) while fitting the whole dataset.
+ (a) Evolution of weights along the scenario.
+ (b) Evolution of the RMSE along the scenario.
+ Figure 6: Evolution of weights and √MSE (RMSE) for our adaptive approach of Bernstein polynomial curve regression of degree 8, λ = 0.9, α = 0.08. The scenario describes a nominal period between 2 periods of jamming; #39 corresponds to a link capacity of 11 Kbits/s, #10 to 40 Kbits/s and #1 to 49 Kbits/s. Curves are smoothed over 100 points.
+ (a) Evolution of weights along the scenario.
+ (b) Evolution of the RMSE along the scenario.
+ Figure 7: Evolution of weights and √MSE (RMSE) for our adaptive approach of Linear Regression, λ = 0.9, α = 0.08. The scenario describes a nominal period between 2 periods of jamming; #39 corresponds to a link capacity of 11 Kbits/s, #10 to 40 Kbits/s and #1 to 49 Kbits/s. Curves are smoothed over 100 points.
+ References
+ [1] Amin, R., Reisslein, M., Shah, N.: Hybrid SDN networks: A survey of existing approaches. IEEE Communications Surveys & Tutorials 20(4), 3259–3306 (2018)
+ [2] de Aquino Afonso, B.K.: GNNet challenge 2021 report (1st place). https://github.com/ITU-AI-ML-in-5G-Challenge/ITU-ML5G-PS-001-PARANA (2021)
+ [3] Barcelona Neural Networking Center: The graph neural networking challenge 2020. https://bnn.upc.edu/challenge/gnnet2020
+ [4] Chua, F.C., Ward, J., Zhang, Y., Sharma, P., Huberman, B.A.: Stringer: Balancing latency and resource usage in service function chain provisioning. IEEE Internet Computing 20(6), 22–31 (2016)
+ [5] Jahromi, H.Z., Hines, A., Delaney, D.T.: Towards application-aware networking: ML-based end-to-end application KPI/QoE metrics characterization in SDN. In: 2018 Tenth International Conference on Ubiquitous and Future Networks (ICUFN). pp. 126–131. IEEE (2018)
+ [6] Larrenie, P., Bercher, J.F., Lahsen-Cherif, I., Venard, O.: Low complexity approaches for end-to-end latency prediction. In: Proceedings of the 13th IEEE International Conference on Computing, Communication and Networking Technologies. IEEE (2022)
+ [7] Pasca, S.T.V., Kodali, S.S.P., Kataoka, K.: AMPS: Application aware multipath flow routing using machine learning in SDN. In: 2017 Twenty-third National Conference on Communications (NCC). pp. 1–6. IEEE (2017)
+ [8] Poularakis, K., Iosifidis, G., Tassiulas, L.: SDN-enabled tactical ad hoc networks: Extending programmable control to the edge. IEEE Communications Magazine 56(7), 132–138 (2018)
+ [9] Poularakis, K., Qin, Q., Nahum, E.M., Rio, M., Tassiulas, L.: Flexible SDN control in tactical ad hoc networks. Ad Hoc Networks 85, 71–80 (2019)
+ [10] Rusek, K., Suárez-Varela, J., Mestres, A., Barlet-Ros, P., Cabellos-Aparicio, A.: Unveiling the potential of graph neural networks for network modeling and optimization in SDN. In: Proceedings of the 2019 ACM Symposium on SDN Research. pp. 140–151 (2019)
+ [11] Singh, S., Jha, R.K.: A survey on Software Defined Networking: Architecture for next generation network. Journal of Network and Systems Management 25(2), 321–374 (2017)
+ [12] Suárez-Varela, J., et al.: The graph neural networking challenge: a worldwide competition for education in AI/ML for networks. ACM SIGCOMM Computer Communication Review 51(3), 9–16 (2021)
+ [13] Varga, A., Hornig, R.: An overview of the OMNeT++ simulation environment. In: 1st International ICST Conference on Simulation Tools and Techniques for Communications, Networks and Systems (2010)
+ [14] Widrow, B., Stearns, S.: Adaptive Signal Processing. Edited by Alan V. Oppenheim. Prentice-Hall (1985)
+
759
+
d9FRT4oBgHgl3EQfUTfv/content/tmp_files/load_file.txt ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ filepath=/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf,len=415
2
+ page_content='LOW COMPLEXITY ADAPTIVE MACHINE LEARNING APPROACHES FOR END-TO-END LATENCY PREDICTION ∗ Pierre Larrenie Thales SIX & LIGM Université Gustave Eiffel, CNRS Marne-la-Vallée, France pierre.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
3
+ page_content='larrenie@esiee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
4
+ page_content='fr Jean-François Bercher LIGM Université Gustave Eiffel, CNRS Marne-la-Vallée, France jean-francois.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
5
+ page_content='bercher@esiee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
6
+ page_content='fr Olivier Venard ESYCOM Université Gustave Eiffel, CNRS Marne-la-Vallée, France olivier.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
7
+ page_content='venard@esiee.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
8
+ page_content='fr Iyad Lahsen-Cherif Institut National des Postes et Télécommunications (INPT) Rabat, Morocco lahsencherif@inpt.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
9
+ page_content='ac.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
10
+ page_content='ma ABSTRACT Software Defined Networks have opened the door to statistical and AI-based techniques to improve efficiency of networking.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
11
+ page_content=' Especially to ensure a certain Quality of Service (QoS) for specific applications by routing packets with awareness on content nature (VoIP, video, files, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
12
+ page_content=') and its needs (latency, bandwidth, etc.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
13
+ page_content=') to use efficiently resources of a network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
14
+ page_content=' Monitoring and predicting various Key Performance Indicators (KPIs) at any level may handle such problems while preserving network bandwidth.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
15
+ page_content=' The question addressed in this work is the design of efficient, low-cost adaptive algo- rithms for KPI estimation, monitoring and prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
16
+ page_content=' We focus on end-to-end latency prediction, for which we illustrate our approaches and results on data obtained from a public generator provided after the recent international challenge on GNN [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
17
+ page_content=' In this paper, we improve our previously proposed low-cost estimators [6] by adding the adaptive dimension, and show that the performances are minimally modified while gaining the ability to track varying networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
18
+ page_content=' Keywords KPI Prediction · Machine Learning · Adaptivity · General Regression · SDN · Networking 1 Introduction Routing while ensuring quality of service (QoS) remains a significant challenge in all networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
19
+ page_content=' Whatever the resources, their use must be optimized to satisfy both throughput and QoS to users.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
20
+ page_content=' This is true for static wide area networks, but even more so for mobile networks with dynamic topology.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
21
+ page_content=' The emergence of software-defined networks (SDNs) [11, 1] has enabled data to be shared more ef- ficiently across communication layers.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
22
+ page_content=' Services can provide network requirements to routers;' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
23
+ page_content=' routers acquire data about network performance and allocate resources to meet those requirements as best as pos- sible.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
24
+ page_content=' However, acquiring overall network performance can result in high network bandwidth consump- tion for signaling, degrading the available resources, and is particularly limiting for resource-constrained networks such as mobile networks (MANETs).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
25
+ page_content=' We consider a network for which we wish to reduce signaling and perform intelligent routing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
26
+ page_content=' In order to limit the amount of signaling, the first axis is to estimate some key performance indicators (KPIs) from other KPIs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
27
+ page_content=' A second axis would be to perform this prediction locally, at the node level, rather than a global estimation in the network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
28
+ page_content=' Finally, if predictions are to be performed locally, the complexity of the algorithms will need to be low while preserving good prediction quality.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
29
+ page_content=' The last point is to be able to detect and track changes in the state of the network, which implies that the predictors will have to use only a small number of the previous states of the network and be able to readapt continuously.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
30
+ page_content=' ∗Note: Paper accepted at the 5th International Conference on Machine Learning for Networking (MLN’2022) and will be published as a post-proceedings in Springer’s LNCS.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
31
+ page_content=' arXiv:2301.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
32
+ page_content='13536v1 [cs.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
33
+ page_content='NI] 31 Jan 2023 The question addressed in this work is the design of efficient, low-cost adaptive algorithms for KPI estimation, monitoring and prediction.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
34
+ page_content=' In the present paper, we improve our previously proposed low- cost estimators [6] by adding the adaptive dimension and show that the performances are minimally modified while gaining the ability to track varying networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
35
+ page_content=' We focus on end-to-end latency prediction, for which we illustrate our approaches and results on data we generated using a public generator made available after the recent international challenge [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
36
+ page_content=' The best performances of the state-of-the-art are obtained with Graph Neural Networks (GNNs) [10, 3, 12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
37
+ page_content=' Although this is a global method while we favor local and adaptive methods, we used these performances as a benchmark.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
38
+ page_content=' We present related works in section 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
39
+ page_content=' Then we present in section 3 our main results from [6].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
40
+ page_content=' Instead of using high performances global and high-costs methods based on Graph Neural Networks (GNNs) [10, 3, 12], we proposed to use standard machine learning regression methods.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
41
+ page_content=' We showed that a careful feature engineering and feature selection (based on queue theory and the approach in [2]), as well as the use of a single feature with curve-fitting methods, allows to obtain near state-of-the-art performances with both a very low number of parameters, significantly lower learning and inference times compared to GNNs, and the with the ability to operate at the link level instead of a whole-graph level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
42
+ page_content=' In section 4, we show how these block algorithms can be transformed into versions implementable in an iterative way (i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
43
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
44
+ page_content=' by taking into account the data one by one as they become available), with the originality of using a regularization term.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
45
+ page_content=' Then, time dependent estimations, or the addition of forgetting factor will give them an adaptive character.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
46
+ page_content=' In section 5 we describe the validation dataset we built from a public generator and then the results of our experimentation.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
47
+ page_content=' Finally, we conclude, discuss the overall results and draw some perspectives.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
48
+ page_content=' 2 Related work [4] present an heuristic and an Mixed Integer Programming approach to optimize Service Functions Chain provisioning when using Network Functions Virtualization for a service provider.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
49
+ page_content=' Their approach relies on minimizing a trade-off between the expected latency and infrastructures resources.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
50
+ page_content=' Such optimization routing flow in SDN may need additional information to be exchanged between the nodes of a network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
51
+ page_content=' This results in an increase of the volume of signalization, by performing some measurements such as in [7].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
52
+ page_content=' This is not a consequent problem in unconstrained networks, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
53
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
54
+ page_content=' static wired networks with near-infinite bandwidth but may decrease performance of wireless network with poor capacity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
55
+ page_content=' An interesting solution to save bandwidth would be to predict some of the KPIs from other KPIs and data exchanged globally between nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
56
+ page_content=' In [8, 9], authors proposed a MANETs application of SDN in the domain of tactical networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
57
+ page_content=' They proposed a multi-level SDN controllers architecture to build both secure and resilient networking.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
58
+ page_content=' While orchestrating communication efficiently under military constraints such as: high-level of dynamism, fre- quent network failures, resources-limited devices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
59
+ page_content=' The proposed architecture is a trade-off between tra- ditional centralized architecture of SDN and a decentralized architecture to meet dynamic in-network constraints.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
60
+ page_content=' [5] proposed a Quality of Experience (QoE) management strategy in a SDN to optimize the loading time of all the tile of a mapping application.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
61
+ page_content=' They have shown the impact of several KPIs on their application using a Generalized Linear Model (GLM).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
62
+ page_content=' This mechanism make the application aware of the current network state.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
63
+ page_content=' [10] used GNNs for predicting KPIs such as latency, error-rate and jitter.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
64
+ page_content=' They relied on the Routenet architecture of Figure 1.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
65
+ page_content=' The idea is to model the problem as a bipartite hypergraph mapping flows to links as depicted on Figure 2.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
66
+ page_content=' Aggregating messages in such graph may result in predicting KPIs of the network in input.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
67
+ page_content=' The model needs to know the routing scheme, traffic and links properties.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
68
+ page_content=' Their result is very promising and has been the subject of two ITU Challenge in 2020 and 2021 [3, 12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
69
+ page_content=' These ITU challenges have very good results since the top-3 teams are around 2% error in delay prediction in the sense of Mean-Absolute Percentage Error (MAPE).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
70
+ page_content=' In [2], very promising results were obtained with a a near 1% GNN model error (in the sense of MAPE) on the test set.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
71
+ page_content=' The model mix analytical M/M/1/K queueing theory used to create extra-features to feed GNN model.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
72
+ page_content=' In order to satisfy the constraint of scalability proposed by the challenge, the first part of model operates at the link level.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
73
+ page_content=' Figure 1: Routenet Architecture [10] 2 Topology Per-flowperformance Traffic matrix RouteNet metrics (e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
74
+ page_content='g.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
75
+ page_content=', delay, jitter, loss) Routing schemeA B C D E F3 F2 F1 (a) Simple topology F1 F2 F3 LAD LDE LAB LBE LCB (b) Paths-links Hypergraph of (a) Figure 2: Routenet [10] paths-links hypergraph transformation applied on a simple topology graph carrying 3 flows.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
76
+ page_content=' (a) Black circles represents communication node, double headed arrows between them denotes available symmetric communications links and dotted arrows shows flows path.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
77
+ page_content=' (b) Circle (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
78
+ page_content=' dotted) represents links (resp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
79
+ page_content=' flows) entities defined in the first graph (Lij is the symmetric link between node i and node j.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
80
+ page_content=').' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
81
+ page_content=' Unidirectional arrows encode the relation "<flow> is carried by <link>".' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
82
3 Simple machine-learning approaches for latency prediction

Our first problem is to define an estimator \hat{y} of the occupancy y as a function of the different available "features" of the system, with a joint objective of low complexity and good performance. To do so, we look for an approximation function f_\theta(u) that allows us to estimate y from the features u and parameters \theta:

\hat{y} = f_\theta(u)    (1)

Here u and \theta are vectors that collect the different features and parameters. Once an estimate of the occupancy is obtained, the latency prediction \hat{d}_n for a specific link n follows from the simple relation

\hat{d}_n = \hat{y}_n \, \mathbb{E}[|P_n|] / c_n    (2)

where \mathbb{E}[|P_n|] is the observed average packet size on link n and c_n the capacity of this link. For analytical simplicity, the parameters \theta are sought by minimizing the mean squared error

\mathbb{E}\left[(y - \hat{y})^2\right] = \mathbb{E}\left[(y - f_\theta(u))^2\right],    (3)

although the performances are also often evaluated in the MAPE sense

L(\hat{y}, y) = \frac{100\%}{N} \sum_{n=1}^{N} \left| \frac{\hat{y}_n - y_n}{y_n} \right|    (4)

which is preferred to the Mean Squared Error (MSE) because of its scale-invariance.
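As an illustration of (2) and (4), here is a minimal NumPy sketch (our own, with hypothetical variable names, not the authors' code) that turns a predicted occupancy into a per-link delay and scores it in the MAPE sense:

import numpy as np

def link_delay(occupancy, mean_packet_size, capacity):
    # Eq. (2): delay = occupancy * E[|P_n|] / c_n, element-wise over links
    return occupancy * mean_packet_size / capacity

def mape(y_hat, y):
    # Eq. (4): mean absolute percentage error, in percent
    return 100.0 * np.mean(np.abs((y_hat - y) / y))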
87
We will focus here on two very simple models, although other machine-learning models have also been considered in [6]. Indeed, these two models lend themselves very easily to an adaptive formulation. In this section, we will first describe these two approaches and their performances, before giving the general adaptive formulation, which we will particularize in both cases.
90
3.1 Feature Engineering and Linear Regression

Based on the assumption that the system may be approximated by a model whose essential features come from M/M/1/K and M/G/1/K queueing theory, we took essential parameters characterizing queueing systems, such as ρ, ρe, π0, πK, etc., and built further features by applying interactions and various non-linearities (powers, log, exponential, square root). Then, we selected features in this set by a forward step-wise selection method, i.e., by adding in turn each feature to potential models and keeping the feature with the best performance. Finally, we selected the model with the best MAPE. For a linear regression model, this led us to select and keep a set of 4 simple features which, interestingly enough, have simple interpretations:

\begin{cases}
\pi_0 = \dfrac{1-\rho}{1-\rho^{K+1}} \\
L = \rho + \pi_0 \sum_k k\rho^k \\
\rho_e = \dfrac{\lambda_e}{\lambda}\,\rho = \dfrac{\lambda_e}{\mu} \\
S_e = \sum_k k\rho_e^k
\end{cases}    (5)

where L is the expected number of packets in the queue according to M/M/1/K, π0 the probability that the queue is empty according to M/M/1/K theory, ρe the effective queue utilization, and Se the unnormalized expected value of the effective number of packets in the queue buffer.
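For concreteness, these four features are cheap to compute from per-link measurements. The sketch below is our own illustrative code (the variable names, the truncation of the sums at K, and the exact rate definitions are assumptions, not the paper's implementation):

import numpy as np

def queueing_features(lam, lam_e, mu, K):
    # Offered load and effective utilization, following Eq. (5)
    rho = lam / mu
    rho_e = lam_e / mu
    k = np.arange(K + 1)                       # sums assumed over k = 0..K
    pi0 = (1.0 - rho) / (1.0 - rho**(K + 1))   # empty-queue probability (M/M/1/K)
    L = rho + pi0 * np.sum(k * rho**k)         # expected number of packets in the queue
    Se = np.sum(k * rho_e**k)                  # unnormalized effective queue size
    return np.array([1.0, pi0, L, rho_e, Se])  # feature vector u used in the linear model below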
105
These features can be thought of as a kind of data preprocessing before applying ML algorithms, and this turns out to be key to achieving good performances. The 4 previous features have been used as inputs for several machine-learning models: Multi-Layer Perceptron (MLP), linear regression, SVM, random forest, and gradient-boosting regression trees. We only describe here the case of linear regression, since it is a method for which an adaptive version is readily obtained.
108
In this case, model (1) is simply

\hat{y} = \theta_0 + \theta_1 \pi_0 + \theta_2 L + \theta_3 \rho_e + \theta_4 S_e = \theta^T u    (6)

with \theta^T = [\theta_0, \ldots, \theta_4] and u^T = [1, \pi_0, L, \rho_e, S_e]. For the linear regression model in (6), it is well known that the regularized minimum mean squared error

J(\theta) = \mathbb{E}\left[(y - \theta^T u)^2\right] + \alpha \|\theta\|^2    (7)

is obtained for \theta such that

(R_{uu} + \alpha \mathbf{1})\, \theta = R_{yu}    (8)

where we denoted R_{uu} = \mathbb{E}[u u^T] the correlation matrix of u, R_{yu} = \mathbb{E}[y u] the correlation vector of y and u, \mathbf{1} the identity matrix, and \alpha the regularization parameter.
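Solving (8) with empirical correlations takes only a few lines; a minimal sketch (our own code, not the authors') for a batch of feature rows U and targets y:

import numpy as np

def fit_ridge(U, y, alpha):
    # Eq. (8): (Ruu + alpha*I) theta = Ryu, with empirical correlation estimates
    Ruu = U.T @ U / len(y)
    Ryu = U.T @ y / len(y)
    return np.linalg.solve(Ruu + alpha * np.eye(U.shape[1]), Ryu)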
113
As far as performance is concerned, this approach was evaluated using static data from the GNN ITU Challenge 2021 [12]. Compared to the state of the art, our linear regression with carefully selected features shows a very slight performance degradation: 1.74% in MAPE, while the best state-of-the-art method is at 1.27%. One strong advantage is in terms of training and inference time: it trains in less than a second whereas the GNN requires more than 8 hours. Moreover, the inference time for the complete network is also much lower, by a factor of almost 1000 (0.296 s vs 214 s).
121
3.2 Curve Regression by Bernstein polynomials

There is a high interdependence among the features selected in Equation (5), since all of them can be expressed in terms of ρe. Furthermore, data exploration confirms that ρe is the prominent feature for occupancy prediction (and in turn latency prediction), as exemplified in Figure 3. It is then tempting to further simplify our feature space and estimate the occupancy from a non-linear transformation of the single feature ρe, as

\hat{y} = g(\rho_e)    (9)

where \hat{y} is the estimate of the occupancy y. The concerns are of course to define simple and efficient functions g, with a low number of parameters, that can model the kind of growth shown in Figure 3, and to check that the performance remains competitive.

Figure 3: Data of the ITU Challenge 2021 [12], ρe vs queue occupancy. The color scale indicates point-cloud density.

The estimator g is defined as a linear combination of simple functions f_n:

\hat{y} = g(\rho_e) = \sum_n \theta_n \, f_n(\rho_e)    (10)

which is also a linear model in terms of the functions f_n(\rho_e).
129
141
Several solutions were considered in [6] to define or choose the functions f_n. Since the Bernstein polynomials form a basis of the set of polynomials on the interval [0, 1], and the approximation of any continuous function on [0, 1] by a Bernstein polynomial converges uniformly, we were led to these polynomials:

f_n^K(x) = \binom{K}{n} x^n (1-x)^{K-n}    (11)

where K is the maximum order of the polynomials.
146
As mentioned, (10) can be rewritten as the linear model

\hat{y} = g(\rho_e) = \sum_n \theta_n \, f_n(\rho_e) = \theta^T u    (12)

with \theta^T = [\theta_0, \ldots, \theta_K] and u^T = [f_0^K(\rho_e), f_1^K(\rho_e), \ldots, f_K^K(\rho_e)]. Hence, we have the same form as in (8) for the solution.
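Only the feature map changes with respect to Section 3.1. A possible sketch (ours, not the authors' code) of the Bernstein design matrix of (11), which can then be fed to the same regularized solver as above:

import numpy as np
from scipy.special import comb

def bernstein_features(rho_e, K):
    # Eq. (11): f_n^K(x) = C(K, n) * x^n * (1 - x)^(K - n), for n = 0..K
    x = np.asarray(rho_e).reshape(-1, 1)
    n = np.arange(K + 1)
    return comb(K, n) * x**n * (1.0 - x)**(K - n)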
154
In terms of performance, we also obtained a minor degradation in MAPE (1.68%) compared to the state of the art (1.29%), while improving the wall-clock training and inference times by several orders of magnitude (2 min/3.14 s vs 8 hrs/214 s), though a bit less than the simple linear regression.
159
4 Adaptive versions

We place ourselves in the context where we have regular snapshots of the state of the network, which allows us both to monitor the quality of predictions and to track changes in the network. For the n-th series of measurements, let us denote y(n) the measured latency and u(n) the features. We can also group several snapshots or several links into a vector of latencies y(n) and a matrix U(n). In the following we derive the equations for this block case, which immediately includes the scalar case.
163
The minimum mean square error (7), which has the explicit solution (8), can also be solved by a gradient algorithm as

\theta_{k+1} = \theta_k - \mu \left. \nabla J(\theta) \right|_{\theta=\theta_k}    (13)
            = \theta_k - \mu \left( (R_{uu} + \alpha \mathbf{1}) \theta_k - R_{yu} \right).    (14)

In (14), we can substitute the true values with estimated ones. In order to introduce adaptivity to context changes in the network, these estimates preserve the temporal dimension. We thus use either a sliding average

\hat{R}_{uu}(n) = \sum_{l=0}^{L} U(n-l) U(n-l)^T, \qquad \hat{R}_{yu}(n) = \sum_{l=0}^{L} U(n-l) y(n-l)    (15)

or an exponential mean

\hat{R}_{uu}(n) = \sum_{l=0}^{n} \lambda^{n-l} U(l) U(l)^T = \lambda \hat{R}_{uu}(n-1) + U(n) U(n)^T, \qquad \hat{R}_{yu}(n) = \lambda \hat{R}_{yu}(n-1) + U(n) y(n)    (16)

where \lambda \le 1 is the forgetting factor. In the limit case where we take either L = 0 or \lambda = 0 in the previous formulas, we get the 'instantaneous estimates'

\hat{R}_{uu}(n) = U(n) U(n)^T, \qquad \hat{R}_{yu}(n) = U(n) y(n),    (17)

and we obtain

\theta(n+1) = (1 - \mu\alpha)\,\theta(n) - \mu\, U(n) \left( U(n)^T \theta(n) - y(n) \right)    (18)

which reduces to the well-known LMS algorithm [14] in the scalar case with no regularization (\alpha = 0).
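The update (18) is one line of code per snapshot. A minimal sketch (ours) for a single scalar observation, with the step size mu and regularization alpha left as tuning choices:

import numpy as np

def lms_step(theta, u, y, mu, alpha=0.0):
    # Eq. (18): regularized LMS update from one observation (u, y)
    err = u @ theta - y
    return (1.0 - mu * alpha) * theta - mu * err * u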
170
Alternatively, one can try to solve the normal equation (8) using time-dependent estimates such as the exponential mean (16). The difficulty with the solution

\hat{\theta}(n+1) = \left( \hat{R}_{uu}(n+1) + \alpha \mathbf{1} \right)^{-1} \hat{R}_{yu}(n+1)    (19)

is the inversion, for each n, of the correlation matrix.
172
Let us denote

K(n+1) = \left( \hat{R}_{uu}(n+1) + \alpha \mathbf{1} \right)^{-1}.    (20)

Using (16), we have

K(n+1)^{-1} = \lambda \hat{R}_{uu}(n) + U(n+1) U(n+1)^T + \alpha \mathbf{1}    (21)
            = \lambda \left( \hat{R}_{uu}(n) + \alpha \mathbf{1} \right) + U(n+1) U(n+1)^T + \alpha (1-\lambda) \mathbf{1}    (22)
            = \lambda K(n)^{-1} + U(n+1) U(n+1)^T + \alpha (1-\lambda) \mathbf{1}    (23)

and

K(n+1) = \left[ \left( \lambda K(n)^{-1} + U(n+1) U(n+1)^T \right) + \alpha (1-\lambda) \mathbf{1} \right]^{-1}    (24)
       = \left[ Q(n+1) + \delta \mathbf{1} \right]^{-1}    (25)

with

Q(n+1) = \lambda K(n)^{-1} + U(n+1) U(n+1)^T    (26)

and \delta = \alpha (1-\lambda). The matrix inversion lemma enables us to reduce the inversion of Q(n+1) to

Q(n+1)^{-1} = \frac{1}{\lambda} K(n) - \frac{1}{\lambda^2} K(n) U(n+1) \left[ \mathbf{1} + \frac{1}{\lambda} U(n+1)^T K(n) U(n+1) \right]^{-1} U(n+1)^T K(n),    (27)

which simplifies to

Q(n+1)^{-1} = \frac{1}{\lambda} K(n) - \frac{1}{\lambda^2} \, \frac{K(n)\, u(n+1)\, u(n+1)^T K(n)}{1 + \frac{1}{\lambda} u(n+1)^T K(n)\, u(n+1)}    (28)

for scalar observations. Now, we can use the Taylor expansion to get

K(n+1) = \left[ Q(n+1) + \delta \mathbf{1} \right]^{-1} = Q(n+1)^{-1} - \delta Q(n+1)^{-2} + \delta^2 Q(n+1)^{-3} + \ldots    (29)

This gives us a way to compute recursively the inverse of the regularized estimate of the correlation matrix, by combining (27) and (29) into

K(n+1) \approx Q(n+1)^{-1} - \delta Q(n+1)^{-2}    (30)

which, by (27), does not require the inversion of K(n).
215
In both cases, we have the updating formula

\theta(n+1) = \theta(n) + K(n+1) U(n+1) \left[ y(n+1) - \theta(n)^T U(n+1) \right].    (31)
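Putting (28), (30) and (31) together yields an inversion-free recursion. The sketch below is our own code for scalar observations; the forgetting factor lam, the regularization alpha, and the initialization of the matrix K are design choices:

import numpy as np

def adaptive_step(theta, Kmat, u, y, lam, alpha):
    # Eq. (28): rank-one update of Q(n+1)^{-1} via the matrix inversion lemma
    Ku = Kmat @ u
    Qinv = Kmat / lam - np.outer(Ku, Ku) / (lam**2 * (1.0 + (u @ Ku) / lam))
    # Eq. (30): first-order correction for the regularization, with delta = alpha*(1 - lam)
    delta = alpha * (1.0 - lam)
    Knew = Qinv - delta * (Qinv @ Qinv)
    # Eq. (31): coefficient update from the prediction residual
    theta_new = theta + Knew @ u * (y - theta @ u)
    return theta_new, Knew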
216
5 Experiments and results
217
+ page_content='1 Dataset We generate a dataset thanks to a public challenge data generator [12].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
218
+ page_content=' This data generator is based on the well-known OMNET++ discrete event network simulator[13].' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
219
+ page_content=' The published simulator is available as a docker image.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
220
+ page_content=' However, due to the rules of the 2022 edition of the challenge, it is not possible to generate large topologies, i.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
221
+ page_content='e.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
222
+ page_content=' no more than 10 nodes.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
223
+ page_content=' Since our models are link-based, the use of small topologies does not seem to be a problem.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
224
+ page_content=' The simulator is parameterized by a traffic matrix and a topological graph that are easy to generate thanks to the provided API.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
225
+ page_content=' Our generated dataset, used is this paper, is the result of 11900 simulations of the same topology graph of 10 nodes and 30 links, subject to 100 different traffic matrices.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
226
+ page_content=' In order to get complex results of simulations but at low cost, we made the choice to model a network with small queue buffers (8000 bits) and possibly subject of high traffic intensities (maximum traffic rate set to 4000 bits/s for each flow).' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
227
+ page_content=' Then for each traffic matrices, we alter the capacity of the network according to a sigmoid, in order to model a network subject to jamming, with 2 stationary states.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
228
+ page_content=' The proposed jamming may cause a decrease in the capacity of the network links by up to a factor of 5, as depicted on Figure 4a.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
229
+ page_content=' For simplification purposes, we have considered that each link of the network has the same capacity.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
230
+ page_content=' This result in a U- shaped distribution of our link data samples according to the link capacity as shown in Figure 4b.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
231
5.2 Results

From the generated data, we validate our approach along several axes.

5.2.1 Global performances

First, we establish the benchmark performances based on the global methods presented in [6] and in Sections 3.1 and 3.2.
238
For the linear regression method described in Section 3.1, we obtain an MSE of 5.86e-4 and a MAPE of 9.58% for the queue occupancy prediction, and an MSE of 1.10e-3 and a MAPE of 10.26% for the end-to-end latency prediction. Concerning the curve regression using Bernstein polynomials (of degree 8) described in Section 3.2, we obtain an MSE of 4.52e-4 and a MAPE of 8.72% for the queue occupancy prediction, and an MSE of 9.35e-4 and a MAPE of 9.95% for the end-to-end latency prediction. Note that these benchmark performances are below those obtained in [6], but the dataset we use here is probably more severe, since using the ground-truth value of the occupancy yields an MSE of 6.03e-4 and a MAPE of 9.34% for the flow delay prediction, which is indeed very close to the obtained results.
255
Figure 4: Overview of the generated dataset. (a) Capacity alteration modeling jamming, with a decrease of the capacity by up to a factor of 5. (b) Link-capacity distribution of the generated dataset.

5.2.2 Behavior of iterative algorithms

In a second step, we verify that the algorithms presented in Section 4 converge and allow us to recover these performances.
260
With a forgetting factor of 1 (all data weighted equally) and a block size of 10, we observe, for example in Figure 5, that the model coefficients converge towards a stable value, and that the MAPE recovers the value obtained with the global method using all data. Convergence is obtained in less than 10,000 operations. It is thus possible to replace the global method, which is already low-cost, with an approach where the calculations are carried out recursively.
263
5.2.3 Adaptivity

In a third step, we confront the algorithms with network modifications. We consider an abrupt change in the network capacity, which could correspond to a jamming scenario. We then examine how the two adaptive algorithms presented (linear regression with judiciously chosen features, and the Bernstein polynomial model) can detect and adapt to these modifications. In this context, we examine the role of the forgetting factor and the regularization parameter. Figures 6 and 7 present the results for the case of a capacity change. We observe that (i) the square of the residual error, smoothed over 100 points, is a remarkable indicator of a change in the network; and (ii) the model coefficients readjust over the iterations after this change.
272
5.2.4 Discussion

These experiments show the effectiveness and relevance of our iterative and adaptive versions of the end-to-end latency estimation procedures. The iterative versions have the same performance as their global counterparts, at an even lower cost since they can be implemented iteratively as the data is received or made available. The convergence time for the model coefficients is a few thousand samples, whereas the global model used around 350,000 samples for training and the GNN models require several million samples. Moreover, we observe that the residual error converges very quickly, in some tens of samples, which means that although the convergence of the models' coefficients does not seem to be complete, the models are equivalent from the point of view of occupancy-prediction performance. From an operational point of view, the model can be refreshed regularly, and the predicted KPIs between these updates can be used for intelligent routing. As we have observed, residual-error monitoring is an excellent indicator of changes in the network state.
281
6 Conclusion

In this paper, we considered the problem of designing efficient and low-cost algorithms for KPI prediction that are locally implementable and adaptive to network changes. Based on a previous work, we have argued for and developed adaptive solutions, introducing in addition a regularization term in order to stabilize the results. We used a public-domain simulator to simulate networks and generate relevant data. The experiments demonstrate the effectiveness and relevance of these algorithms. Thus, we now have low-complexity models that can be implemented iteratively at the level of local links. We have the possibility to predict the occupancy of the different links and the end-to-end latencies (the models predict the occupancy of the queues, then compute analytically the delay for each link, and finally aggregate along the path). Moreover, the adaptability of the solution allows us to follow changes in the network state, always at minimal cost, by re-adapting from the current solution and new data. Future work will focus on choice criteria for the forgetting factor and on the impact of the regularization factor, in order to find automatic selection methods. Of course, the approaches considered here will have to be adapted to other types of KPI, such as error rate or jitter.
290
Figure 5: Evolution of the weights for our iterative methods without forgetting (non-adaptive) while fitting the whole dataset. (a) Iterative curve fitting based on Bernstein polynomials of degree 8. (b) Iterative linear regression.
303
Figure 6: Evolution of the weights and of the RMSE (√MSE) for our adaptive Bernstein polynomial curve regression of degree 8, λ = 0.9, α = 0.08. (a) Evolution of the weights along the scenario. (b) Evolution of the RMSE along the scenario. The scenario describes a nominal period between two periods of jamming; #39 corresponds to a link capacity of 11 Kbits/s, #10 to 40 Kbits/s, and #1 to 49 Kbits/s. Curves are smoothed over 100 points.
316
Figure 7: Evolution of the weights and of the RMSE (√MSE) for our adaptive linear regression, λ = 0.9, α = 0.08. (a) Evolution of the weights along the scenario. (b) Evolution of the RMSE along the scenario. The scenario describes a nominal period between two periods of jamming; #39 corresponds to a link capacity of 11 Kbits/s, #10 to 40 Kbits/s, and #1 to 49 Kbits/s. Curves are smoothed over 100 points.
337
References

[1] Amin, R., Reisslein, M., Shah, N.: Hybrid SDN networks: A survey of existing approaches. IEEE Communications Surveys & Tutorials 20(4), 3259–3306 (2018)
[2] de Aquino Afonso, B.K.: GNNet challenge 2021 report (1st place). https://github.com/ITU-AI-ML-in-5G-Challenge/ITU-ML5G-PS-001-PARANA (2021)
[3] Barcelona Neural Networking Center: The graph neural networking challenge 2020. https://bnn.upc.edu/challenge/gnnet2020
[4] Chua, F.C., Ward, J., Zhang, Y., Sharma, P., Huberman, B.A.: Stringer: Balancing latency and resource usage in service function chain provisioning. IEEE Internet Computing 20(6), 22–31 (2016)
[5] Jahromi, H.Z., Hines, A., Delanev, D.T.: Towards application-aware networking: ML-based end-to-end application KPI/QoE metrics characterization in SDN. In: 2018 Tenth International Conference on Ubiquitous and Future Networks (ICUFN), pp. 126–131. IEEE (2018)
[6] Larrenie, P., Bercher, J.F., Lahsen-Cherif, I., Venard, O.: Low complexity approaches for end-to-end latency prediction. In: Proceedings of the 13th IEEE International Conference on Computing, Communication and Networking Technologies. IEEE (2022)
[7] Pasca, S.T.V., Kodali, S.S.P., Kataoka, K.: AMPS: Application aware multipath flow routing using machine learning in SDN. In: 2017 Twenty-third National Conference on Communications (NCC), pp. 1–6. IEEE (2017)
[8] Poularakis, K.
384
+ page_content=', Iosifidis, G.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
385
+ page_content=', Tassiulas, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
386
+ page_content=': SDN-enabled tactical ad hoc networks: Extending pro- grammable control to the edge.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
387
+ page_content=' IEEE Communications Magazine 56(7), 132–138 (2018) [9] Poularakis, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
388
+ page_content=', Qin, Q.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
389
+ page_content=', Nahum, E.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
390
+ page_content='M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
391
+ page_content=', Rio, M.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
392
+ page_content=', Tassiulas, L.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
393
+ page_content=': Flexible SDN control in tactical ad hoc networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
394
+ page_content=' Ad Hoc Networks 85, 71–80 (2019) [10] Rusek, K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
395
+ page_content=', Suárez-Varela, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
396
+ page_content=', Mestres, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
397
+ page_content=', Barlet-Ros, P.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
398
+ page_content=', Cabellos-Aparicio, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
399
+ page_content=': Unveiling the potential of graph neural networks for network modeling and optimization in SDN.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
400
+ page_content=' In: Proceedings of the 2019 ACM Symposium on SDN Research.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
401
+ page_content=' pp.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
402
+ page_content=' 140–151 (2019) [11] Singh, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
403
+ page_content=', Jha, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
404
+ page_content='K.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
405
+ page_content=': A survey on Software Defined Networking: Architecture for next generation network.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
406
+ page_content=' Journal of Network and Systems Management 25(2), 321–374 (2017) [12] Suárez-Varela, J.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
407
+ page_content=', et al.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
408
+ page_content=' : The graph neural networking challenge: a worldwide competition for education in AI/ML for networks.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
409
+ page_content=' ACM SIGCOMM Computer Communication Review 51(3), 9–16 (2021) [13] Varga, A.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
410
+ page_content=', Hornig, R.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
411
+ page_content=': An overview of the omnet++ simulation environment.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
412
+ page_content=' In: 1st International ICST Conference on Simulation Tools and Techniques for Communications, Networks and Systems (2010) [14] Widrow, B.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
413
+ page_content=', Stearns, S.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
414
+ page_content=': Adaptive Signal Processing.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
415
+ page_content=' Edited by Alan V.' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
416
+ page_content=' Oppenheim, Prentice-Hall (1985) 12' metadata={'source': '/home/zjlab/wf/langchain-ChatGLM/knowledge_base/d9FRT4oBgHgl3EQfUTfv/content/2301.13536v1.pdf'}
dNFQT4oBgHgl3EQfjDY0/content/tmp_files/2301.13352v1.pdf.txt ADDED
@@ -0,0 +1,2147 @@
Sentence Identification with BOS and EOS Label Combinations

Takuma Udagawa, Hiroshi Kanayama, Issei Yoshida
IBM Research - Tokyo, Japan
Takuma.Udagawa@ibm.com, {hkana,issei}@jp.ibm.com

arXiv:2301.13352v1 [cs.CL] 31 Jan 2023

Abstract

The sentence is a fundamental unit in many NLP applications. Sentence segmentation is widely used as the first preprocessing task, where an input text is split into consecutive sentences considering the end of the sentence (EOS) as their boundaries. This task formulation relies on a strong assumption that the input text consists only of sentences, or what we call the sentential units (SUs). However, real-world texts often contain non-sentential units (NSUs) such as metadata, sentence fragments, nonlinguistic markers, etc. which are unreasonable or undesirable to be treated as a part of an SU. To tackle this issue, we formulate a novel task of sentence identification, where the goal is to identify SUs while excluding NSUs in a given text. To conduct sentence identification, we propose a simple yet effective method which combines the beginning of the sentence (BOS) and EOS labels to determine the most probable SUs and NSUs based on dynamic programming. To evaluate this task, we design an automatic, language-independent procedure to convert the Universal Dependencies corpora into sentence identification benchmarks. Finally, our experiments on the sentence identification task demonstrate that our proposed method generally outperforms sentence segmentation baselines which only utilize EOS labels.
1 Introduction

The sentence, which we refer to as the sentential unit (SU), is a fundamental unit of processing in many NLP applications including syntactic parsing (Dozat and Manning, 2017), semantic parsing (Dozat and Manning, 2018), and machine translation (Liu et al., 2020). Existing works mostly rely on sentence segmentation (a.k.a. sentence boundary detection) as the first preprocessing task, where we predict the end of the sentence (EOS) to split a text into consecutive SUs (Kiss and Strunk, 2006; Gillick, 2009). This approach relies on a strong assumption that the text only consists of SUs; however, real-world texts like web contents often contain non-sentential units (NSUs) such as the metadata of attachments embedded in the email body, repetition of symbols for separating texts, irregular series of nouns, etc. (just to name a few). Such NSUs may cause detrimental or unexpected results in the downstream tasks if considered as parts of the SUs and are more desirable to be distinguished from SUs in the first preprocessing step.

To tackle this problem, we formulate a novel task of sentence identification, where the goal is to identify SUs while excluding NSUs in a given text (§3). This can be regarded as an SU span extraction task, where each SU span is represented by the beginning of the sentence (BOS) and the EOS labels.[1] We illustrate the difference between sentence segmentation and sentence identification in Table 1. In sentence segmentation, the text fragment of an embedded file ("- TEXT.htm << File: TEXT.htm >>") needs to be considered as a part of an SU. In contrast, sentence identification can regard it as an NSU and exclude it for downstream applications such as dependency parsing.

[1] For simplicity, we assume that the input text can be segmented into consecutive, non-overlapping units of SUs and NSUs. This way, we can also represent and evaluate SU extraction as an equivalent BIO labeling task (§5-§7).

Table 1: Illustration of sentence segmentation and sentence identification, on the input text "Thank you. - TEXT.htm << File: TEXT.htm >> I was thinking of converting it to a hover vehicle. I might just sell the car and get you to drive me around all winter." (from EWT). In sentence segmentation, EOS labels (E) are used to segment the input text into consecutive SUs. In sentence identification, only the spans bracketed by the BOS (B) and EOS labels are extracted as SUs, while the rest can be excluded as NSUs.

  Sentence        Thank you.[E] - TEXT.htm << File: TEXT.htm >> I was thinking of
  Segmentation    converting it to a hover vehicle.[E] I might just sell the car
                  and get you to drive me around all winter.[E]

  Sentence        [B]Thank you.[E] - TEXT.htm << File: TEXT.htm >> [B]I was thinking
  Identification  of converting it to a hover vehicle.[E] [B]I might just sell the
                  car and get you to drive me around all winter.[E]

To conduct sentence identification, we propose a simple method which effectively combines the BOS and EOS probabilities to determine both SUs and NSUs (§4). To be specific, we first train the BOS and EOS labeling models based on either the sentence identification dataset (with SUs and NSUs) or the sentence segmentation dataset (only SUs). Then, we search for the most probable spans of SUs and NSUs using a simple dynamic programming framework. Theoretically, our method can be considered as a natural generalization of existing sentence segmentation algorithms.

To evaluate this task, we design an automatic procedure to convert the Universal Dependencies (UD) corpora (de Marneffe et al., 2021) into sentence identification benchmarks (§5). To be specific, (i) we use the original sentence boundaries in UD as the unit (SU and NSU) boundaries and (ii) classify each unit as an SU iff it contains at least one clausal predicate with a core/non-core argument. Importantly, our classification rule follows the definition of lexical sentence in linguistics (Nunberg, 1990), is easily customizable with language-independent rules, and makes reasonable classification within the scope of our experiments.

To conduct our experiments, we focus on the English Web Treebank (Silveira et al., 2014) as the primary benchmark for sentence identification and train the BOS/EOS labeling models by finetuning RoBERTa (Liu et al., 2019) (§6). We also propose techniques to develop these models using a standard sentence segmentation dataset, i.e. the Wall Street Journal corpus (Marcus et al., 1993), which only contains clean, edited SUs without any NSUs.

Based on our experimental results, we demonstrate that our proposed method generally outperforms sentence segmentation baselines which only utilize EOS labels (§7). These results highlight the importance of combining the BOS labels in addition to the EOS labels for accurate sentence identification under various conditions.
2 Background

Sentence segmentation, a.k.a. sentence boundary detection, is the task of segmenting an input text into the unit of sentences. Despite the long history of study (Riley, 1989) and its importance in the entire NLP pipeline (Walker et al., 2001), this area has received relatively little attention. For one reason, the task has been recognized as "long solved" (Read et al., 2012) with the most recent approach reporting 99.8% F1 score on the standard English Wall Street Journal (WSJ) dataset (Wicks and Post, 2021). Their state-of-the-art method ERSATZ combines (i) a regular-expression based detector of candidate sentence boundaries, followed by (ii) a Transformer-based (Vaswani et al., 2017) binary classifier which predicts whether the candidate boundary is EOS based on the local context, i.e. surrounding few words. This modern context-based approach has been shown to outperform competitive, widely used baselines such as SPLITTA (Gillick, 2009), PUNKT (Kiss and Strunk, 2006), and MOSES (Koehn et al., 2007).

However, two important aspects are not fully addressed in the current literature. First is the coverage of diverse domains, genres, and writing styles. Existing works (including Wicks and Post, 2021) focus on formal/edited text and assume the existence of sentence ending punctuations (e.g. full stops) at the sentence boundaries. However, social media texts often lack such punctuations and contain various types of non-linguistic noise, which can lead to a substantial degradation in the segmentation performance (Read et al., 2012; Rudrapal et al., 2015). Speech transcription texts also usually contain disfluent, ungrammatical, or fragmented structures and lack both punctuations and casing (Wang et al., 2019; Rehbein et al., 2020). Considering the amount of such informal or non-standard texts in the real world, it is compelling to expand the capability of sentence segmentation beyond formal, standardized text.

The second aspect is the coverage of multiple languages. Different languages involve different complexities in sentence segmentation, e.g. Chinese requires the disambiguation of commas as the sentence ending punctuation (Xue and Yang, 2011) and Thai does not mark EOS with any type of punctuations (Aroonmanakun et al., 2007; Zhou et al., 2016). To advance NLP from a multilingual perspective, it is crucial to develop and evaluate models in multiple languages: Wicks and Post (2021) make an important step in this direction, proposing a language-agnostic, unified sentence segmentation model covering a total of 87 languages.

Based on these observations, we first propose to extend the task of sentence segmentation to sentence identification, which expands the capability of sentence segmentation beyond formal, standardized text (§3, §4). Secondly, we propose a cross-lingual method of benchmarking sentence identification based on the UD corpora, considering every word or character as the candidate boundary to cover diverse domains, genres, and languages that lack sentence ending punctuations (§5). Finally, we follow Wicks and Post (2021) to develop modern neural-based models that require no language-specific engineering and can be developed for different languages in a unified manner (§6).
3 Task Formulation

3.1 Sentence Segmentation Task

First, we introduce a precise (re-)formulation of the sentence segmentation task. Let $W = (w_0, w_1, \ldots, w_{N-1})$ represent the input text, where each $w_i$ denotes a word (but can also be a subword or character). We also define the text span $W[i:j] = (w_i, \ldots, w_{j-1})$, their concatenation $W[i:j] \oplus W[j:k] = W[i:k]$, and SU boundary indices $B = (b_0, b_1, \ldots, b_M)$ where $b_0 = 0$, $b_M = N$, and $\bigoplus_{i=1}^{M} W[b_{i-1}:b_i] = W$ (i.e. the concatenation of all SUs recovers the input text).

Next, we introduce the SU probability $p_{SU}(W[i:j])$ which corresponds to the probability of the text span $W[i:j]$ being an SU. Based on this probability, the task of sentence segmentation can be formalized as searching for the boundaries $B$ which maximize the following probability:[2]

  $\arg\max_{B} \prod_{i=1}^{M} p_{SU}(W[b_{i-1}:b_i])$   (1)

[2] $M$ is a variable and need not be fixed during the search.

The most standard approach is to define $p_{SU}(W[i:j])$ based on a pretrained EOS labeling model, as we describe in §4.1. However, our (re-)formulation as Eq. (1) is more general and permits other definitions of SU probability as well.
3.2 Sentence Identification Task

In sentence identification, we consider the input text $W$ can be segmented into consecutive, non-overlapping units of SUs and NSUs. Hence, we regard $B = (b_0, b_1, \ldots, b_M)$ as the unit (SU and NSU) boundaries and define the unit indicators $A = (a_1, a_2, \ldots, a_M)$ for each unit as follows:

  $a_i = 1$ if $W[b_{i-1}:b_i]$ is an SU
  $a_i = 0$ if $W[b_{i-1}:b_i]$ is an NSU

Next, we introduce the NSU probability $p_{NSU}(W[i:j])$ which corresponds to the probability of the text span $W[i:j]$ being an NSU. Based on $p_{SU}$ and $p_{NSU}$, we can formalize the task of sentence identification as searching for the unit boundaries $B$ and unit indicators $A$ which maximize the following probability:

  $\arg\max_{B,A} \prod_{i=1}^{M} p_{SU}(W[b_{i-1}:b_i])^{a_i} \, p_{NSU}(W[b_{i-1}:b_i])^{1-a_i}$   (2)

Note that this strictly generalizes the sentence segmentation task in Eq. (1), which is a special case where $a_i = 1, \forall a_i \in A$. Based on this task formulation, we discuss how we can define $p_{SU}(W[i:j])$ and $p_{NSU}(W[i:j])$ to derive our sentence identification algorithm in §4.2.
4 Methods

4.1 Sentence Segmentation Method

In the most standard approach, sentence segmentation employs an EOS labeling model $p_{EOS}$ to define the SU probability $p_{SU}$ in Eq. (1). To be specific, let $p_{EOS}(w_i \mid W; \theta)$ denote the EOS labeling model, which computes the probability of $w_i$ being EOS in $W$ ($\theta$ denotes the model parameters). Typically, it is straightforward to train this model in a supervised learning setup using a dataset annotated with gold EOS boundaries (Wicks and Post, 2021). For brevity, we use the notation $p_{EOS}(w_i)$ as a shorthand for $p_{EOS}(w_i \mid W; \theta)$, i.e. we omit $W$ and $\theta$ (unless required) in the rest of this paper.

Based on the pretrained model $p_{EOS}$, we can define the SU probability as $p_{SU}(W[i:j]) = p_{EOS}(w_{j-1}) \prod_{i \le k < j-1} (1 - p_{EOS}(w_k))$, which requires the last word $w_{j-1}$ to be EOS and all other words to be non-EOS. By substituting this definition, we can decompose Eq. (1) as follows:

  (1) $= \arg\max_{B} \sum_{i=1}^{M} \log p_{SU}(W[b_{i-1}:b_i])$
      $= \arg\max_{B} \sum_{i=1}^{M} \Big[ \log p_{EOS}(w_{b_i - 1}) + \sum_{b_{i-1} \le j < b_i - 1} \log (1 - p_{EOS}(w_j)) \Big]$
      $= \arg\max_{B} \Big[ \sum_{i \in B_{EOS}} \log p_{EOS}(w_i) + \sum_{i \notin B_{EOS}} \log (1 - p_{EOS}(w_i)) \Big]$   (3)

where $B_{EOS} = \{ b_i - 1 \mid i \in (1, 2, \ldots, M) \}$ represents all the EOS indices defined by $B$.

This is a trivial optimization problem where we can simply choose $B_{EOS} = \{ i \in (0, 1, \ldots, N-1) \mid p_{EOS}(w_i) \ge 0.5 \}$ to maximize Eq. (3). This also shows that sentence segmentation can be conducted by predicting the EOS independently for each $w_i$ based on $p_{EOS}(w_i)$. In contrast, sentence identification involves a more complex optimization problem which we solve using dynamic programming (§4.2).
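As a concrete illustration of this thresholding rule, the following is a minimal sketch (not the paper's released code) of the EOS-only segmentation baseline; `words` and `p_eos` are assumed inputs, with `p_eos` coming from any pretrained EOS labeling model:

    # A minimal sketch of EOS-only sentence segmentation via thresholding
    # (the trivial argmax of Eq. 3). `p_eos` is a hypothetical list of
    # per-word EOS probabilities from a pretrained tagger.
    def segment_sentences(words, p_eos, threshold=0.5):
        segments, start = [], 0
        for i, p in enumerate(p_eos):
            if p >= threshold:  # word i is predicted as EOS
                segments.append(words[start:i + 1])
                start = i + 1
        if start < len(words):  # trailing segment without a final EOS
            segments.append(words[start:])
        return segments

    # segment_sentences(["Thank", "you", ".", "Bye"], [0.01, 0.02, 0.95, 0.30])
    # -> [["Thank", "you", "."], ["Bye"]]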
4.2 Sentence Identification Method

We extend the method of sentence segmentation (§4.1) to conduct sentence identification. To be specific, we employ pretrained BOS and EOS labeling models $p_{BOS}, p_{EOS}$ to define the SU and NSU probabilities $p_{SU}, p_{NSU}$ in Eq. (2). As a first step, we need to train the BOS and EOS labeling models: this can be conducted in a supervised manner using a dataset containing gold BOS and EOS labels, as we explain in §6.1.

Based on the pretrained BOS and EOS labeling models, we can define the SU and NSU probabilities as follows:

  $p_{SU}(W[i:j]) = p_{BOS}(w_i) \prod_{i < k \le j-1} (1 - p_{BOS}(w_k)) \times p_{EOS}(w_{j-1}) \prod_{i \le k < j-1} (1 - p_{EOS}(w_k))$

  $p_{NSU}(W[i:j]) = \prod_{i \le k \le j-1} (1 - p_{BOS}(w_k)) \times \prod_{i \le k \le j-1} (1 - p_{EOS}(w_k))$

In the SU probability $p_{SU}$, the first word $w_i$ is required to be BOS, the last word $w_{j-1}$ to be EOS, and all other words to be neither BOS nor EOS. Note that this definition of $p_{SU}$ is a natural generalization from §4.1 which only relies on the EOS probability $p_{EOS}$.

In contrast, the NSU probability $p_{NSU}$ requires all words to be neither BOS nor EOS. Notably, this definition does not distinguish contiguous NSUs in the sense that $p_{NSU}(W[i:k]) = p_{NSU}(W[i:j]) \times p_{NSU}(W[j:k])$ if $W[i:j] \oplus W[j:k] = W[i:k]$. This is convenient as we are only interested in the extraction of SUs and do not need to seek the exact boundaries between consecutive NSUs.

By substituting these definitions of $p_{SU}$ and $p_{NSU}$, we can decompose Eq. (2) as follows:

  (2) $= \arg\max_{B,A} \sum_{i=1}^{M} \big[ a_i \log p_{SU}(W[b_{i-1}:b_i]) + (1 - a_i) \log p_{NSU}(W[b_{i-1}:b_i]) \big]$
      $= \arg\max_{B,A} \Big[ \sum_{i \in B^A_{BOS}} \log p_{BOS}(w_i) + \sum_{i \notin B^A_{BOS}} \log (1 - p_{BOS}(w_i))$
      $\qquad\qquad + \sum_{i \in B^A_{EOS}} \log p_{EOS}(w_i) + \sum_{i \notin B^A_{EOS}} \log (1 - p_{EOS}(w_i)) \Big]$   (4)

where $B^A_{BOS} = \{ b_{i-1} \mid i \in (1, 2, \ldots, M), a_i = 1 \}$ denotes the BOS indices and $B^A_{EOS} = \{ b_i - 1 \mid i \in (1, 2, \ldots, M), a_i = 1 \}$ denotes the EOS indices, both defined by $B$ and $A$.

Therefore, our goal is to choose $B^A_{BOS}$ and $B^A_{EOS}$ which maximize Eq. (4). To this end, we need to consider the restrictions that (i) the first label should be BOS, (ii) the last label should be EOS, and (iii) BOS and EOS labels need to appear alternately. These restrictions can be incorporated in our dynamic programming framework to find the argmax of Eq. (4). For the precise algorithm, we refer the readers to Appendix A.
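Since Appendix A is not reproduced here, the following is one possible realization of this dynamic program, given as a sketch: a Viterbi-style search with an "outside an SU" state and an "inside an SU" state, which enforces the three restrictions above. The exact algorithm in the appendix may differ in its details.

    import math

    def identify_sentences(p_bos, p_eos):
        """A sketch of the BOS&EOS search (Eq. 4). `p_bos` and `p_eos` are
        lists of per-word probabilities of length N; returns SU spans as
        (start, end) pairs with end inclusive. Words outside every span
        are treated as NSU material."""
        N = len(p_bos)
        eps = 1e-12
        lp = lambda p: math.log(max(p, eps))        # log p, clipped
        lq = lambda p: math.log(max(1.0 - p, eps))  # log (1 - p), clipped

        NEG = float("-inf")
        # out[i]: best log-score of w_0..w_{i-1} when outside an SU at i
        # ins[i]: best log-score when inside an unfinished SU at i
        out = [NEG] * (N + 1); ins = [NEG] * (N + 1)
        out[0] = 0.0
        bp_out = [None] * (N + 1); bp_ins = [None] * (N + 1)  # backpointers

        for i in range(N):
            # w_i belongs to an NSU: neither BOS nor EOS
            s = out[i] + lq(p_bos[i]) + lq(p_eos[i])
            if s > out[i + 1]: out[i + 1] = s; bp_out[i + 1] = ("out", "nsu")
            # w_i is a single-word SU: both BOS and EOS
            s = out[i] + lp(p_bos[i]) + lp(p_eos[i])
            if s > out[i + 1]: out[i + 1] = s; bp_out[i + 1] = ("out", "bos_eos")
            # w_i ends an SU started earlier: EOS but not BOS
            s = ins[i] + lq(p_bos[i]) + lp(p_eos[i])
            if s > out[i + 1]: out[i + 1] = s; bp_out[i + 1] = ("ins", "eos")
            # w_i starts an SU that continues: BOS but not EOS
            s = out[i] + lp(p_bos[i]) + lq(p_eos[i])
            if s > ins[i + 1]: ins[i + 1] = s; bp_ins[i + 1] = ("out", "bos")
            # w_i is SU-internal: neither BOS nor EOS
            s = ins[i] + lq(p_bos[i]) + lq(p_eos[i])
            if s > ins[i + 1]: ins[i + 1] = s; bp_ins[i + 1] = ("ins", "mid")

        # Backtrack from out[N]: we may not end inside an unfinished SU,
        # which enforces that the last label within any SU is EOS.
        spans, state, i, end = [], "out", N, None
        while i > 0:
            prev, label = (bp_out if state == "out" else bp_ins)[i]
            if label == "bos_eos":
                spans.append((i - 1, i - 1))
            elif label == "eos":
                end = i - 1
            elif label == "bos":
                spans.append((i - 1, end))
            state, i = prev, i - 1
        return sorted(spans)

For example, identify_sentences([0.9, 0.1, 0.1, 0.2], [0.05, 0.1, 0.95, 0.3]) returns [(0, 2)]: words 0-2 form an SU and the last word is left out as an NSU, which an EOS-only segmenter could not express.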
5 Evaluation

Due to the novelty of the task, currently there exists no benchmark for evaluating sentence identification. To address this issue, we propose a fully automatic procedure to convert the Universal Dependencies (UD) corpora (de Marneffe et al., 2021) into sentence identification benchmarks.

Concretely speaking, we conduct the following two steps based on the gold UD annotation: (i) the detection of unit (SU and NSU) boundaries and (ii) the classification of each unit into SU or NSU. As for (i), we simply use the original sentence boundaries in the UD annotation, where UD uses the term sentence in a broader sense including both SUs and NSUs (e.g. sentence fragments). Note that the exact boundaries between consecutive NSUs (which we call NSU–NSU boundaries) do not need to be accurate or consistent, since we are only interested in extracting the spans of SUs. However, we do expect that the original boundaries are generally reliable in all other cases (SU–SU and SU–NSU boundaries), which seems to be the case.

The main problem is (ii), i.e. how to classify each unit as an SU or NSU. To this end, we follow the notion of lexical sentence in linguistics (Nunberg, 1990) which defines an SU based on the dependencies among the lexical items, e.g. a group of words that contain a subject and predicate. In this work, we build upon the UD dependency relations and define an SU as a unit that contains at least one clausal predicate with a core or non-core argument.[3] Here, a clause expresses an event or proposition which we regard as an essential aspect of SUs. A clausal predicate and a core argument form the backbone of a clause, while a non-core argument modifies it (de Marneffe et al., 2021).

[3] To check this condition, we simply need to verify whether there is at least one core argument (e.g. nsubj, obj, ccomp) or non-core dependent (e.g. obl, advcl, aux). For a full list of the UD relations, see https://universaldependencies.org/u/dep/.

Note that our current definition excludes noun phrases appearing by themselves, since they only consist of the nominal dependent relations. However, we can flexibly customize the definition of SUs to include or exclude such phrases.
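To make the classification rule concrete, here is a small sketch of the SU/NSU check under the assumption that each unit comes with its UD dependency relations (e.g. parsed from a CoNLL-U file); the relation set below is an illustrative subset built from the examples in footnote 3, not the paper's exact inventory:

    # Core arguments and (a few) non-core dependents from the UD taxonomy;
    # an illustrative subset, not the paper's exact list.
    CLAUSAL_DEPRELS = {
        "nsubj", "obj", "iobj", "csubj", "ccomp", "xcomp",  # core arguments
        "obl", "advcl", "aux",                              # non-core dependents
    }

    def is_sentential_unit(deprels):
        """Classify a unit as SU iff it contains at least one core argument
        or non-core dependent; subtypes like 'nsubj:pass' count as well."""
        return any(d.split(":")[0] in CLAUSAL_DEPRELS for d in deprels)

    # A bare noun phrase (only nominal relations) is classified as an NSU:
    print(is_sentential_unit(["root", "flat", "punct", "appos"]))   # False
    # A clause with a subject and object is classified as an SU:
    print(is_sentential_unit(["nsubj", "root", "obj", "punct"]))    # True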
Due to the reliance on UD, our conversion procedure can be applied to a wide variety of languages supported in UD (currently over 100 languages). However, as a first set of experiments, we focus on the English Web Treebank (EWT) (Silveira et al., 2014) as the primary benchmark of sentence identification. This dataset comprises five genres of web media texts: namely weblogs, newsgroup threads, emails, product reviews, and Q&A websites. Consequently, the dataset contains formal SUs, informal SUs (e.g. without capitalization or punctuations) as well as a wide variety of NSUs.

We show some examples of NSUs in Table 2 (and more in Appendix B) identified based on our procedure. As shown by the results, our procedure can identify various NSUs including nonlinguistic markers, timestamps, lists, contact information, etc. We can also see that noun/prepositional phrases are classified as NSUs based on our criteria. By excluding such NSUs and identifying SUs, we can clearly separate the portions of the text that are worth sophisticated linguistic analyses, e.g. based on dependency parsing or manual inspection.

Table 2: Examples of gold NSUs in the English Web Treebank (EWT) identified based on our procedure. Each line corresponds to one example of NSU.

  *∼*∼*∼*∼*∼*∼*∼*∼*∼*
  **********NOTE**********
  By video conference from _______
  Excerpt:
  02/13/2001 08:02 PM
  5:00 PT ** 6:00 MT ** 7:00 CT ** 8:00 ET
  Time: 11:30am / 1:30pm Central / 2:30pm Eastern
  Sunshine Coast, British Columbia, Canada
  - UnleadedStocks.pdf
  t r u t h o u t — Perspective
  ( Answered, 2 Comments )
  The federal sites of Washington, DC.
  From Madrid to Seville to Barcelona an Valencia.

Finally, we summarize the dataset statistics of EWT in Table 3. Overall, 17∼28% of the units were classified as NSUs, with the test set containing the highest proportion of NSUs. We also regard SU extraction as a word-level or character-level BIO labeling task and report the number of gold BIO labels in Table 3.[4] At the word-level, we can see that the proportion of O-labels (indicating NSUs) is only 4∼8% and much smaller than the proportion of NSUs in terms of units: this is because NSUs are usually short and contain only a few words. At the character-level, the proportion of O-labels is slightly larger (6∼13%): this is because NSUs often contain extraordinarily long words like URLs and long sequences of nonlinguistic symbols.

[4] B = Beginning of SU, I = Inside of SU, and O = Outside of SU. Details of how we assign the gold BIO labels (at the word-level and character-level) are provided in Appendix C.

Table 3: EWT dataset statistics.

                                Train      Dev     Test
  Total SUs                    10,356    1,523    1,490
  Total NSUs                    2,187      478      587
  Word-Level        B-Label    10,356    1,523    1,490
                    I-Label   160,127   18,791   18,222
                    O-Label     6,939    1,302    1,822
  Character-Level   B-Label    10,356    1,523    1,490
                    I-Label   773,223   92,309   88,441
                    O-Label    47,107    9,925   13,232

Overall, we could verify that there exists a non-negligible amount of NSUs in the EWT dataset, which we aim to exclude with sentence identification in our experiments.
6 Experimental Setup

6.1 Model Setup

As we discussed in §4.2, our sentence identification method requires pretrained BOS and EOS labeling models to identify SUs and NSUs. To develop these models, we simply finetune RoBERTa-base by adding a binary BOS/EOS classifier on top of the encoder.

To enable our models to handle various lengths of the input texts, we concatenate the consecutive $L$ units of gold SUs and NSUs as the input during training, where $L$ is sampled from a geometric distribution with parameter $p_{CC}$.[5] However, the RoBERTa encoder has the restriction that the input text size cannot exceed 512 subwords. Therefore, if the input text size is too large, we replace $L$ with the maximum $L' < L$ which satisfies this restriction. Note that this is a common procedure to sample variable (instead of fixed) lengths of concatenated units (Joshi et al., 2020).

[5] With parameter $p_{CC} \in (0, 1]$, the probability mass function of the geometric distribution is $p(L = l) = (1 - p_{CC})^{l-1} p_{CC}$ where $l \in \{1, 2, 3, \ldots\}$. As $p_{CC}$ decreases, the distribution gets more skewed towards larger $L$. With $p_{CC} = 0$, we consider $p(L = \infty) = 1$.
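A minimal sketch of this sampling procedure is given below, assuming `units` is a list of tokenized SUs/NSUs and that a 512-token budget stands in for the subword limit; the inverse-CDF sampler mirrors the probability mass function in footnote 5:

    import math
    import random

    def sample_length(p_cc):
        # Geometric(p_cc) over {1, 2, 3, ...} via inverse-CDF sampling,
        # matching p(L = l) = (1 - p_cc)**(l - 1) * p_cc from footnote 5.
        if p_cc <= 0.0:
            return float("inf")  # p_cc = 0: concatenate up to the maximum
        if p_cc >= 1.0:
            return 1
        return 1 + int(math.log(1.0 - random.random()) / math.log(1.0 - p_cc))

    def sample_training_input(units, start, p_cc=0.5, max_len=512):
        """Concatenate consecutive units starting at `start`; if L units
        would exceed max_len tokens, fall back to the largest L' < L that
        fits (assumes each single unit fits within max_len)."""
        L = sample_length(p_cc)
        chosen, size = [], 0
        for unit in units[start:]:
            if len(chosen) >= L or size + len(unit) > max_len:
                break
            chosen.append(unit)
            size += len(unit)
        return [token for unit in chosen for token in unit]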
Assuming the existence of the in-domain sentence identification dataset (EWT Train/Dev), it is straightforward to train the BOS/EOS labeling models based on our unit concatenation procedure. However, we may not always have the gold annotation of SUs and NSUs for the target domain. To take such cases into account, we also consider a setup where we only have the standard sentence segmentation dataset (WSJ Train/Dev) to train the BOS/EOS labeling models.

When using the sentence segmentation dataset (WSJ), we need to apply the unit concatenation procedure using only clean, edited SUs. Unfortunately, this can yield the following data priors which do not actually hold in a sentence identification dataset (EWT): (i) an SU (almost) always starts with a capitalization and ends with punctuation, (ii) the first word of the input is always BOS and the last word is always EOS, and (iii) BOS always directly follows EOS.

To address (i) and (ii), we propose a simple data augmentation technique to alleviate the discrepancy in the data priors. To address (iii), we propose an ensembling technique with the unidirectional (instead of bidirectional) models which are agnostic to this data prior.

6.1.1 Data Augmentation (+AUG)

To address (i), we conduct a unit-level data augmentation, i.e. we modify each unit based on the following rules with a small probability $p_{DA}$:

• Convert all words in the unit to lower-case, upper-case, or title-case (e.g. "hello world", "HELLO WORLD", or "Hello World").

• Remove sentence ending punctuations based on a regular-expression matcher (following ERSATZ, Wicks and Post, 2021).

After the unit-level augmentation, we can apply the unit concatenation in the exact same manner.

Finally, to address (ii), we randomly apply a unit truncation to the first and last units of the concatenated input. To be specific, we choose a random word in the first (last) unit and remove all words prior (posterior) to it with a small probability $p_{TR}$. If the truncation is conducted, we regard the unit as an NSU and fix the gold BOS/EOS labels accordingly. See Table 4 for an illustration.

Table 4: Illustration of our data augmentation technique. In (i) unit-level augmentation, we randomly change the casing or remove the last punctuations of each unit. In (ii) unit truncation, we randomly truncate the first and last units of the input (and regard them as NSUs). (The original figure shows the example "Joe went to school. After that he ..." before and after augmentation and truncation, with its BOS/EOS labels fixed accordingly.)

Based on this procedure, we can expect to alleviate the data priors (i) and (ii). For more details, we refer the readers to Appendix D.
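The following sketch shows one way to implement these two steps, assuming units are lists of words and gold labels are word-level BIO tags; the punctuation pattern is a simplified stand-in for the ERSATZ-style matcher:

    import random
    import re

    PUNCT_RE = re.compile(r"[.!?]+$")  # simplified sentence-ending matcher

    def augment_unit(words, p_da=0.3):
        """(i) Unit-level augmentation: with probability p_da, re-case the
        unit or strip its sentence-ending punctuation."""
        if random.random() < p_da:
            rule = random.choice(["lower", "upper", "title", "strip_punct"])
            if rule == "strip_punct":
                if words and PUNCT_RE.fullmatch(words[-1]):
                    words = words[:-1]
            else:
                words = [getattr(w, rule)() for w in words]
        return words

    def truncate_edges(units, labels, p_tr=0.1):
        """(ii) Unit truncation: with probability p_tr, cut the first (last)
        unit at a random word and relabel it as an NSU (all O labels)."""
        if units and random.random() < p_tr:
            cut = random.randrange(len(units[0]))
            units[0] = units[0][cut:]           # drop words before the cut
            labels[0] = ["O"] * len(units[0])   # truncated unit becomes NSU
        if units and random.random() < p_tr:
            cut = random.randrange(len(units[-1]))
            units[-1] = units[-1][:cut + 1]     # drop words after the cut
            labels[-1] = ["O"] * len(units[-1])
        return units, labels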
6.1.2 Unidirectional Model (+UNI)

Simply concatenating SUs (without NSUs) yields the data prior (iii), i.e. BOS always directly follows EOS. This prior can be easily captured by the bidirectional models $p_{BOS}(w_i \mid W)$, $p_{EOS}(w_i \mid W)$ conditioned on the whole input $W$, including our RoBERTa-based models. For instance, as shown in Figure 1, the model may predict EOS at the end of the first unit ($w_2$ = #) just because the next word ($w_3$ = This) is likely predicted as BOS.

To alleviate this issue, we propose to combine the predictions of the unidirectional models for BOS and EOS labeling. To be precise, let $W^{\le i} = (w_0, \ldots, w_i)$ and $W^{\ge i} = (w_i, \ldots, w_{N-1})$. Then, we can represent the unidirectional BOS model as $p^{Uni}_{BOS}(w_i \mid W^{\ge i})$ (looking the context right-to-left) and the EOS model as $p^{Uni}_{EOS}(w_i \mid W^{\le i})$ (looking left-to-right). As illustrated in Figure 1, these models are agnostic to the data prior (iii). In practice, we can simply use different attention masks and share the encoder parameters (except the last classifier) for the unidirectional and bidirectional models.

Figure 1: Illustration of the bidirectional EOS model (left) and the unidirectional (left-to-right) EOS model (right). (In the original figure, the bidirectional model assigns $p_{EOS}(w_2 \mid W) = 0.65$ to the unit-final word, while the unidirectional model assigns $p^{Uni}_{EOS}(w_2 \mid W^{\le 2}) = 0.08$.)

We can utilize these unidirectional models by taking a linear interpolation with the bidirectional models as follows:

  $p^{+Uni}_{BOS}(w_i \mid W) = \lambda \cdot p^{Uni}_{BOS}(w_i \mid W^{\ge i}) + (1-\lambda) \cdot p_{BOS}(w_i \mid W)$
  $p^{+Uni}_{EOS}(w_i \mid W) = \lambda \cdot p^{Uni}_{EOS}(w_i \mid W^{\le i}) + (1-\lambda) \cdot p_{EOS}(w_i \mid W)$

Then, we can use $p^{+Uni}_{BOS}$ and $p^{+Uni}_{EOS}$ in place of $p_{BOS}$ and $p_{EOS}$ (respectively) to conduct sentence identification, as described in §4.2.
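As a sketch, the interpolation itself is a one-liner over per-word probability lists; `lam` corresponds to the λ above:

    def interpolate(p_uni, p_bi, lam=0.5):
        """Linear interpolation of unidirectional and bidirectional per-word
        probabilities; applied identically for the BOS and EOS models."""
        return [lam * u + (1.0 - lam) * b for u, b in zip(p_uni, p_bi)]

    # p_bos = interpolate(p_uni_bos, p_bi_bos)  # then run the search of §4.2
    # p_eos = interpolate(p_uni_eos, p_bi_eos)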
Finally, we compare our proposed methods against sentence segmentation baselines which only utilize EOS labels.[6] As for the baselines, we use the EOS labeling model developed in the same manner to segment the input text based on EOS. Note that we can optionally force the last word in the input to be EOS: in this case, the result will only contain SUs since all segments will end with EOS. By default, we do not force the last EOS: in this case, the segment after the last EOS (if it exists) is considered as an NSU.

[6] This EOS-only method is the most reasonable baseline to quantify the precise advantage from combining BOS labels in addition to EOS, which is proposed in our methods.

As a default configuration, we use $p_{CC} = 0.5$, $p_{DA} = 0.3$, $p_{TR} = 0.1$, and $\lambda = 0.5$ in our experiments. To ensure reproducibility, we report more details on the hyperparameters and model setup in Appendix D. For the precise procedure on how we convert between the word-, character-, and subword-level labels (for RoBERTa), we refer the readers to Appendix C.
6.2 Evaluation Setup

In the evaluation phase, we consider three ways of assembling the input texts on which we conduct sentence identification. Firstly, we can apply the same unit concatenation procedure as described in §6.1. To be specific, we use $p_{CC} = 0.5$ (same as the training phase) and $p_{CC} = 0$ (which concatenates the units up to the maximal length) to simulate both shorter and longer lengths of the input texts.

However, this approach is relatively synthetic in the sense that we take the gold unit boundaries for granted. They are usually unavailable at the inference time, so we should consider a more realistic setting for evaluating the methods without relying on the gold unit boundaries.

To this end, we propose to evaluate sentence identification as a postprocessing of sentence segmentation. To be specific, we first apply the state-of-the-art method ERSATZ (Wicks and Post, 2021) on the raw text of EWT and then apply sentence identification to each segmented text. Note that ERSATZ has high precision but still predicts false EOS which can fragment a gold SU: in such cases, we consider the fragmented SUs as NSUs and fix the labels accordingly (just as we did in unit truncation, cf. §6.1 and Table 4).

As for the evaluation metrics, we convert the predictions of our methods into word/character-level BIO labels (cf. Appendix C) and compute the F1 score for each label prediction. Then, we summarize the results as the macro average F1 and weighted average F1. We also compute the F1 score of the exact SU span extraction at the word/character-level. Finally, we run each experiment (from model training to testing) five times with different random seeds and report the average and standard deviation as the final results.
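For reference, converting predicted SU spans into word-level BIO labels (against which these F1 scores are computed) can be sketched as follows; the span format matches the sketch given in §4.2:

    def spans_to_bio(n_words, su_spans):
        """Convert SU spans (start, end inclusive) over n_words words into
        word-level BIO labels; words outside every span are O (NSU)."""
        labels = ["O"] * n_words
        for start, end in su_spans:
            labels[start] = "B"
            for i in range(start + 1, end + 1):
                labels[i] = "I"
        return labels

    # Per-label F1 can then be computed from gold vs. predicted sequences,
    # e.g. with sklearn.metrics.f1_score(gold, pred, average=None).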
7 Results

Table 5 summarizes the word-level evaluation results. The results for the character-level evaluation show similar tendencies, so we put them in Appendix E. The F1 score for each BIO label prediction is also available in Appendix E.

Firstly, we take a look at the results when we have the in-domain sentence identification dataset (EWT Train/Dev) for model development. In this setup, we can verify that our proposed method (BOS&EOS) significantly outperforms the baselines (EOS-Only) in all metrics. For instance, our method achieves consistently high performance of 84∼89% F1 for the exact SU span extraction, both at the word- and character-level. This is a very promising result that demonstrates the effectiveness of our method when we can leverage the gold SUs and NSUs from the target domain.

Table 5: Overall Results (Word-Level). We report the macro/weighted average F1 of the BIO labeling task and the F1 score of the exact SU span extraction task. Details of our experimental setup are discussed in §6.

  Train/Dev  Model                 | EWT Test (pCC = 0.5)       | EWT Test (pCC = 0)         | EWT Test (Postprocess)
             (BIO F1 / Span F1)    | Macro    Weighted Span     | Macro    Weighted Span     | Macro    Weighted Span
  EWT        EOS-Only              | 83.2±1.5 93.9±0.6 72.8±1.8 | 59.7±0.2 86.4±0.1 58.2±1.1 | 86.3±2.7 94.6±1.1 81.6±2.4
  Train/Dev  EOS-Only (force last) | 58.6±0.1 86.6±0.0 60.4±0.8 | 57.6±0.2 85.9±0.1 57.7±1.0 | 59.1±0.1 85.7±0.0 62.3±0.3
             BOS&EOS               | 93.0±1.4 97.3±0.6 87.3±1.6 | 91.0±1.8 96.4±0.7 84.1±2.6 | 92.3±1.0 96.7±0.4 88.8±0.9
  WSJ        EOS-Only              | 71.7±0.7 88.9±0.4 59.2±2.4 | 56.9±0.6 85.2±0.3 48.2±2.5 | 71.5±0.3 87.8±0.3 67.8±0.3
  Train/Dev  EOS-Only (force last) | 57.5±0.3 86.2±0.2 53.6±2.1 | 55.4±0.7 85.0±0.3 48.2±2.5 | 58.9±0.1 85.7±0.0 61.1±0.2
             EOS-Only (+AUG)       | 66.4±1.5 88.3±0.4 59.5±1.4 | 58.3±0.5 86.1±0.3 54.4±2.5 | 71.1±1.3 88.5±0.6 66.2±1.9
             BOS&EOS               | 71.5±0.2 89.1±0.2 59.1±1.5 | 57.7±0.9 85.4±0.2 48.8±1.6 | 71.0±0.3 87.9±0.2 68.4±0.3
             BOS&EOS (+UNI)        | 70.4±0.7 88.2±0.3 60.0±1.1 | 63.3±0.8 86.0±0.4 53.0±1.3 | 70.8±0.4 87.6±0.2 68.4±0.1
             BOS&EOS (+UNI +AUG)   | 72.5±0.4 89.5±0.1 66.6±0.2 | 72.4±1.3 89.1±0.5 63.7±1.0 | 74.3±1.1 89.6±0.4 71.9±1.4
Secondly, we focus on the results where we only utilize the standard sentence segmentation dataset (WSJ Train/Dev) for model development. In this setup, we also report the results of applying our data augmentation (+AUG) and unidirectional model (+UNI) techniques from §6.1.[7]

[7] We did not observe any improvement from applying these techniques to the in-domain dataset (EWT Train/Dev), which is consistent with our motivation and expectation.

Due to the data discrepancy between WSJ and EWT, we find a natural drop in performance compared to the previous setup using in-domain EWT Train/Dev. However, we can verify that our techniques (+AUG, +UNI) generally help to alleviate this issue, and our proposed method performs on par or slightly better than the EOS-only baselines when applying these techniques. It is especially worth noting the improvement in the exact SU span extraction task (reaching 64∼72% F1), where the advantage of our method is the most conspicuous and consistent in both word- and character-level evaluation. This improvement can also be explained by the higher performance in the B-label prediction with our method (Appendix E), which is a prerequisite for accurate SU span extraction.

Finally, we note that the EOS-only baseline without forcing the last EOS can be quite competitive with shorter inputs ($p_{CC} = 0.5$ and postprocessing) but performs considerably worse when the input texts are longer ($p_{CC} = 0$). This is because the baseline can only predict the last segment of the input as an NSU, which is less problematic when the input texts are shorter but becomes increasingly problematic with longer inputs (since most NSUs will not be able to be removed). In contrast, our proposed method performs much more robustly under various input lengths.

Through further experiments and analyses, we found that (i) the results are stable across different hyperparameter choices, (ii) predictions are reasonable especially when using the in-domain dataset (EWT Train/Dev) for model development, and (iii) our methods do not sacrifice performance on the formal/edited texts of the sentence segmentation dataset (WSJ Test). This detailed evidence can be found in Appendix F.
8 Conclusion

In this paper, we introduced a novel task of sentence identification, where we aim to identify SUs while excluding NSUs in a given text (§3). Through sentence identification, we can clearly distinguish the portions of the text that are appropriate (or not) for the prediction and evaluation of sophisticated linguistic analyses, such as dependency parsing, semantic role labeling, etc.

To conduct sentence identification, we proposed a simple yet effective method of combining the BOS and EOS labeling models to determine the SUs and NSUs (§4). To evaluate sentence identification, we designed an automatic, language-independent procedure to convert the UD corpora into sentence identification benchmarks (§5).

In our experiments, we developed the BOS/EOS labeling models by finetuning pretrained RoBERTa (§6). Based on the experimental results, we showed that our proposed method combining the BOS and EOS labels outperforms sentence segmentation baselines which only utilize EOS labels in all of the considered settings (§7). Overall, we expect sentence identification to be a fundamental framework for the preprocessing of noisy, informal, or non-standard texts in the real world.
Limitations

Firstly, our current experiments are limited to English and cover only five domains of web media texts in EWT. However, our task formulation (§3), method (§4), and evaluation framework (§5) are fully agnostic to the language and domain. Hence it is straightforward to conduct experiments in different languages or domains (as long as they are supported in the UD). While we expect similar results with different languages/domains, we leave further investigation as a future work.

Secondly, while our method performs reliably when the in-domain dataset is available, there is still a huge room left for improvement without relying on such resources (e.g. only using the standard sentence segmentation dataset). To make our method fully practical, we still need to improve on the accuracy and robustness in such cross-domain scenarios. One potential approach is to refine the definitions of SU and NSU probabilities from §4.2 to make sentence identification more robust. For instance, we can incorporate span-level scores instead of only using word-level BOS/EOS probabilities to define the SU/NSU probabilities. We leave further improvement and extension of our approach as an important future work.

Finally, our methods are currently evaluated on the (exact) SU span extraction task. Ideally, we should also evaluate the methods on downstream applications such as POS tagging, syntactic parsing, semantic role labeling, etc. However, we still expect that the (exact) SU span extraction will play a primary role in the evaluation, since accurate (say human-level) identification of SUs/NSUs will likely provide unprecedented benefits on a wide variety of NLP applications dealing with real-world texts. While we leave the precise analyses on downstream applications as future work, our contributions make the first foundational step towards expanding the capability of the long-established sentence segmentation task.
1070
+ References
+ Wirote Aroonmanakun et al. 2007. Thoughts on word and sentence segmentation in Thai. In Proceedings of the Seventh Symposium on Natural Language Processing, pages 85–90.
+ Marie-Catherine de Marneffe, Christopher D. Manning, Joakim Nivre, and Daniel Zeman. 2021. Universal Dependencies. Computational Linguistics, 47(2):255–308.
+ Timothy Dozat and Christopher D. Manning. 2017. Deep biaffine attention for neural dependency parsing. In Proc. of ICLR.
+ Timothy Dozat and Christopher D. Manning. 2018. Simpler but more accurate semantic dependency parsing. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 484–490, Melbourne, Australia. Association for Computational Linguistics.
+ Dan Gillick. 2009. Sentence boundary detection and the problem with the U.S. In Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics, Companion Volume: Short Papers, pages 241–244, Boulder, Colorado. Association for Computational Linguistics.
+ Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S. Weld, Luke Zettlemoyer, and Omer Levy. 2020. SpanBERT: Improving pre-training by representing and predicting spans. Transactions of the Association for Computational Linguistics, 8:64–77.
+ Tibor Kiss and Jan Strunk. 2006. Unsupervised multilingual sentence boundary detection. Computational Linguistics, 32(4):485–525.
+ Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ondřej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics, Companion Volume: Proceedings of the Demo and Poster Sessions, pages 177–180, Prague, Czech Republic. Association for Computational Linguistics.
+ Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation. Transactions of the Association for Computational Linguistics, 8:726–742.
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692.
+ Mitchell P. Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a large annotated corpus of English: The Penn Treebank. Computational Linguistics, 19(2):313–330.
+ Geoffrey Nunberg. 1990. The Linguistics of Punctuation. 18. Center for the Study of Language (CSLI).
+ Jonathon Read, Rebecca Dridan, Stephan Oepen, and Lars Jørgen Solberg. 2012. Sentence boundary detection: A long solved problem? In Proceedings of COLING 2012: Posters, pages 985–994, Mumbai, India. The COLING 2012 Organizing Committee.
+ Ines Rehbein, Josef Ruppenhofer, and Thomas Schmidt. 2020. Improving sentence boundary detection for spoken language transcripts. In Proceedings of the Twelfth Language Resources and Evaluation Conference, pages 7102–7111, Marseille, France. European Language Resources Association.
+ Michael D. Riley. 1989. Some applications of tree-based modelling to speech and language. In Speech and Natural Language: Proceedings of a Workshop Held at Cape Cod, Massachusetts, October 15–18, 1989.
+ Dwijen Rudrapal, Anupam Jamatia, Kunal Chakma, Amitava Das, and Björn Gambäck. 2015. Sentence boundary detection for social media text. In Proceedings of the 12th International Conference on Natural Language Processing, pages 254–260, Trivandrum, India. NLP Association of India.
+ Natalia Silveira, Timothy Dozat, Marie-Catherine de Marneffe, Samuel Bowman, Miriam Connor, John Bauer, and Chris Manning. 2014. A gold standard dependency corpus for English. In Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14), pages 2897–2904, Reykjavik, Iceland. European Language Resources Association (ELRA).
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Łukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Proc. of NeurIPS.
+ Daniel J. Walker, David E. Clements, Maki Darwin, and Jan W. Amtrup. 2001. Sentence boundary detection: a comparison of paradigms for improving MT quality. In Proceedings of Machine Translation Summit VIII, Santiago de Compostela, Spain.
+ Xiaolin Wang, Masao Utiyama, and Eiichiro Sumita. 2019. Online sentence segmentation for simultaneous interpretation using multi-shifted recurrent neural network. In Proceedings of Machine Translation Summit XVII: Research Track, pages 1–11, Dublin, Ireland. European Association for Machine Translation.
+ Rachel Wicks and Matt Post. 2021. A unified approach to sentence segmentation of punctuated text in many languages. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3995–4007, Online. Association for Computational Linguistics.
+ Nianwen Xue and Yaqin Yang. 2011. Chinese sentence segmentation as comma classification. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pages 631–635, Portland, Oregon, USA. Association for Computational Linguistics.
+ Nina Zhou, AiTi Aw, Nattadaporn Lertcheva, and Xuancong Wang. 2016. A word labeling approach to Thai sentence boundary detection and POS tagging. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 319–327, Osaka, Japan. The COLING 2016 Organizing Committee.
+
+ A Dynamic Programming Algorithm
+ To find the maximum value (and the argmax) of Eq. 4 from §4.2, we rely on a simple dynamic programming framework. To be specific, we consider the partial labeling of BOS and EOS up to W≤k = (w0, ..., wk), where k ≤ N − 1. Then, we aim to compute the maximum log probability of Eq. 4 based on the partial labeling, i.e. using W≤k in place of W.
+ Since the labeling is partial, W≤k may end inside the SU (i.e. the last label is BOS) or outside the SU (i.e. the last label is EOS). Let log pIS(k+1) denote the maximum log probability when W≤k ends inside the SU, and log pOS(k+1) the maximum log probability when W≤k ends outside the SU. Then, we can initialize log pIS(0) = log 0 = −∞ and log pOS(0) = log 1 = 0 (since we always start outside the SU) and iteratively update the two values as follows:
+   log p′IS(i)   = max{ log pIS(i) + log(1 − pBOS(wi)),  log pOS(i) + log pBOS(wi) }
+   log p′OS(i)   = log pOS(i) + log(1 − pBOS(wi))
+   log pIS(i+1)  = log p′IS(i) + log(1 − pEOS(wi))
+   log pOS(i+1)  = max{ log p′IS(i) + log pEOS(wi),  log p′OS(i) + log(1 − pEOS(wi)) }    (5)
+ Note that we first update pIS(i) → p′IS(i) and pOS(i) → p′OS(i) based on the BOS probability pBOS(wi). Then, we update p′IS(i) → pIS(i+1) and p′OS(i) → pOS(i+1) based on the EOS probability pEOS(wi).8 The iterative procedure is illustrated in Figure 2.
+ Finally, we can compute the log probability log pOS(N) (since we always end outside the SU), which corresponds to the maximum value of Eq. 4. To obtain the argmax, we can simply incorporate backtracking during the iterative updates of Eq. 5. Through this dynamic programming framework, we can ensure that the restrictions from §4.2 are satisfied: namely, (i) the first label should be BOS, (ii) the last label should be EOS, and (iii) BOS and EOS labels need to appear alternately.
+ In practice, we can limit the candidate BOS indices to the subset where pBOS(wi) is higher than a certain threshold c. This can be efficiently implemented by simply skipping the updates of p′IS(i) and p′OS(i), i.e. using p′IS(i) = pIS(i) and p′OS(i) = pOS(i), if pBOS(wi) < c.9 Likewise, we can limit the candidate EOS indices by skipping the updates of pIS(i+1) and pOS(i+1) if pEOS(wi) < c. Generally speaking, this leads to a more efficient algorithm; therefore, we use a candidate threshold of c = 0.1 for restricting both BOS and EOS indices throughout our experiments.
+ 8 Note that if a single word wi is labeled as both BOS and EOS at the same time, we can extract it as a single SU.
+ 9 This is equivalent to forcing wi to be non-BOS, i.e. setting pBOS(wi) = 0 in Eq. 5.
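+ To make the recursion concrete, the following is a minimal Python sketch of the Eq. 5 updates with backtracking. It is an illustrative re-implementation rather than the authors' released code: the function names and the toy probabilities in the usage example are our own, and the candidate-threshold trick is omitted for brevity.
+ import math
+
+ NEG_INF = float("-inf")
+
+ def log_(x):
+     return math.log(x) if x > 0.0 else NEG_INF
+
+ def identify_sus(p_bos, p_eos):
+     """Most probable SU spans under the two-state (in-sentence IS /
+     out-of-sentence OS) recursion of Eq. 5, with backtracking."""
+     n = len(p_bos)
+     is_lp, os_lp = NEG_INF, 0.0          # we always start outside an SU
+     back = []                            # per-word backpointers
+     for i in range(n):
+         # BOS phase: best log prob of being inside an SU after word i's BOS step
+         mid_from_is = is_lp + log_(1.0 - p_bos[i])   # no BOS at i, stay inside
+         mid_from_os = os_lp + log_(p_bos[i])         # open a new SU at i
+         mid_src = "IS" if mid_from_is >= mid_from_os else "OS"
+         is_mid = max(mid_from_is, mid_from_os)
+         os_mid = os_lp + log_(1.0 - p_bos[i])        # no BOS at i, stay outside
+         # EOS phase
+         close = is_mid + log_(p_eos[i])              # EOS at i closes the SU
+         stay_out = os_mid + log_(1.0 - p_eos[i])     # remain outside
+         back.append((mid_src, close >= stay_out))
+         is_lp = is_mid + log_(1.0 - p_eos[i])
+         os_lp = max(close, stay_out)
+     # Backtrack from the final OS state (we must end outside an SU);
+     # os_lp now holds the maximum log probability log p_OS(N) of Eq. 4.
+     spans, state, end = [], "OS", None
+     for i in range(n - 1, -1, -1):
+         mid_src, closed = back[i]
+         if state == "OS":
+             if closed:                   # word i carries an EOS label
+                 if mid_src == "OS":      # BOS and EOS on the same word
+                     spans.append((i, i))
+                 else:
+                     end, state = i, "IS"
+         else:                            # inside an SU, no EOS at word i
+             if mid_src == "OS":          # word i carries the BOS label
+                 spans.append((i, end))
+                 state = "OS"
+     return spans[::-1]
+
+ # Toy usage: two SUs over words 0-2 and 4-5, with word 3 left as an NSU.
+ p_bos = [0.9, 0.1, 0.1, 0.2, 0.8, 0.1]
+ p_eos = [0.1, 0.1, 0.9, 0.2, 0.1, 0.9]
+ print(identify_sus(p_bos, p_eos))        # -> [(0, 2), (4, 5)]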
+ B SU and NSU Examples
+ In Table 6, we provide more examples of SUs and NSUs identified based on our procedure described in §5. As for the SUs, we can verify that EWT contains clean, formal SUs with appropriate capitalization and punctuation. We can also verify that EWT contains various types of informal SUs, e.g. ones that lack capitalization/punctuation, use non-standard casing, end with emoticons, include spelling errors, concatenate consecutive SUs without a space, etc.
+ C Label Assignment and Conversion
+ In this section, we explain the precise procedure of how we (i) assign the gold character-level labels, (ii) convert the character-level labels to word/subword-level labels, and (iii) convert the subword-level labels to character/word-level labels. We limit our explanation to BIO labels, since it is straightforward to convert them to the combination of BOS and EOS labels (and vice versa).
+ Firstly, we can assign the gold character-level labels from the UD annotation by taking the character-level alignment, which determines the exact spans of SUs and NSUs. From the character-level labels, we can assign the word- or subword-level labels based on the following rule:
+ • If the word (or subword) contains a character with the B-label, assign it the B-label.
+ • Else if it contains a character with the I-label, assign the I-label.
+ • Otherwise assign the O-label.
+ For instance, this procedure is used to create the subword-level labels for training our BOS/EOS labeling models.
+ To evaluate our methods, we need to convert the subword-level labels produced by our methods into character-level labels, which can then be converted into word-level labels (based on the
+
+ Figure 2: Illustration of the dynamic programming procedure.
+ SUs
+ President Bush on Tuesday nominated two individuals to replace retiring jurists on federal courts in the Washington area.
+ Unfortunately, Mr. Lay will be in San Jose, CA participating in a conference, where he is a speaker, on June 14.
+ “In 1972, there was an enormous glut of pilots,” Campenni says.
+ PS – There is a happy hour tonight at Scudeiros on Dallas Street (just west of the Met Garage) beginning around 5:00.
+ 2) Your vet would not prescribe them if they didn’t think it would be helpful.
+ BUT EVERYONE HAS THERE OWN WAY!!!!!!
+ The motel is very well maintained, and the managers are so accomodating, it’s kind of like visiting family each year! ;-)
+ where can I find the best tours to the Mekong Delta at reasonable prices?
+ it seems like its healthier too, but its prolly not.
+ I have wifi at my house, but thats just at my house...is there anyway i can buy some card to make the ipod itself have wifi?
+ NSUs
+ —->===}*{===<—-
+ - Lisa_coverletter.doc << File: Lisa_coverletter.doc >>
+ Thur. Sept. 28 - Paris (Versailles or Fontainbleu - half day side trip)
+ 9.3m - Number of US unemployed in April 2004.
+ Game 1: Monday, May 28 @ 2:00PM vs. Los Angeles SPARKS
+ Mixed Tempura.....................8.25 Shrimp or vegetable tempura & salad.
+ Infinity stereo, bucket seats, nerf bars, tool box, bed liner, camper tow package, 5 speed manual.
+ printing, printing, copies, printing, copies, printing,
+ A++++ !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ Dear Sir / Madam,
+ Table 6: Examples of gold SUs and NSUs in the English Web Treebank (EWT) identified based on our procedure (§5). Each line corresponds to one example of an SU or NSU.
+ previous procedure). To convert a subword-level label into a sequence of character-level labels, we apply the following rule (where n denotes the number of characters in the subword):
+ • If the subword has the B-label, the character-level labels are 1 B-label followed by n − 1 I-labels.
+ • If the subword has the I-label, the character-level labels are n I-labels.
+ • If the subword has the O-label, the character-level labels are n O-labels.
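+ As an illustration, both conversion rules above fit in a few lines of Python. This is a minimal sketch under our own naming; `unit_spans` holds the (start, end) character offsets of each word or subword, with `end` exclusive.
+ def chars_to_units(char_labels, unit_spans):
+     """Character-level BIO labels -> word/subword-level BIO labels."""
+     out = []
+     for start, end in unit_spans:
+         span = char_labels[start:end]
+         if "B" in span:
+             out.append("B")        # any B-character makes the unit a B
+         elif "I" in span:
+             out.append("I")        # else any I-character makes it an I
+         else:
+             out.append("O")
+     return out
+
+ def subword_to_chars(label, n):
+     """One subword-level BIO label -> its n character-level labels."""
+     if label == "B":
+         return ["B"] + ["I"] * (n - 1)
+     return [label] * n             # "I" -> n I-labels, "O" -> n O-labels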
+ D Details on the Model Setup
+ As discussed in §6.1, we finetune the pretrained RoBERTaBASE publicly available on the HuggingFace model hub.10 We add a binary BOS/EOS classifier on top of the encoder, which is a single-layer MLP with a hidden size of 768. We share the encoder parameters and use different classifiers for the BOS/EOS predictions. The BOS/EOS models are trained jointly by summing their losses.
+ When we combine the unidirectional models (+UNI), we take the same approach and use different classifiers for the unidirectional/bidirectional models. Again, the encoder parameters are shared and all models are trained jointly.
+ As for the training data preparation, we apply the unit concatenation and data augmentation (+AUG) on the fly, i.e. we see different concatenations and augmentations of the units in each iteration. The same procedure is applied to the validation set.
+ During data augmentation, we remove the last sentence-ending punctuation based on the following regular expression, similar to the candidate boundary detector in ERSATZ (Wicks and Post, 2021):
+ • (.*PeP*), where P denotes the set of punctuation marks and Pe ⊂ P denotes the sentence-ending punctuation marks.
+ 10 https://huggingface.co/models
+ Since our experiments are conducted on English, we use P = {.?!")'} and Pe = {.?!}.
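+ A small Python sketch of this augmentation step is shown below. The character classes mirror the P and Pe definitions above; the function name and the handling of any trailing punctuation are our own choices rather than part of the released implementation.
+ import re
+
+ P = ".?!\")'"     # all punctuation marks considered (the paper's P)
+ PE = ".?!"        # sentence-ending punctuation (the paper's Pe, a subset of P)
+ CANDIDATE = re.compile("(.*[" + re.escape(PE) + "])[" + re.escape(P) + "]*$")
+
+ def drop_final_end_punct(unit):
+     """If the unit matches (.*Pe P*), delete its last sentence-ending
+     punctuation mark (simulating EOS-less informal SUs for +AUG)."""
+     m = CANDIDATE.match(unit)
+     if m is None:
+         return unit
+     head = m.group(1)              # greedy .* ends at the last Pe character
+     return head[:-1] + unit[len(head):]
+
+ # drop_final_end_punct("is there anyway i can buy some card?")
+ # -> "is there anyway i can buy some card"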
+ Finally, all models are implemented in PyTorch and trained on a single Tesla V100-SXM2-32GB GPU. We use a batch size of 8, accumulate the gradients for 32 batches, and apply gradient clipping at 1.0 before updating the model weights. As for the optimizer, we use Adam with an initial learning rate of 0.0001 and exponentially decay the learning rate by γ = 0.95 after each epoch. We check the validation loss every 200 batches and stop the training early if there is no improvement for 5 consecutive evaluations.
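+ For reference, a compact PyTorch sketch of the architecture described above might look as follows. It is not the authors' code: the class and argument names are ours, and the two-layer head is one reasonable reading of "a single-layer MLP with a hidden size of 768".
+ import torch
+ import torch.nn as nn
+ from transformers import RobertaModel
+
+ class BosEosTagger(nn.Module):
+     """Shared RoBERTa encoder with separate binary BOS and EOS heads,
+     trained jointly by summing the two token-level losses."""
+     def __init__(self, name="roberta-base"):
+         super().__init__()
+         self.encoder = RobertaModel.from_pretrained(name)
+         d = self.encoder.config.hidden_size              # 768 for the base model
+         self.bos_head = nn.Sequential(nn.Linear(d, d), nn.Tanh(), nn.Linear(d, 1))
+         self.eos_head = nn.Sequential(nn.Linear(d, d), nn.Tanh(), nn.Linear(d, 1))
+         self.loss_fn = nn.BCEWithLogitsLoss()
+
+     def forward(self, input_ids, attention_mask, bos_gold=None, eos_gold=None):
+         h = self.encoder(input_ids, attention_mask=attention_mask).last_hidden_state
+         bos_logits = self.bos_head(h).squeeze(-1)        # (batch, seq_len)
+         eos_logits = self.eos_head(h).squeeze(-1)
+         if bos_gold is None:                             # inference: probabilities
+             return torch.sigmoid(bos_logits), torch.sigmoid(eos_logits)
+         return self.loss_fn(bos_logits, bos_gold.float()) + \
+                self.loss_fn(eos_logits, eos_gold.float())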
+ E The Full Experimental Results
+ In this section, we report the full results of our experiments which did not fit in §7. Table 7 shows the word-level F1 scores for each B-, I-, and O-label prediction. Table 8 shows the overall results for the character-level evaluation.
+ Generally speaking, we can confirm the same results as observed in §7. Firstly, our proposed method significantly outperforms the baselines when we use the EWT Train/Dev dataset for model development. Secondly, our method performs slightly better than (or at least on par with) the baselines when developed on the WSJ Train/Dev dataset. Finally, the baseline without forcing the last EOS is competitive with shorter inputs (pCC = 0.5 and postprocessing) but performs considerably worse when the input texts are longer (pCC = 0).
+ F Further Experiments and Analyses
+ In this section, we provide further experiments and analyses to complement our study. To be specific, we discuss the effect of the choice of hyperparameters (F.1), provide qualitative analyses based on example model outputs (F.2), and evaluate sentence identification on the sentence segmentation dataset (F.3).
+ F.1 Effect of Hyperparameters
+ As a default configuration, we used pDA = 0.3 and pTR = 0.1 for the data augmentation (+AUG) and λ = 0.5 for the unidirectional model ensembling (+UNI). To examine the effect of the choice of these hyperparameters, we conducted further experiments in which we varied these default values. Note that all evaluation results in this subsection are based on BOS&EOS (+UNI +AUG) developed on WSJ Train/Dev.
+ Firstly, we focus on the data augmentation and report the results of our method trained with different sets of pDA and pTR (with λ fixed at 0.5). Since increasing pDA leads to higher recall (and lower precision) of SU extraction and increasing pTR leads to higher precision (and lower recall), we used a fixed ratio of pDA : pTR = 3 : 1, which seemed to make a good trade-off. As shown in Table 9, the results are generally stable across the different choices of the hyperparameters. However, more data augmentation (with larger values of pDA and pTR) tends to slightly improve the performance, especially for the exact SU span extraction.
+ Secondly, we focus on the unidirectional model ensembling and report the results of changing the linear interpolation rate λ ∈ [0, 1], where λ = 0 is equivalent to using only the bidirectional models and λ = 1 to using only the unidirectional models. We fix pDA = 0.3 and pTR = 0.1 and only change λ at inference time without retraining the unidirectional or bidirectional models. As shown in Figure 3, we found that unidirectional and bidirectional models generally have complementary benefits, and choosing an intermediate value of λ leads to the best performance. The results also indicate that we may be able to obtain further improvement by tuning λ on the validation set, although we simply fixed λ = 0.5 throughout our experiments.
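+ The ensembling itself is just a convex combination of the two models' predictions; a one-line sketch in our own notation, assuming the interpolation is applied directly to the predicted BOS/EOS probabilities:
+ import numpy as np
+
+ def ensemble(p_bidir, p_unidir, lam=0.5):
+     """Interpolated BOS (or EOS) probabilities; lam = 0 keeps only the
+     bidirectional model and lam = 1 only the unidirectional one."""
+     return (1.0 - lam) * np.asarray(p_bidir) + lam * np.asarray(p_unidir)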
+ F.2 Qualitative Analyses
+ In Tables 10 and 11, we show the actual predictions made by our proposed method developed on EWT Train/Dev and WSJ Train/Dev. For the latter, we applied +UNI and +AUG with the default hyperparameters.
+ In the first example (Table 10), we can verify that both models identify the correct SU span while removing the non-sentential header as the NSU. This is a relatively easy example, since the start of the SU is capitalized and less ambiguous.
+ In the second example (Table 11), we can observe that our method using in-domain data (EWT Train/Dev) extracts the correct SU span, while our method developed on out-of-domain data (WSJ Train/Dev) incorrectly excludes a part of the SU. This seems to be a relatively difficult example, since the start of the SU is not capitalized and more ambiguous. It is worth noting that such SUs can be reliably extracted when we can leverage the in-domain annotation of gold SUs and NSUs.
+
+ Train/Dev | Model                 | EWT Test (pCC = 0.5)        | EWT Test (pCC = 0.0)        | EWT Test (Postprocess)
+           |                       | B-Label  I-Label  O-Label   | B-Label  I-Label  O-Label   | B-Label  I-Label  O-Label
+ EWT       | EOS-Only              | 85.6±0.8 97.3±0.3 66.6±3.5  | 78.0±0.6 95.1±0.1  6.0±0.4  | 90.2±1.2 97.5±0.5 71.3±6.6
+ EWT       | EOS-Only (force last) | 79.8±0.2 95.9±0.0  0.0±0.0  | 77.8±0.6 95.1±0.1  0.0±0.0  | 81.7±0.2 95.7±0.0  0.0±0.0
+ EWT       | BOS&EOS               | 94.3±0.6 98.7±0.3 86.1±3.5  | 93.0±1.0 98.2±0.4 81.7±4.0  | 94.7±0.3 98.4±0.2 83.9±2.4
+ WSJ       | EOS-Only              | 78.7±1.3 94.4±0.4 42.1±1.3  | 71.8±1.9 94.3±0.2  4.5±0.4  | 83.3±0.2 93.8±0.3 37.3±0.9
+ WSJ       | EOS-Only (force last) | 76.7±0.9 95.6±0.1  0.0±0.0  | 71.7±1.9 94.6±0.2  0.0±0.0  | 81.0±0.4 95.6±0.0  0.0±0.0
+ WSJ       | EOS-Only (+AUG)       | 79.4±1.0 95.4±0.2 24.5±4.1  | 78.1±1.8 93.3±0.2  1.4±1.1  | 82.7±1.1 94.9±0.5 35.6±3.2
+ WSJ       | BOS&EOS               | 79.4±0.9 94.8±0.2 40.5±0.6  | 72.9±1.2 94.3±0.2  5.8±2.0  | 83.9±0.2 94.1±0.2 35.2±0.9
+ WSJ       | BOS&EOS (+UNI)        | 79.8±0.6 93.9±0.2 37.5±1.5  | 76.2±1.3 93.3±0.3 20.2±1.2  | 83.8±0.1 93.8±0.1 34.7±0.9
+ WSJ       | BOS&EOS (+UNI +AUG)   | 83.7±0.1 95.0±0.2 38.7±1.2  | 83.0±0.6 94.6±0.3 39.7±3.5  | 85.9±0.6 95.2±0.2 41.9±2.8
+ Table 7: BIO Labeling Results (Word-Level). We report the F1 scores for each B-, I-, and O-label prediction.
+ Train/Dev | Model                 | EWT Test (pCC = 0.5)         | EWT Test (pCC = 0.0)         | EWT Test (Postprocess)
+           |                       | BIO Mac. BIO Wt.  Span       | BIO Mac. BIO Wt.  Span       | BIO Mac. BIO Wt.  Span
+ EWT       | EOS-Only              | 83.8±1.1 92.7±0.5 72.8±1.8   | 58.5±0.2 81.5±0.0 58.2±1.1   | 87.7±2.3 93.9±1.2 81.6±2.4
+ EWT       | EOS-Only (force last) | 57.7±0.1 81.0±0.0 60.4±0.8   | 56.9±0.2 80.9±0.0 57.7±1.0   | 58.1±0.1 79.9±0.0 62.3±0.3
+ EWT       | BOS&EOS               | 94.0±1.0 97.2±0.6 87.3±1.6   | 92.2±1.5 96.3±0.7 84.1±2.6   | 93.5±0.6 96.6±0.4 88.9±0.8
+ WSJ       | EOS-Only              | 72.8±0.6 86.9±0.4 59.1±2.3   | 56.0±0.6 80.9±0.1 48.2±2.5   | 73.3±0.4 85.6±0.2 67.7±0.4
+ WSJ       | EOS-Only (force last) | 56.6±0.3 80.9±0.0 53.5±2.0   | 54.9±0.6 80.7±0.1 48.2±2.5   | 57.8±0.2 79.9±0.0 61.0±0.3
+ WSJ       | EOS-Only (+AUG)       | 64.3±1.5 83.5±0.6 59.5±1.4   | 57.4±0.5 81.0±0.2 54.4±2.5   | 69.2±1.7 84.0±0.8 66.2±1.9
+ WSJ       | BOS&EOS               | 72.7±0.7 87.1±0.2 59.1±1.5   | 57.8±1.9 81.6±0.7 48.8±1.6   | 72.4±1.0 85.2±0.5 68.3±0.3
+ WSJ       | BOS&EOS (+UNI)        | 72.4±0.6 86.3±0.3 59.6±1.0   | 65.3±1.0 83.6±0.5 52.9±1.3   | 72.8±0.4 85.3±0.2 68.0±0.2
+ WSJ       | BOS&EOS (+UNI +AUG)   | 72.2±1.3 86.1±0.6 66.5±0.3   | 72.8±1.8 86.5±0.9 63.6±1.0   | 73.2±1.9 85.7±0.9 71.8±1.5
+ Table 8: Overall Results (Character-Level). We report the macro/weighted average F1 of the BIO labeling task and the F1 score of the exact SU span extraction task.
+ F.3 Evaluation on the Sentence Segmentation Dataset
+ Finally, we report the results of sentence identification on the standard sentence segmentation dataset (WSJ Test).
+ In Table 12, we summarize the WSJ dataset statistics. Note that WSJ contains only SUs and does not contain any NSUs (O-labels). However, we can still evaluate the performance using the same metrics, i.e. the macro/weighted average F1 of the BIO labeling task and the F1 of the exact SU span extraction task.11
+ Table 13 summarizes the word-level evaluation results. Since we are evaluating on WSJ Test, the performance is naturally better when the models are trained on WSJ Train/Dev rather than EWT Train/Dev (which is now out-of-domain).
+ When the models are trained on EWT, we found that the baseline (EOS-Only) forcing the last EOS performs the best. This is natural, since this baseline better reflects the nature of the sentence segmentation dataset, where all units are SUs. However, our method (BOS&EOS) is still comparable to this baseline and does not (or only minimally) sacrifice performance on such datasets.
+ When the models are trained on WSJ, we found that our method without +UNI or +AUG performs the best. This is most likely because we can leverage the knowledge of BOS to predict EOS. When we apply the data augmentation (+AUG) and unidirectional model ensembling (+UNI), we observe a slight decrease in performance compared to our vanilla method. However, the results are still comparable and even outperform the baselines on some metrics (e.g. the exact SU span extraction task).
+ Overall, we can conclude that our methods do not sacrifice performance on the clean, edited texts of the sentence segmentation dataset.
+ 11 Since the O-label does not exist, we report the macro average F1 as the average of the F1 scores of the B-label and I-label predictions.
+
+ Evaluation      | Augmentation Rates     | EWT Test (pCC = 0.5)        | EWT Test (pCC = 0)          | EWT Test (Postprocess)
+                 |                        | BIO Mac. BIO Wt.  Span      | BIO Mac. BIO Wt.  Span      | BIO Mac. BIO Wt.  Span
+ Word-Level      | pDA = 0.15, pTR = 0.05 | 71.3±1.1 89.0±0.5 65.7±1.3  | 71.5±0.9 88.6±0.5 62.3±1.7  | 73.5±1.4 89.2±0.6 71.2±1.8
+ Word-Level      | pDA = 0.30, pTR = 0.10 | 72.5±0.4 89.5±0.1 66.6±0.2  | 72.4±1.3 89.1±0.5 63.7±1.0  | 74.3±1.1 89.6±0.4 71.9±1.4
+ Word-Level      | pDA = 0.45, pTR = 0.15 | 73.2±1.0 90.0±0.1 67.3±0.8  | 73.0±0.9 89.5±0.6 64.0±1.8  | 75.1±1.3 90.0±0.4 72.1±0.7
+ Character-Level | pDA = 0.15, pTR = 0.05 | 72.3±1.9 86.3±1.0 65.4±1.4  | 73.6±0.7 86.8±0.3 62.2±1.7  | 73.8±1.5 86.1±0.8 71.0±1.8
+ Character-Level | pDA = 0.30, pTR = 0.10 | 72.2±1.3 86.1±0.6 66.5±0.3  | 72.8±1.8 86.5±0.9 63.6±1.0  | 73.2±1.9 85.7±0.9 71.8±1.5
+ Character-Level | pDA = 0.45, pTR = 0.15 | 71.9±1.1 86.1±0.3 67.2±0.8  | 72.3±0.9 86.3±0.6 64.0±1.8  | 73.6±1.5 86.0±0.6 72.1±0.7
+ Table 9: Effect of Data Augmentation Rates (Word/Character-Level). We use different data augmentation rates (pDA and pTR) and evaluate BOS&EOS (+UNI +AUG) developed on WSJ Train/Dev. We report the macro/weighted average F1 of the BIO labeling task and the F1 score of the exact SU span extraction task.
+ Developed on EWT: ... 06/04/2001 05:54 PM Can you pass this along to Elizabeth to ensure Sanders is on board as well?
+ Developed on WSJ: ... 06/04/2001 05:54 PM Can you pass this along to Elizabeth to ensure Sanders is on board as well?
+ Table 10: Example Outputs (Both Correct). We show the predictions made by our proposed method (BOS&EOS) developed on EWT Train/Dev (top) or WSJ Train/Dev (bottom). We can verify that both methods identify the correct SU span while removing the non-sentential header as the NSU.
+ Developed on EWT: with my breakfast I like bacon and sausage when I having a big breakfast like a grand slam with pancakes and the works.
+ Developed on WSJ: with my breakfast I like bacon and sausage when I having a big breakfast like a grand slam with pancakes and the works.
+ Table 11: Example Output with One Incorrect Case. We show the predictions made by our proposed method (BOS&EOS) developed on EWT Train/Dev (top) or WSJ Train/Dev (bottom). We can verify that the former extracts the correct SU span, while the latter incorrectly excludes the first prepositional phrase as an NSU.
+                          | Train     | Dev     | Test
+ Total SUs                | 37,447    | 2,021   | 7,442
+ Total NSUs               | 0         | 0       | 0
+ Word-Level B-Labels      | 37,447    | 2,021   | 7,442
+ Word-Level I-Labels      | 805,387   | 44,354  | 163,132
+ Word-Level O-Labels      | 0         | 0       | 0
+ Character-Level B-Labels | 37,447    | 2,021   | 7,442
+ Character-Level I-Labels | 4,308,729 | 236,798 | 876,461
+ Character-Level O-Labels | 0         | 0       | 0
+ Table 12: WSJ dataset statistics.
+
+ Figure 3: Effect of the Unidirectional Model Interpolation Rate (Word-Level). We change λ ∈ [0, 1] and report the macro/weighted average F1 of the BIO labeling task and the F1 score of the exact SU span extraction task. Panels: (a) EWT Test (pCC = 0.5), BIO Macro; (b) EWT Test (pCC = 0.5), BIO Weighted; (c) EWT Test (pCC = 0.5), Span; (d) EWT Test (pCC = 0.0), BIO Macro; (e) EWT Test (pCC = 0.0), BIO Weighted; (f) EWT Test (pCC = 0.0), Span; (g) EWT Test (Postproc.), BIO Macro; (h) EWT Test (Postproc.), BIO Weighted; (i) EWT Test (Postproc.), Span. Interpolated results are shown in blue and non-interpolated results (i.e. λ = 0) in red. The line shows the mean and the shade shows the standard deviation from the five experimental runs.
+ Train/Dev | Model                 | WSJ Test (pCC = 0.5)        | WSJ Test (pCC = 0)
+           |                       | BIO Mac. BIO Wt.  Span      | BIO Mac. BIO Wt.  Span
+ EWT       | EOS-Only              | 97.4±0.1 99.5±0.0 87.3±0.3  | 97.3±0.0 99.5±0.0 87.2±0.2
+ EWT       | EOS-Only (force last) | 97.6±0.1 99.9±0.0 87.8±0.3  | 97.3±0.0 99.6±0.0 87.3±0.2
+ EWT       | BOS&EOS               | 97.1±0.2 99.4±0.0 86.7±0.5  | 97.0±0.1 99.3±0.0 86.5±0.3
+ WSJ       | EOS-Only              | 98.4±0.6 99.7±0.1 92.1±2.9  | 98.2±0.4 99.7±0.1 90.6±1.8
+ WSJ       | EOS-Only (force last) | 98.4±0.6 99.7±0.1 92.1±2.9  | 98.2±0.4 99.7±0.1 90.6±1.8
+ WSJ       | EOS-Only (+AUG)       | 98.2±1.1 99.1±1.0 92.6±2.5  | 97.3±1.9 99.3±0.8 87.8±6.3
+ WSJ       | BOS&EOS               | 99.2±0.2 99.7±0.3 95.5±0.5  | 98.7±0.1 99.7±0.2 93.1±0.4
+ WSJ       | BOS&EOS (+UNI)        | 98.5±0.3 98.9±0.5 92.9±1.0  | 98.1±0.3 98.8±0.5 91.4±0.8
+ WSJ       | BOS&EOS (+UNI +AUG)   | 98.7±0.2 99.3±0.4 94.0±0.7  | 98.2±0.3 99.1±0.3 91.8±1.1
+ Table 13: Overall Results on WSJ Test (RoBERTa, Word-Level). We report the macro/weighted average F1 of the BIO labeling task and the F1 score of the exact SU span extraction task.
dNFQT4oBgHgl3EQfjDY0/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
h9AzT4oBgHgl3EQf4v4M/content/tmp_files/2301.01847v1.pdf.txt ADDED
@@ -0,0 +1,1578 @@
+ Probabilistic Genotype-Phenotype Maps Reveal Mutational Robustness of RNA Folding, Spin Glasses, and Quantum Circuits
+ Anna Sappington1,∗ and Vaibhav Mohanty1,∗
+ 1Harvard-MIT Health Sciences and Technology, Harvard Medical School, Boston, MA 02115 and Massachusetts Institute of Technology, Cambridge, MA 02139
+ (Dated: January 1, 2023)
+ Recent studies of genotype-phenotype (GP) maps have reported universally enhanced phenotypic robustness to genotype mutations, a feature essential to evolution. Virtually all of these studies make a simplifying assumption that each genotype maps deterministically to a single phenotype. Here, we introduce probabilistic genotype-phenotype (PrGP) maps, where each genotype maps to a vector of phenotype probabilities, as a more realistic framework for investigating robustness. We study three model systems to show that our generalized framework can handle uncertainty emerging from various physical sources: (1) thermal fluctuation in RNA folding, (2) external field disorder in spin glass ground state finding, and (3) superposition and entanglement in quantum circuits, which are realized experimentally on a 7-qubit IBM quantum computer. In all three cases, we observe a novel biphasic robustness scaling which is enhanced relative to random expectation for more frequent phenotypes and approaches random expectation for less frequent phenotypes.
+ Introduction.—Systems which take a sequence as input and nontrivially produce a structure, function, or behavior as output are ubiquitous throughout the sciences and engineering. In biological systems such as RNA folding [1–11], lattice protein folding [4], protein self-assembly [12, 13], and gene regulatory networks [14, 15], the relationship between genotype (stored biological information) and phenotype (observable or functional properties) can be structured as genotype-phenotype (GP) maps, which have a rich history of computational and analytical investigation [1–32]. Systems from physics and computer science have also been analyzed as GP maps, including the spin glass ground state problem [30], linear genetic programming [26], and digital circuits [31].
+ Despite being completely disparate systems, all of the GP maps above share a number of common structural features, most notably an enhanced robustness of the phenotypes to genotype mutations. The phenotypic robustness ρn of a phenotype n is the average probability that a single character mutation of a genotype g which maps to n does not change the resultant phenotype n, averaged over all genotypes g mapping to n. Random assignment of genotypes to phenotypes predicts that ρn ≈ fn [4], where fn is the fraction of genotypes that map to phenotype n. However, the systems mentioned above display substantially enhanced robustness, exhibiting the relationship ρn ≈ a + b log fn ≫ fn with system-dependent constants a and b. It has been shown that, in evolution, this enhanced robustness facilitates discovery of new phenotypes [11, 19, 20, 33] and is crucial for navigating fitness landscapes [5]. As a result, it is important to accurately quantify robustness and its relationship with phenotype frequency.
+ All of the GP map studies referenced above make the assumption that a genotype maps deterministically to a single phenotype. However, we argue that for most of the above systems, this is a major simplification. For instance, within a bulk sample of ∼N mammalian cells, we expect to find ∼N copies and ∼N × 10^4 copies of a protein [34]. In vitro, such molecules often misfold [35], which is why cellular machinery exists to assist this folding and to degrade misfolded structures in vivo. By mapping a genotype to only the ground state energy structure, previous studies [1–11] make an implicit zero temperature approximation for the ensemble of molecules, even if the Gibbs free energy of an individual molecule itself is calculated within the folding software at finite temperature. Similarly, in studies of gene regulatory networks, spin glasses, linear genetic programs, and digital circuits, the systems investigated are small and do not interact with external networks or variables. These investigations assume that the environmental effect on the GP mapping of the subsystem of interest is static.
+ In this Letter, we introduce probabilistic genotype-phenotype (PrGP) maps, in contrast to the above systems, which we call deterministic genotype-phenotype (DGP) maps and which emerge as a limiting case of PrGP maps. The definitions of phenotypic robustness and transition probabilities retain the same physical meaning in PrGP maps as in DGP maps, and we emphasize that PrGP maps can handle disorder and uncertainty emerging from a variety of sources. To address the implicit zero temperature approximation in sequence-to-structure mappings (RNA folding, lattice protein folding, protein self-assembly), we study the folding of RNA primary sequences to a canonical ensemble of secondary structures corresponding to low-lying local free energy minima. To address external variable disorder with a known distribution, we study the zero temperature mapping of a spin glass bond configuration to its ground state with quenched external field disorder, building a phenotype probability vector using many replicas of the disordered field. This has implications for viral fitness landscape
+ inference [36–40], where external fields, in part, model host immune pressure [39]. Lastly, to investigate inherent uncertainty in phenotypes, we introduce quantum circuit GP maps where uncertainty emerges from superposition and entanglement of classically measurable basis states. Our experimental realization of these quantum circuits on a 7-qubit IBM quantum computer also introduces measurement noise, which has a clear and unique effect on robustness. The PrGP map properties of the three model systems are summarized visually in Figure 1.
+ FIG. 1. Schematic representations of the PrGP model systems studied in this work. Each system's genotype, source of disorder, and method for calculating the phenotype probability vector are indicated. In brief: RNA folding (genotype: primary sequence over {A, C, U, G} or {C, G}; phenotype: folded dot-bracket structure; uncertainty: thermal fluctuation at T > 0), spin glass ground states (genotype: bond configuration over {−1, +1}; phenotype: ground state spin configuration; uncertainty: disordered external field), and quantum circuits (genotype: gate configuration over {Z, X, Y, H, S, S†, T, T†}; phenotype: classical measurement of the circuit output; uncertainty: superposition and entanglement).
+ We observe that PrGP maps exhibit a novel biphasic scaling of robustness versus phenotype frequency which, for higher frequency phenotypes, resembles the ρn ∝ log fn seen in DGP maps but is suppressed, and, for lower frequency phenotypes, settles closer to a linear relationship between ρn and fn, suggesting that the lowest frequency phenotypes either appear sporadically throughout the GP map or are uniformly scattered at low probabilities throughout the genotype domain.
+ Theory.—Let Ω(g) = n represent the mapping of genotype g to phenotype n, where g is an element of Sℓ,k, the set of all k^ℓ sequences of length ℓ drawn from an alphabet of k characters. A generalization of robustness is the transition probability φmn, the average probability that a single character mutation of a genotype mapping to phenotype n will change the phenotype to m, with the average taken over all genotypes mapping to n. For DGP maps, φmn is given by
+   φmn = [ ∑_{g∈Sℓ,k} I[Ω(g) = n] ∑_{h∈nn(g)} I[Ω(h) = m] ] / [ ℓ(k − 1) ∑_{g∈Sℓ,k} I[Ω(g) = n] ],   (1)
+ where I[·] is the indicator function, and nn(g) is the single character mutational neighborhood of sequence g. For PrGP maps, we weaken the indicator I[Ω(g) = n] to a probability pn(g) ≡ P[Ω(g) = n], which allows us to write
+   φmn = ∑_{{g,h}∈Δℓ,k} [ p(g) ⊗ p(h) + (p(g) ⊗ p(h))ᵀ ]_{mn} / [ ℓ(k − 1) k^ℓ fn ],   (2)
+ where p(g) = (p0(g), p1(g), ...) is the phenotype probability vector to which genotype g maps, and Δℓ,k is the set of all k^ℓ ℓ(k − 1)/2 unordered pairs of sequences in Sℓ,k which differ by exactly one character. The phenotype probability vector obeys the normalization conditions k^ℓ f = ∑_{g∈Sℓ,k} p(g) and 1 = ∑_{n∈{phenotypes}} pn(g) for all g ∈ Sℓ,k, and phenotype robustnesses are given by the diagonal of the transition probability matrix, ρn = φnn. The phenotype entropy S(g) = −∑_{n∈{phenotypes}} pn(g) log pn(g) of a genotype g is also useful for quantifying how deterministic or probabilistic a PrGP map is.
+ In DGP maps, a random null model [4] for robustness can be built by randomly assigning genotype-phenotype pairings while keeping the frequencies f constant. As a result, the probability of a single character mutation leading to a change from phenotype n to phenotype m is approximately φmn ≈ fm for all m. For PrGP maps, a naive expectation can be built by letting all phenotype probability vectors equal the frequency vector, p(g) = f for all genotypes g. From eq. (2), one finds that φmn = fm; thus, the two random expectations are the same, even though they physically represent different scenarios.
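+ Eq. (2) is straightforward to evaluate numerically for small ℓ and k. The NumPy sketch below (our own code and naming, not from the paper's supplement) builds φ and f from a dictionary mapping each genotype string to its phenotype probability vector; the null-model identity φmn = fm when p(g) = f for all g makes a convenient unit test.
+ import numpy as np
+
+ def transition_matrix(p, ell, k, alphabet):
+     """phi[m, n] of Eq. (2). `p` maps each length-ell genotype string over
+     `alphabet` to a NumPy phenotype probability vector of fixed length."""
+     genotypes = list(p)                     # len(genotypes) == k**ell if complete
+     f = sum(p[g] for g in genotypes) / len(genotypes)   # k^ell * f = sum_g p(g)
+     n_phen = len(f)
+     num = np.zeros((n_phen, n_phen))
+     for g in genotypes:                     # visit each unordered 1-mutant pair once
+         for i in range(ell):
+             for c in alphabet:
+                 if c <= g[i]:
+                     continue                # skip non-mutations and duplicate pairs
+                 h = g[:i] + c + g[i + 1:]
+                 op = np.outer(p[g], p[h])   # [p(g) (x) p(h)]_{mn} = p_m(g) p_n(h)
+                 num += op + op.T
+     phi = num / (ell * (k - 1) * len(genotypes) * f[np.newaxis, :])
+     return phi, f                           # robustness: rho = np.diag(phi)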
+ DGP map studies [1–11], the global free energy minimum
260
+ secondary structure (reported as a “dot-bracket” string
261
+ indicating polymer connectivity) was calculated for every
262
+ RNA sequence of fixed length drawn from the alphabet
263
+ of the four canonical nucleotides {A, C, G, U} (alphabet
264
+ size k = 4).
265
+ Here, we are interested in not only the
266
+ global free energy minimum structures but also the low-
267
+ lying local minima, and we additionally investigate the
268
+ temperature-dependent behavior of the robustness. We
269
+ use the RNAsubopt program from the ViennaRNA pack-
270
+
271
+ 3
272
+ a
273
+ b
274
+ c
275
+ d
276
+ e
277
+ f
278
+ FIG. 2. Plots of robustness versus (a,c,e) frequency and versus
279
+ (b,d,f) log10(frequency) for (a,b) RNA folding in, (c,d) spin
280
+ glass ground state, and (e,f) quantum circuit PrGP maps.
281
+ The dashed line is the random null expectation ρn = fn.
282
+ age (version 2.4.17) [41] to calculate the secondary struc-
283
+ tures and associated Gibbs free energies for the local free
284
+ energy minima within 6 kcal/mol of the global free energy
285
+ minimum (or all the nonpositive free energy local min-
286
+ ima, if the global minimum is greater than −6 kcal/mol).
287
+ Because of the increased computational time required to
288
+ discover all the local minima within an energy range, we
289
+ use a reduced alphabet of {C, G} for our main simula-
290
+ tions with sequence length ℓ = 20. A validation study
291
+ with ℓ = 12 and the full k = 4 alphabet is reported in the
292
+ Supplemental Material [42]. Simulations for the ℓ = 20,
293
+ k = 2 trials were conducted at 20 ◦C, 37 ◦C (human body
294
+ temperature), and 70 ◦C. We take the low-lying local free
295
+ energy minima structures to comprise a canonical ensem-
296
+ ble at the simulation temperature, so the probability of
297
+ RNA sequence g mapping to secondary structure n is
298
+ determined from pn(g) = e−∆Gn/(RT )/Z, where Z nor-
299
+ malizes the vector.
300
+ We then calculate the robustness,
301
+ transition probabilities, and phenotype entropy distribu-
302
+ tions as detailed in the previous section. The DGP map
303
+ limits of the PrGP map are also plotted for each temper-
304
+ ature.
305
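+ Concretely, building the phenotype vector of one sequence reduces to a Boltzmann normalization over the free energies returned by RNAsubopt. A minimal sketch, with our own helper name and the gas constant in kcal/(mol·K):
+ import math
+
+ R = 1.987e-3   # kcal mol^-1 K^-1
+
+ def boltzmann_vector(free_energies, temp_c):
+     """free_energies: {dot-bracket structure: Gibbs free energy in kcal/mol}
+     for the low-lying local minima of one sequence. Returns the canonical
+     ensemble probabilities p_n(g) = exp(-dG_n / (R T)) / Z."""
+     T = temp_c + 273.15
+     w = {s: math.exp(-dg / (R * T)) for s, dg in free_energies.items()}
+     Z = sum(w.values())
+     return {s: v / Z for s, v in w.items()}
+
+ # e.g. boltzmann_vector({"((((....))))": -4.2, "............": 0.0}, 37.0)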
+ In Figure 2(a-b), we plot the relationship between robustness and frequency for the ℓ = 20, k = 2 RNA PrGP map and for the DGP map limiting cases at each simulation temperature (see Supplemental Material [42] for Pearson and Spearman correlations). The DGP maps confirm the results of refs. [3, 4], which emphasize that ρn ∝ log fn for most phenotypes, with significant elevation above the random null model [4] expectation. We find that there is little temperature dependence in the DGP robustness calculations (see Supplemental Material [42]), suggesting that temperature does little to alter the exact ground state phenotype. However, our PrGP map results showcase a different robustness behavior. As the simulation temperature increases, there is a gradual but clear suppression of the robustness versus frequency relationship, as is apparent in both panels (a) and (b). We suggest this occurs due to two factors: firstly, though the ground state structure itself does not change much with temperature, the ground state becomes less stable relative to the low-lying local minima, thereby increasing phenotype entropy, as evidenced by the entropy plots in the Supplemental Material [42]. As a result, for the corresponding p(g) ⊗ p(h) terms contributing to φmn, probability mass is drawn away from the diagonals toward the off-diagonal transition probabilities. Secondly, as temperature increases, many low frequency (higher ΔG) phenotypes are discovered, increasing the number of phenotypes and drawing probability mass away from the more robust phenotypes.
+ For high frequency phenotypes, the PrGP map robustness is suppressed relative to the DGP map robustness, but is nonetheless substantially elevated above the random null expectation, as in the DGP maps. However, for lower frequencies, the robustness behaves more like the random model; in the Supplemental Material, we see from a log-log plot of ρn versus fn that the robustness travels nearly parallel to the random null expectation, suggesting linear ρn ∝ fn behavior up to a constant multiplicative factor. This biphasic robustness behavior becomes even clearer in the spin glass and quantum circuit PrGP maps. Off-diagonal transition probabilities maintained an approximate relationship φmn ∝ fm for m ≠ n, in concordance with DGP maps (see Supplemental Material [42]).
+ Spin Glass Ground State Maps.—In a previous spin glass [43, 44] DGP map study [30], a zero temperature ±J spin glass on a random graph G(V, E) with Hamiltonian H(s; J) = −∑_{{i,j}∈E} Jij si sj − ∑_{i∈V} hi si was considered. The genotype is the bond configuration where each Jij ∈ {−1, +1}, and the phenotype is the ground state configuration where each si ∈ {−1, +1}. Degeneracies of the ground state were broken by the uniformly drawn, i.i.d. random external fields hi ∈ [−10^−4, 10^−4], which were fixed for each simulation. In our spin glass PrGP map, we use a similar setup, but we are interested in the effect of external field disorder on robustness. We therefore incorporate the effects of Gaussian-distributed external fields hi ∼ N(h0,i, σh²), where the uniformly distributed means h0,i ∈ [−0.1, 0.1] are fixed across all realizations of the disorder for each simulation. To obtain accurate robustness measurements, we exactly calculate every ground state for spin glasses with |V| = 9 and |E| = 15 by exhaustive enumeration. We examine the effect of external field disorder by simulating 450 replicas of {hi} with variances σh² = 0.001, 0.01, and 0.1 and fixed means {h0,i}. Phenotype probability vectors for each genotype g ≡ J were constructed by tallying and normalizing the number of appearances of each ground state across the replicas. The graph topology G(V, E) corresponding to the data presented here, as well as validation trial data, are in the Supplemental Material [42].
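+ For a system this small (2^9 = 512 spin configurations), the replica procedure can be sketched directly in Python. The code below is illustrative, with our own naming, and is not the simulation code used for the reported results.
+ import itertools
+ import numpy as np
+
+ def ground_state(J, h, edges):
+     """Exhaustive ground state of H = -sum_{ij} J_ij s_i s_j - sum_i h_i s_i."""
+     best, best_e = None, np.inf
+     for s in itertools.product((-1, 1), repeat=len(h)):
+         e = -sum(Jk * s[i] * s[j] for Jk, (i, j) in zip(J, edges)) - np.dot(h, s)
+         if e < best_e:
+             best, best_e = s, e
+     return best
+
+ def phenotype_vector(J, h0, var, edges, replicas=450, seed=0):
+     """Tally ground states over replicas of disordered fields h_i ~ N(h0_i, var)."""
+     rng = np.random.default_rng(seed)
+     counts = {}
+     for _ in range(replicas):
+         h = rng.normal(h0, np.sqrt(var))
+         gs = ground_state(J, h, edges)
+         counts[gs] = counts.get(gs, 0) + 1
+     return {gs: c / replicas for gs, c in counts.items()}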
+ In Figure 2(c-d), we plot robustness versus frequency
386
+ of each ground state for each external field variance σ2
387
+ h
388
+ as well as the DGP map limiting case, which qualita-
389
+ tively reproduce the results of the earlier work [30] (see
390
+ Supplemental Material [42] for Pearson and Spearman
391
+ correlations). Trends similar to the RNA PrGP map are
392
+ observed. Namely, as the disorder parameter (temper-
393
+ ature for RNA and field variance for spin glasses) in-
394
+ creases the uncertainty in the genotype-phenotype pair-
395
+ ing, the phenotype entropy distribution shifts rightward
396
+ (see Supplemental Material [42]), and the robustness ver-
397
+ sus frequency relationship becomes suppressed relative
398
+ to the DGP map limit. Here, the spin glass results are
399
+ more clearly suggestive of the proposed biphasic robust-
400
+ ness relationship, especially apparent in panel (d). For
401
+ the highest frequencies, the ρn is substantially enhanced
402
+ above the random null expectation and behavior close
403
+ to the deterministic limit is observed. However, for the
404
+ smallest frequencies, nearly linear behavior is observed;
405
+ in the log-log plot of ρn versus fn (see Supplemental Ma-
406
+ terial [42]), we see a strong sign that ρn ∝ fn, with the
407
+ empirical robustness nearly parallel to the random expec-
408
+ tation. As with the RNA folding PrGP maps, we suspect
409
+ two causes which both contribute to this behavior: (1)
410
+ as σ2
411
+ h increases, there is a higher chance of changing the
412
+ ground state, which increases phenotype entropy, and (2)
413
+ a larger number of spin configurations appear as ground
414
+ states, but with low frequency, drawing away probability
415
+ mass from the more frequent phenotypes.
Quantum Circuit Maps.—Although methods to evolve quantum circuits have been suggested [45], to our knowledge this work is the first to analyze the structural properties of quantum circuit GP maps. We generate random quantum circuits (see Supplemental Material for the algorithm) with 7 qubits and 4 layers of gates. Circuits are randomly seeded with CNOT gates, which cannot participate in the genotype, and the remaining spaces are filled with single-qubit gates drawn from the alphabet {Z, X, Y, H, S, S†, T, T†}. We choose ℓ = 4 of these gates to be variable gates which comprise the genotype. The input to the circuit is the prepared state |00…0⟩ ≡ |0⟩ ⊗ ··· ⊗ |0⟩, and the exact probability of classically measuring the basis state |n⟩ = ⊗i |qi⟩, with each |qi⟩ ∈ {|0⟩, |1⟩}, is pn(g) = |⟨n| U(g) |00…0⟩|², where |qi⟩ is the i-th qubit and U(g) is the total circuit operation. We realize these quantum circuits on the ibm_lagos v1.2.0 quantum computer [46], one of the 7-qubit IBM Quantum Falcon r5.11H processors. Experimental phenotype probability vectors are constructed by tallying classical measurements from 1000 shots for each genotype. The circuits from our experimental trials are depicted in the Supplemental Material [42].
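A minimal sketch of the exact calculation of pn(g) with NumPy follows (our own illustration, not the paper's code; the representation of a circuit as a list of gate tuples is an assumption). The statevector is stored as a rank-7 tensor so that each gate acts on a single axis:

```python
import numpy as np

S = np.diag([1, 1j]); T = np.diag([1, np.exp(1j * np.pi / 4)])
GATES = {"Z": np.diag([1, -1]), "X": np.array([[0, 1], [1, 0]]),
         "Y": np.array([[0, -1j], [1j, 0]]),
         "H": np.array([[1, 1], [1, -1]]) / np.sqrt(2),
         "S": S, "Sdg": S.conj().T, "T": T, "Tdg": T.conj().T}

def apply_1q(state, U, q, n=7):
    # Apply a 2x2 gate U to qubit q of an n-qubit statevector.
    psi = state.reshape((2,) * n)
    psi = np.tensordot(U, psi, axes=([1], [q]))
    return np.moveaxis(psi, 0, q).reshape(-1)

def apply_cnot(state, control, target, n=7):
    # X on the target qubit within the control = |1> slice of the state tensor.
    psi = state.reshape((2,) * n).copy()
    sl = [slice(None)] * n
    sl[control] = 1
    psi[tuple(sl)] = np.flip(psi[tuple(sl)], axis=target - (target > control))
    return psi.reshape(-1)

def exact_phenotype_vector(circuit, n=7):
    # circuit: list of ("CNOT", control, target) or (gate_name, qubit) tuples,
    # with the ell = 4 variable gates already substituted for a genotype g.
    state = np.zeros(2 ** n, dtype=complex)
    state[0] = 1.0  # prepared state |00...0>
    for gate in circuit:
        if gate[0] == "CNOT":
            state = apply_cnot(state, gate[1], gate[2], n)
        else:
            state = apply_1q(state, GATES[gate[0]], gate[1], n)
    return np.abs(state) ** 2  # p_n(g) = |<n| U(g) |00...0>|^2
```

The experimental phenotype probability vectors replace the final line with empirical frequencies tallied from the 1000 measurement shots.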
In Figure 2(e-f), we plot robustness versus frequency for each circuit output state, using both exact and experimental phenotype probability vectors for the robustness calculations (see Supplemental Material [42] for Pearson and Spearman correlations and for data from additional validation trials). For the exact probabilities, the results in panel (f) strongly support the enhanced ρn ∝ log fn scaling (Pearson r = 0.998). The spread of phenotypes in the frequency domain is due to superposition and/or entanglement; moreover, we see that many of the phenotypes are degenerate, with identical frequency and robustness. This degeneracy is broken in our experimental measurements, which also exhibit measurement noise: since we have a finite number of shots, the degeneracies of the phenotypes observed in the exact case end up broken. The frequency and robustness of these logarithmically scaling phenotypes are suppressed relative to the exact case as probability density is drawn toward additional phenotypes, observed experimentally but not in the exact case, which appear due to measurement noise and decoherence effects in the physical system. The rightward shift of the phenotype entropy S(g) (see Supplemental Material [42]) further illustrates this effect.

Of the three systems investigated here, the quantum circuit PrGP map results in panel (f) are perhaps the most illustrative of our suggested biphasic robustness scaling. The low frequency phenotypes introduced by measurement noise in the experimental trials lie much closer to the random null expectation than the higher frequency phenotypes observed in the exact calculations, which instead scale with enhanced robustness, similar to what is seen in standard DGP maps.
Discussion.—Compared to existing DGP maps, our introduction of PrGP maps not only allows for the inclusion of realistic, physical sources of disorder such as thermal fluctuations and external variables, but it also permits the analysis of new systems, like quantum circuits, with inherent uncertainty built into the genotype-phenotype mapping and arising from measurement disorder. We emphasize the broad applicability of this framework to a vast array of systems across biology, physics, computer science, and other disciplines for the analysis of robustness and stability. The proposed biphasic robustness scaling suggests that the robustness of high frequency phenotypes in the DGP limit is suppressed in the PrGP formulation due to increases in phenotype entropy and due to the discovery of new low frequency phenotypes. Moreover, low frequency phenotypes, which lie closer to the random null expectation, either appear randomly throughout genotype space (as in the DGP random null model), or they appear somewhat uniformly throughout a large portion of genotype space but remain at low frequency (as in our new PrGP random null model). This scaling is observed in all three systems studied, despite their being disparate, hinting at its universality. How this robustness trend affects the navigability of (probabilistic) fitness landscapes is an important direction for further investigation. We also suggest that the mapping of genotypes to probability vectors instead of discrete phenotypes may facilitate taking gradients of, for instance, a negative log-likelihood loss function in the process of learning PrGP or even DGP maps using statistical learning methods.

Acknowledgements.—We acknowledge the use of IBM Quantum services and the MIT Engaging Cluster for this work. This work was supported by awards T32GM007753 and T32GM144273 from the National Institute of General Medical Sciences. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institute of General Medical Sciences, the National Institutes of Health, IBM, or the IBM Quantum Team. The authors declare no known conflict of interest.
∗ The authors contributed equally to this work. Correspondence: asappington@hms.harvard.edu and mohanty@hms.harvard.edu.
[1] M. Weiß and S. E. Ahnert, Neutral components show a hierarchical community structure in the genotype–phenotype map of RNA secondary structure, Journal of The Royal Society Interface 17, 20200608 (2020).
[2] M. Weiß and S. E. Ahnert, Using small samples to estimate neutral component size and robustness in the genotype–phenotype map of RNA secondary structure, Journal of The Royal Society Interface 17, 20190784 (2020).
[3] J. Aguirre, J. M. Buldú, M. Stich, and S. C. Manrubia, Topological Structure of the Space of Phenotypes: The Case of RNA Neutral Networks, PLoS ONE 6, e26324 (2011).
[4] S. F. Greenbury, S. Schaper, S. E. Ahnert, and A. A. Louis, Genetic Correlations Greatly Increase Mutational Robustness and Can Both Reduce and Enhance Evolvability, PLOS Computational Biology 12, e1004773 (2016).
[5] S. F. Greenbury, A. A. Louis, and S. E. Ahnert, The structure of genotype-phenotype maps makes fitness landscapes navigable, Nature Ecology & Evolution 6, 1742 (2022).
[6] K. Dingle, S. Schaper, and A. A. Louis, The structure of the genotype–phenotype map strongly constrains the evolution of non-coding RNA, Interface Focus 5, 20150053 (2015).
[7] K. Dingle, C. Q. Camargo, and A. A. Louis, Input–output maps are strongly biased towards simple outputs, Nature Communications 9, 761 (2018).
[8] K. Dingle, F. Ghaddar, P. Šulc, and A. A. Louis, Phenotype Bias Determines How Natural RNA Structures Occupy the Morphospace of All Possible Shapes, Molecular Biology and Evolution 39, msab280 (2022).
[9] K. Dingle, G. V. Pérez, and A. A. Louis, Generic predictions of output probability based on complexities of inputs and outputs, Scientific Reports 10, 4415 (2020).
[10] T. Jörg, O. C. Martin, and A. Wagner, Neutral network sizes of biological RNA molecules can be computed and are not atypically small, BMC Bioinformatics 9, 464 (2008).
[11] A. Wagner, Robustness and evolvability: a paradox resolved, Proceedings of the Royal Society B: Biological Sciences 275, 91 (2008).
[12] S. F. Greenbury, I. G. Johnston, A. A. Louis, and S. E. Ahnert, A tractable genotype–phenotype map modelling the self-assembly of protein quaternary structure, Journal of The Royal Society Interface 11, 20140249 (2014).
[13] S. Tesoro and S. E. Ahnert, Non-deterministic genotype-phenotype maps of biological self-assembly, EPL (Europhysics Letters) 123, 38002 (2018).
[14] C. Q. Camargo and A. A. Louis, Boolean Threshold Networks as Models of Genotype-Phenotype Maps, Complex Networks XI, 143 (2020).
[15] S. Kauffman, Homeostasis and Differentiation in Random Genetic Control Networks, Nature 224, 177 (1969).
[16] A. Wagner, Distributed robustness versus redundancy as causes of mutational robustness, BioEssays 27, 176 (2005).
[17] A. Wagner, Robustness and Evolvability in Living Systems, 3rd ed., Princeton Studies in Complexity (Princeton Univ. Press, Princeton, NJ, 2007).
[18] J. L. Payne and A. Wagner, Constraint and Contingency in Multifunctional Gene Regulatory Circuits, PLoS Computational Biology 9, e1003071 (2013).
[19] J. L. Payne, J. H. Moore, and A. Wagner, Robustness, evolvability, and the logic of genetic regulation, Artificial Life 20, 111 (2014).
[20] J. L. Payne and A. Wagner, The Robustness and Evolvability of Transcription Factor Binding Sites, Science 343, 875 (2014).
[21] S. Schaper and A. A. Louis, The Arrival of the Frequent: How Bias in Genotype-Phenotype Maps Can Steer Populations to Local Optima, PLoS ONE 9, e86635 (2014).
[22] S. F. Greenbury and S. E. Ahnert, The organization of biological sequences into constrained and unconstrained parts determines fundamental properties of genotype–phenotype maps, Journal of The Royal Society Interface 12, 20150724 (2015).
[23] S. E. Ahnert, Structural properties of genotype–phenotype maps, Journal of The Royal Society Interface 14, 20170275 (2017).
[24] M. Weiß and S. E. Ahnert, Phenotypes can be robust and evolvable if mutations have non-local effects on sequence constraints, Journal of The Royal Society Interface 15, 20170618 (2018).
[25] D. Nichol, M. Robertson-Tessi, A. R. A. Anderson, and P. Jeavons, Model genotype–phenotype mappings and the algorithmic structure of evolution, Journal of The Royal Society Interface 16, 20190332 (2019).
[26] T. Hu, M. Tomassini, and W. Banzhaf, A network perspective on genotype–phenotype mapping in genetic programming, Genetic Programming and Evolvable Machines, 10.1007/s10710-020-09379-0 (2020).
[27] S. Manrubia, J. A. Cuesta, J. Aguirre, S. E. Ahnert, L. Altenberg, A. V. Cano, P. Catalán, R. Diaz-Uriarte, S. F. Elena, J. A. García-Martín, P. Hogeweg, B. S. Khatri, J. Krug, A. A. Louis, N. S. Martin, J. L. Payne, M. J. Tarnowski, and M. Weiß, From genotypes to organisms: State-of-the-art and perspectives of a cornerstone in evolutionary dynamics, Physics of Life Reviews 38, 55 (2021).
[28] J. L. Payne and A. Wagner, The causes of evolvability and their evolution, Nature Reviews Genetics 20, 24 (2019).
[29] S. Schaper, I. G. Johnston, and A. A. Louis, Epistasis can lead to fragmented neutral spaces and contingency in evolution, Proceedings of the Royal Society B: Biological Sciences 279, 1777 (2012).
[30] V. Mohanty and A. A. Louis, Robustness and Stability of Spin Glass Ground States to Perturbed Interactions, Phys. Rev. E, in press (2022), arXiv:2012.05437 [cond-mat.dis-nn].
[31] A. H. Wright and C. L. Laue, Evolving Complexity is Hard (2022), arXiv:2209.13013 [cs].
[32] I. G. Johnston, K. Dingle, S. F. Greenbury, C. Q. Camargo, J. P. K. Doye, S. E. Ahnert, and A. A. Louis, Symmetry and simplicity spontaneously emerge from the algorithmic nature of evolution, Proceedings of the National Academy of Sciences 119, e2113883119 (2022).
[33] J. A. Draghi, T. L. Parsons, G. P. Wagner, and J. B. Plotkin, Mutational robustness can facilitate adaptation, Nature 463, 353 (2010).
[34] R. Milo and R. Phillips, Cell Biology by the Numbers (Garland Science, Taylor & Francis Group, New York, NY, 2016).
[35] R. Russell, RNA misfolding and the action of chaperones, Frontiers in Bioscience 13, 1 (2008).
[36] R. H. Y. Louie, K. J. Kaczorowski, J. P. Barton, A. K. Chakraborty, and M. R. McKay, Fitness landscape of the human immunodeficiency virus envelope protein that is targeted by antibodies, Proceedings of the National Academy of Sciences 115, E564 (2018).
[37] T. C. Butler, J. P. Barton, M. Kardar, and A. K. Chakraborty, Identification of drug resistance mutations in HIV from constraints on natural evolution, Physical Review E 93, 022412 (2016).
[38] J. P. Barton, N. Goonetilleke, T. C. Butler, B. D. Walker, A. J. McMichael, and A. K. Chakraborty, Relative rate and location of intra-host HIV evolution to evade cellular immunity are predictable, Nature Communications 7, 11660 (2016).
[39] K. Shekhar, C. F. Ruberman, A. L. Ferguson, J. P. Barton, M. Kardar, and A. K. Chakraborty, Spin models inferred from patient-derived viral sequence data faithfully describe HIV fitness landscapes, Physical Review E 88, 062705 (2013).
[40] T. A. Hopf, J. B. Ingraham, F. J. Poelwijk, C. P. I. Schärfe, M. Springer, C. Sander, and D. S. Marks, Mutation effects predicted from sequence co-variation, Nature Biotechnology 35, 128 (2017).
[41] R. Lorenz, S. H. Bernhart, C. Höner zu Siederdissen, H. Tafer, C. Flamm, P. F. Stadler, and I. L. Hofacker, ViennaRNA Package 2.0, Algorithms for Molecular Biology 6, 26 (2011).
[42] See Supplemental Material for this work.
[43] S. F. Edwards and P. W. Anderson, Theory of spin glasses, Journal of Physics F: Metal Physics 5, 965 (1975).
[44] D. Sherrington and S. Kirkpatrick, Solvable Model of a Spin-Glass, Physical Review Letters 35, 1792 (1975).
[45] D. Tandeitnik and T. Guerreiro, Evolving Quantum Circuits (2022), arXiv:2210.05058 [quant-ph].
[46] IBM Quantum, https://quantum-computing.ibm.com/ (2021).
Supplemental Materials for “Probabilistic Genotype-Phenotype Maps Reveal Mutational Robustness of RNA Folding, Spin Glasses, and Quantum Circuits”

Anna Sappington¹,∗ and Vaibhav Mohanty¹,∗

¹Harvard-MIT Health Sciences and Technology, Harvard Medical School, Boston, MA 02115 and Massachusetts Institute of Technology, Cambridge, MA 02139

CONTENTS

I. Entropy Distributions for Main Text Systems
II. Extended Data for Main Text RNA Folding PrGP Map, GC Alphabet, ℓ = 20, k = 2
III. Validation Trial for RNA Folding PrGP Map, Full Alphabet, ℓ = 12, k = 4
IV. Extended Data for Main Text Spin Glass PrGP Map
V. Validation Trial for Spin Glass PrGP Map
VI. Quantum Circuit Generation Algorithm
VII. Extended Data for Main Text Quantum Circuit PrGP Map
VIII. Validation Trials for Quantum Circuit PrGP Map
References

∗ The authors contributed equally to this work. Correspondence: asappington@hms.harvard.edu and mohanty@hms.harvard.edu.

arXiv:2301.01847v1 [cond-mat.stat-mech] 4 Jan 2023
I. ENTROPY DISTRIBUTIONS FOR MAIN TEXT SYSTEMS

In the main text, we presented robustness versus frequency plots for RNA folding, spin glass ground state, and quantum circuit PrGP maps. For the spin glass ground state and quantum circuit PrGP maps, data for a single representative realization were presented in the main text. In Figure S1, we plot the distribution of phenotype entropy S(g) across all genotypes g for each of these PrGP maps. For RNA folding and spin glasses, we observe that the entropy distributions shift rightward as the disorder parameter increases; for RNA this corresponds to increasing temperature, and for spin glasses this corresponds to increasing external field variance σh². For the quantum circuits, we plot both exact and experimental results (from the 7-qubit IBM quantum computer); the experimental entropy distribution is shifted rightward relative to the exact result, due to measurement noise as well as a finite number of experimental trials.
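For reference, the phenotype entropy plotted here is the Shannon entropy of each genotype's phenotype probability vector; a minimal sketch follows (ours; the paper does not state the logarithm base, which only rescales the axis):

```python
import numpy as np

def phenotype_entropy(p, eps=1e-12):
    # p: (n_genotypes, n_phenotypes) array of phenotype probability vectors.
    # Returns S(g) = -sum_n p_n(g) log p_n(g) for every genotype g; eps guards
    # against log(0) for deterministic (zero-entropy) genotypes.
    return -(p * np.log(np.clip(p, eps, 1.0))).sum(axis=1)
```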
FIG. S1. Phenotype entropy distributions for the (a) RNA folding, (b) spin glass ground state, and (c) quantum circuit PrGP maps whose robustness plots are presented in the main text. As disorder parameters increase in (a) and (b) due to increased temperature and increased external field variance, respectively, the entropies shift rightward. The same occurs due to measurement noise in (c).
II. EXTENDED DATA FOR MAIN TEXT RNA FOLDING PrGP MAP, GC ALPHABET, ℓ = 20, k = 2

In the main text, we presented robustness versus frequency and robustness versus log10(frequency) plots for RNA folding PrGP and DGP maps at three temperatures. For clarity, we include robustness versus frequency, robustness versus log10(frequency), and log10(robustness) versus log10(frequency) plots separately for PrGP and DGP maps in Figure S2. First, we see that the DGP map results reproduce the expected ρn ∝ log fn relationship for most phenotypes, with significant elevation above the random null model expectation. We also note that there is little temperature dependence in the DGP robustness calculations, which suggests that temperature does little to alter the exact ground state phenotype. In contrast, our PrGP map results showcase a different robustness behavior, in which there is a gradual but clear suppression of the robustness versus frequency relationship as the simulation temperature increases; see the main text for discussion of these features. In the PrGP map results we also note a biphasic behavior: for high frequency phenotypes, the PrGP map robustness, like the DGP map robustness, is substantially elevated above the random null expectation, while for lower frequencies the robustness behaves more like the random model.

In Table S1, we include the Pearson correlation coefficient r and Spearman rank correlation coefficient ρ for each map (PrGP, DGP), temperature (20 °C, 37 °C, 70 °C), and axis transformation presented in Figure 2(a-b) and Figure S2. The primary feature we point out is the relative decrease of the PrGP Pearson r coefficients in the robustness versus log10(frequency) plots as compared to the DGP plots; this suggests a deviation from the empirical ρn ∝ log fn trend observed in DGP studies.

In the GP map literature, phenotype bias, the finding that phenotype frequencies can vary over many orders of magnitude with a small number of phenotypes being the targets of a large number of genotypes, has been shown for many systems [1–4]. In Figure S3, we present plots of log10(frequency) versus normalized rank and log10(frequency) versus log10(normalized rank) for each temperature and map pairing, which show phenotype bias for this RNA folding system. Notably, the log10(frequency) versus log10(normalized rank) plot suggests a deviation from Zipf's law.
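The rank-frequency construction behind these plots is simple; a minimal sketch (ours) is given below. Under Zipf's law, log10(frequency) would fall linearly in log10(normalized rank) with slope −1, which is the benchmark these plots deviate from:

```python
import numpy as np

def rank_frequency(f):
    # f: array of phenotype frequencies f_n. Returns normalized ranks in (0, 1]
    # and the frequencies sorted in decreasing order; ties are broken
    # arbitrarily by the sort, as noted in the figure captions.
    f_sorted = np.sort(np.asarray(f))[::-1]
    ranks = np.arange(1, f_sorted.size + 1) / f_sorted.size
    return ranks, f_sorted
```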
Figure S4 presents transition probabilities φmn from the most frequently occurring phenotype n to the other phenotypes m due to a single nucleotide mutation for both PrGP and DGP maps at three different temperatures. For each respective map, plots including and excluding the most robust transition (i.e., the transition from phenotype n to itself) are shown for added clarity. This figure demonstrates that the off-diagonal transition probabilities for PrGP maps maintained an approximate relationship φmn ∝ fm for m ≠ n, in concordance with DGP maps and with the random null expectation for PrGP maps (see main text). A proportionality constant not equal to 1 for φmn ∝ fm with m ≠ n is likely due to transition probability mass acquired by the diagonal element φnn. It is also apparent that the most robust transition is much more likely than the transition to any other phenotype, in support of our claim that PrGP maps, like DGP maps, exhibit enhanced robustness.
System  Alphabet, Length  Map   Temperature  Axes                           Pearson r  Spearman ρ
RNA     GC, 20            PrGP  20 °C        Robust v. Freq                 0.807      0.953
RNA     GC, 20            PrGP  20 °C        Robust v. log10(Freq)          0.794      0.953
RNA     GC, 20            PrGP  20 °C        log10(Robust) v. log10(Freq)   0.946      0.954
RNA     GC, 20            PrGP  37 °C        Robust v. Freq                 0.837      0.955
RNA     GC, 20            PrGP  37 °C        Robust v. log10(Freq)          0.798      0.955
RNA     GC, 20            PrGP  37 °C        log10(Robust) v. log10(Freq)   0.946      0.953
RNA     GC, 20            PrGP  70 °C        Robust v. Freq                 0.849      0.934
RNA     GC, 20            PrGP  70 °C        Robust v. log10(Freq)          0.767      0.934
RNA     GC, 20            PrGP  70 °C        log10(Robust) v. log10(Freq)   0.940      0.934
RNA     GC, 20            DGP   20 °C        Robust v. Freq                 0.683      0.887
RNA     GC, 20            DGP   20 °C        Robust v. log10(Freq)          0.897      0.887
RNA     GC, 20            DGP   20 °C        log10(Robust) v. log10(Freq)   0.839      0.860
RNA     GC, 20            DGP   37 °C        Robust v. Freq                 0.689      0.877
RNA     GC, 20            DGP   37 °C        Robust v. log10(Freq)          0.884      0.877
RNA     GC, 20            DGP   37 °C        log10(Robust) v. log10(Freq)   0.836      0.856
RNA     GC, 20            DGP   70 °C        Robust v. Freq                 0.745      0.917
RNA     GC, 20            DGP   70 °C        Robust v. log10(Freq)          0.909      0.917
RNA     GC, 20            DGP   70 °C        log10(Robust) v. log10(Freq)   0.882      0.913

TABLE S1. Pearson and Spearman correlation coefficients for all robustness versus frequency plots in the main text and Supplemental Material for the RNA k = 2, ℓ = 20 simulations with the reduced alphabet, for each simulation temperature.
FIG. S2. Plots of (left) robustness versus frequency, (middle) robustness versus log10(frequency), and (right) log10(robustness) versus log10(frequency) for RNA folding (top row) PrGP maps and (bottom row) DGP maps at three temperatures. These data are the same results as in main text Figure 2, with axis scaling adjusted and with PrGP and DGP data shown separately for clarity. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.

FIG. S3. Plots of (left) log10(frequency) versus normalized rank and (right) log10(frequency) versus log10(normalized rank) for RNA folding PrGP and DGP maps at three temperatures. When computing ranks, ties were broken arbitrarily.

FIG. S4. Plots of transition probabilities versus frequency for RNA folding (left) PrGP maps and (right) DGP maps at three temperatures, (top) 20 °C, (middle) 37 °C, and (bottom) 70 °C. For each respective map, plots are shown both with all transitions included and with the most robust transition removed. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.
III. VALIDATION TRIAL FOR RNA FOLDING PrGP MAP, FULL ALPHABET, ℓ = 12, k = 4

Here, we present the results of a validation trial for RNA folding PrGP maps for sequences of length ℓ = 12 utilizing the full alphabet of size k = 4, {A, C, G, U}. In Figure S5, we present robustness versus frequency, robustness versus log10(frequency), and log10(robustness) versus log10(frequency) plots for RNA folding PrGP and DGP maps. As in the reduced alphabet case, both the PrGP and DGP map results show significant elevation above the random null model expectation, with the PrGP map results demonstrating a gradual but clear suppression of the robustness versus frequency relationship compared to the DGP map results. The expected ρn ∝ log fn relationship for phenotypes in the DGP map results, as well as the biphasic behavior of the PrGP map results, is present but less clear in this case, likely due to a small-size effect from the limited number of phenotypes in this complete alphabet (k = 4, ℓ = 12) system compared to the reduced alphabet (k = 2) system, which contains sequences of longer length (ℓ = 20). Also in Figure S5, we plot the distribution of phenotype entropy S(g) across all genotypes g; most phenotype entropies are zero, i.e., those genotypes map deterministically, because most genotypes in the RNA folding k = 4, ℓ = 12 system do not fold.

In Table S2, we include the Pearson correlation coefficient r and Spearman rank correlation coefficient ρ for each map (PrGP, DGP) and axis transformation presented in Figure S5. In Figure S6, we present plots of log10(frequency) versus normalized rank and log10(frequency) versus log10(normalized rank) for each map, which show phenotype bias for this RNA folding system. Notably, the log10(frequency) versus log10(normalized rank) plot suggests a deviation from Zipf's law.

Figure S7 presents transition probabilities φmn from the most frequently occurring phenotype n to the other phenotypes m due to a single nucleotide mutation for both PrGP and DGP maps. For each respective map, plots including and excluding the most robust transition are shown for added clarity. This figure demonstrates that the off-diagonal transition probabilities for PrGP maps maintained an approximate relationship φmn ∝ fm for m ≠ n, in concordance with DGP maps and with the random null expectation for PrGP maps (see main text). A proportionality constant not equal to 1 for φmn ∝ fm with m ≠ n is likely due to transition probability mass acquired by the diagonal element φnn. It is also apparent that the most robust transition is much more likely than the transition to any other phenotype, in support of our claim that PrGP maps, like DGP maps, exhibit enhanced robustness.
FIG. S5. Plots of (top left) robustness versus frequency, (top middle) robustness versus log10(frequency), and (top right) log10(robustness) versus log10(frequency) for RNA folding PrGP and DGP maps, together with plots of (bottom left) density versus phenotype entropy and (bottom right) log10(density) versus phenotype entropy. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.
System  Alphabet, Length  Map   Axes                           Pearson r  Spearman ρ
RNA     AUCG, 12          PrGP  Robust v. Freq                 0.879      0.684
RNA     AUCG, 12          PrGP  Robust v. log10(Freq)          0.846      0.684
RNA     AUCG, 12          PrGP  log10(Robust) v. log10(Freq)   0.948      0.684
RNA     AUCG, 12          DGP   Robust v. Freq                 0.788      0.244
RNA     AUCG, 12          DGP   Robust v. log10(Freq)          0.815      0.244
RNA     AUCG, 12          DGP   log10(Robust) v. log10(Freq)   0.948      0.684

TABLE S2. Pearson and Spearman correlation coefficients for all robustness versus frequency plots for the RNA k = 4, ℓ = 12 validation trial with the full alphabet. Simulations were conducted at 37 °C.
FIG. S6. Plots of (left) log10(frequency) versus normalized rank and (right) log10(frequency) versus log10(normalized rank) for RNA folding PrGP and DGP maps. When computing ranks, ties were broken arbitrarily.

FIG. S7. Plots of transition probabilities versus frequency for RNA folding (top) PrGP maps and (bottom) DGP maps. For each respective map, plots either (left) include all transitions or (right) have the most robust transition removed. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.
IV. EXTENDED DATA FOR MAIN TEXT SPIN GLASS PrGP MAP

In the main text, we compared a spin glass DGP map with a fixed random external field {h0,i} against our spin glass PrGP map, which introduces a Gaussian distribution on the external field whose means are fixed at {h0,i} and whose variance σh² is varied as an independent variable. Figure S8 shows the topology of the graph G(V, E) (with |V| = 9, |E| = 15) that corresponds to the spin glass PrGP map data presented in the main text.

Main text Figure 2(c, d) presents robustness versus frequency and robustness versus log10(frequency) data; Figure S9 additionally plots log10(robustness) versus log10(frequency) for these same data. These three plots collectively demonstrate that, as with the RNA folding GP maps, as the disorder parameter increases the uncertainty in the genotype-phenotype pairing, the robustness versus frequency relationship in PrGP maps becomes suppressed relative to the DGP map limit. These spin glass results are highly suggestive of a biphasic robustness relationship in which, at high frequencies, ρn is substantially enhanced above the random null expectation and behavior close to the deterministic limit is observed. However, as is clear from Figure S9, nearly linear behavior is observed for the smallest frequencies, with the empirical robustness nearly parallel to the random expectation, suggesting ρn ∝ fn. See the main text for discussion of these features.

In Table S3, we include the Pearson correlation coefficient r and Spearman rank correlation coefficient ρ for each map (PrGP, DGP), external field variance (σh² = 0.001, 0.01, 0.1), and axis transformation presented in Figure 2(c, d) and Figure S9. The primary feature we point out is the relative decrease of the PrGP Pearson r coefficients in the robustness versus log10(frequency) plots as compared to the DGP (deterministic) plot; this suggests a deviation from the empirical ρn ∝ log fn trend observed in the spin glass DGP study [5].

In Figure S10, we present plots of log10(frequency) versus normalized rank and log10(frequency) versus log10(normalized rank) for each external field variance and for the deterministic case. Notably, the log10(frequency) versus log10(normalized rank) plot suggests a deviation from Zipf's law.

Figure S11 presents transition probabilities φmn from the most frequently occurring ground state n to the other ground states m due to a single bond perturbation. For each setting of the external random field variance, plots including and excluding the most robust transition are shown for added clarity. This figure demonstrates that the off-diagonal transition probabilities for PrGP maps maintained an approximate relationship φmn ∝ fm for m ≠ n, in concordance with DGP maps and with the random null expectation for PrGP maps (see main text). A proportionality constant not equal to 1 for φmn ∝ fm with m ≠ n is likely due to transition probability mass acquired by the diagonal element φnn. It is also apparent that the most robust transition is much more likely than the transition to any other phenotype, in support of our claim that PrGP maps, like DGP maps, exhibit enhanced robustness.
FIG. S8. Graph G(V, E) corresponding to the spin glass PrGP map data presented in the main text.
FIG. S9. Plot of log10(robustness) versus log10(frequency) for each spin glass ground state at three different external field variances for the spin glass PrGP maps. The DGP map limiting case is also plotted for comparison. These are the same data as in the main text plots, but with both axes scaled logarithmically. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.
System      Map   σh²            Axes                           Pearson r  Spearman ρ
Spin glass  PrGP  0.001          Robust v. Freq                 0.766      0.962
Spin glass  PrGP  0.001          Robust v. log10(Freq)          0.940      0.962
Spin glass  PrGP  0.001          log10(Robust) v. log10(Freq)   0.920      0.962
Spin glass  PrGP  0.01           Robust v. Freq                 0.874      0.985
Spin glass  PrGP  0.01           Robust v. log10(Freq)          0.924      0.985
Spin glass  PrGP  0.01           log10(Robust) v. log10(Freq)   0.986      0.985
Spin glass  PrGP  0.1            Robust v. Freq                 0.976      0.987
Spin glass  PrGP  0.1            Robust v. log10(Freq)          0.954      0.987
Spin glass  PrGP  0.1            log10(Robust) v. log10(Freq)   0.989      0.987
Spin glass  DGP   Deterministic  Robust v. Freq                 0.930      0.962
Spin glass  DGP   Deterministic  Robust v. log10(Freq)          0.964      0.962
Spin glass  DGP   Deterministic  log10(Robust) v. log10(Freq)   0.962      0.962

TABLE S3. Pearson and Spearman correlation coefficients for all robustness versus frequency plots for the spin glass PrGP map with |V| = 9 and |E| = 15, whose data are shown in the main text and here in the Supplemental Material.
FIG. S10. Plots of (left) log10(frequency) versus normalized rank and (right) log10(frequency) versus log10(normalized rank) for spin glass ground states for PrGP maps at three external field variances and for DGP maps in the deterministic case. When computing ranks, ties were broken arbitrarily.

FIG. S11. Plots of transition probabilities versus frequency for spin glass ground states for PrGP maps at three external field variances, (top) σh² = 0.001, (middle) σh² = 0.01, and (bottom) σh² = 0.1. For each, plots either (left) include all transitions or (right) have the most robust transition removed. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.
V. VALIDATION TRIAL FOR SPIN GLASS PrGP MAP

We provide a second spin glass PrGP map trial here in the Supplemental Material to illustrate that the spin glass trends described above and in the main text hold across multiple random graph instances. We generate a new G(V, E), once again with |V| = 9 and |E| = 15, with the topology shown in Figure S12. Figure S13 presents robustness versus frequency, robustness versus log10(frequency), and log10(robustness) versus log10(frequency) for spin glass PrGP maps at three different external field variances and for the deterministic case for DGP maps. The results from this validation trial exhibit the same behavior as observed in the trial presented in the main text. In particular, we see that as the disorder parameter increases the uncertainty in the genotype-phenotype pairing, the robustness versus frequency relationship in PrGP maps becomes suppressed relative to the DGP map limit. Again, these spin glass results are highly suggestive of a biphasic robustness relationship in which, at high frequencies, ρn is substantially enhanced above the random null expectation and behavior close to the deterministic limit is observed. However, as is clear from Figure S13, nearly linear behavior is observed for the smallest frequencies, with the empirical robustness nearly parallel to the random expectation, signaling ρn ∝ fn. See the main text for discussion of these features. Additionally, Figure S13 plots the distribution of phenotype entropy S(g) across all genotypes g for PrGP maps at each experimental value of the external field variance. As in Figure S1, we observe that the entropy distributions shift rightward as the disorder parameter increases.

In Table S4, we include the Pearson correlation coefficient r and Spearman rank correlation coefficient ρ for each map (PrGP, DGP), external field variance (σh² = 0.001, 0.01, 0.1), and axis transformation presented in Figure S13. The primary feature we point out is the relative decrease of the PrGP Pearson r coefficients in the robustness versus log10(frequency) plots as compared to the DGP (deterministic) plot; this suggests a deviation from the empirical ρn ∝ log fn trend observed in the spin glass DGP study [5].
FIG. S12. Graph G(V, E) corresponding to the spin glass PrGP map validation trial data shown here in the Supplemental Material.

FIG. S13. Plots of (leftmost) robustness versus frequency, (middle left) robustness versus log10(frequency), and (middle right) log10(robustness) versus log10(frequency) for spin glass ground states for PrGP maps at three different external field variances and for the deterministic case for DGP maps. Additionally, (rightmost) the density versus phenotype entropy for the spin glass ground states at the three different external field variances is plotted. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.
System      Map   σh²            Axes                           Pearson r  Spearman ρ
Spin glass  PrGP  0.001          Robust v. Freq                 0.806      0.994
Spin glass  PrGP  0.001          Robust v. log10(Freq)          0.940      0.994
Spin glass  PrGP  0.001          log10(Robust) v. log10(Freq)   0.950      0.994
Spin glass  PrGP  0.01           Robust v. Freq                 0.932      0.996
Spin glass  PrGP  0.01           Robust v. log10(Freq)          0.916      0.996
Spin glass  PrGP  0.01           log10(Robust) v. log10(Freq)   0.993      0.996
Spin glass  PrGP  0.1            Robust v. Freq                 0.993      0.997
Spin glass  PrGP  0.1            Robust v. log10(Freq)          0.981      0.997
Spin glass  PrGP  0.1            log10(Robust) v. log10(Freq)   0.997      0.997
Spin glass  DGP   Deterministic  Robust v. Freq                 0.962      0.995
Spin glass  DGP   Deterministic  Robust v. log10(Freq)          0.993      0.995
Spin glass  DGP   Deterministic  log10(Robust) v. log10(Freq)   0.990      0.995

TABLE S4. Pearson and Spearman correlation coefficients for all robustness versus frequency plots for the spin glass PrGP map validation trial with |V| = 9 and |E| = 15, whose data are shown above in this section.
VI. QUANTUM CIRCUIT GENERATION ALGORITHM

In this study, we generated quantum circuits with 7 qubits and 4 layers. We take the genotype of the quantum circuit PrGP map to be a subset of single-qubit gates (which are varied to reflect each genotype). We first seed the circuit randomly with CNOT gates, which cannot participate in the genotype gate list. Only certain pairs of qubits, namely those physically connected in the 7-qubit ibm_lagos v1.2.0 quantum computer, can participate in the same CNOT gate. The remaining open places are seeded with single-qubit gates, and we choose ℓ = 4 of these gates to be the variable gates for the genotype. The alphabet chosen is of size k = 8: {Z, X, Y, H, S, S†, T, T†}. Circuit diagrams used in our experimental trials are shown in the subsequent sections.
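A minimal sketch of this layout step follows. This is our own illustration of the algorithm described above, not the paper's code: the coupling map (here an ibm_lagos-like connectivity) and the CNOT seeding probability are assumptions, since the text only specifies that CNOTs are restricted to physically connected pairs and that the remaining slots receive single-qubit gates, ℓ = 4 of which are marked as the variable genotype positions:

```python
import random

SINGLE_QUBIT_GATES = ["Z", "X", "Y", "H", "S", "Sdg", "T", "Tdg"]  # k = 8 alphabet
COUPLING = [(0, 1), (1, 2), (1, 3), (3, 5), (4, 5), (5, 6)]  # assumed connectivity

def generate_circuit(n_qubits=7, n_layers=4, n_variable=4, p_cnot=0.3, seed=0):
    rng = random.Random(seed)
    layers, open_slots = [], []
    for layer in range(n_layers):
        used, gates = set(), []
        for a, b in rng.sample(COUPLING, len(COUPLING)):
            # CNOTs are seeded only on connected, unused qubit pairs and
            # never participate in the genotype.
            if a not in used and b not in used and rng.random() < p_cnot:
                gates.append(("CNOT", a, b))
                used.update((a, b))
        for q in range(n_qubits):
            if q not in used:
                open_slots.append((layer, q))  # to be filled with 1-qubit gates
        layers.append(gates)
    variable = set(rng.sample(open_slots, n_variable))  # genotype slots G0..G3
    for layer, q in open_slots:
        name = "VAR" if (layer, q) in variable else rng.choice(SINGLE_QUBIT_GATES)
        layers[layer].append((name, q))
    return layers, sorted(variable)
```

A genotype g then assigns one of the k = 8 gate names to each "VAR" slot, after which the exact phenotype probability vector can be computed as in the main-text sketch.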
VII. EXTENDED DATA FOR MAIN TEXT QUANTUM CIRCUIT PrGP MAP

To our knowledge, this work is the first to analyze the structural properties of quantum circuit GP maps. We generate random quantum circuits as described in the main text and in the previous section, with 7 qubits and 4 layers of gates. Figure S14 shows a schematic representation of the random quantum circuit generated for the quantum circuit PrGP map data presented in main text Figure 2(e, f) and in this Supplemental Material section.

Main text Figure 2(e, f) presents robustness versus frequency and robustness versus log10(frequency) data using both exact and experimental phenotype probability vectors for the robustness calculations; Figure S15 additionally plots log10(robustness) versus log10(frequency) for these same data. Collectively, we see that for the exact probabilities the results strongly support the enhanced ρn ∝ log fn scaling. The spread of phenotypes observed in the frequency domain is due to superposition and/or entanglement, and many of the phenotypes are degenerate, with identical frequency and robustness. This degeneracy is broken in our experimental measurements, which also exhibit measurement noise. Moreover, the frequency and robustness of these logarithmically scaling phenotypes are suppressed relative to the exact case, as probability density is drawn toward additional phenotypes which are observed experimentally but were not observed in the exact case. These quantum circuit PrGP map results are perhaps the most illustrative of our suggested biphasic robustness scaling: the low frequency phenotypes introduced by measurement noise in the experimental trials lie much closer to the random null expectation than the higher frequency phenotypes observed in the exact calculations, which instead scale with enhanced robustness, similar to what is seen in standard DGP maps.

In Table S5, we include the Pearson correlation coefficient r and Spearman rank correlation coefficient ρ for both exact and experimental quantum circuit PrGP results for each axis transformation presented in main text Figure 2(e, f) and Figure S15. The primary features we point out are the high Pearson correlation r = 0.998 of the robustness versus log10(frequency) relationship for the exact phenotype probability vectors, and the relative decrease of the experimental Pearson r coefficients in the robustness versus log10(frequency) plot as compared to the exact plot. This suggests that the exact relationship exhibits behavior similar to the empirical ρn ∝ log fn trend observed in DGP studies, and that the experimental trials introduce measurement noise which induces a deviation from the exact results.

In Figure S16, we present plots of log10(frequency) versus normalized rank and of log10(frequency) versus log10(normalized rank) for experimental and exact quantum circuit PrGP map results. Notably, the plot showing log10(frequency) versus log10(normalized rank) suggests a deviation from Zipf's law.

Figure S17 presents transition probabilities φmn from the most frequently occurring circuit output state n to the other circuit output states m due to a single variable gate perturbation. For both experimental and exact phenotype probability vectors, plots including and excluding the most robust transition are shown for added clarity. This figure demonstrates that the off-diagonal transition probabilities for quantum circuit PrGP maps are positively correlated with the frequency fm, though there appears to be some additional nonrandom relationship which is not predicted by standard DGP or PrGP theory. It is also apparent that the most robust transition is much more likely than the transition to any other phenotype, in support of our claim that PrGP maps, like DGP maps, exhibit enhanced robustness.
System           Map   Trial  Exact or Exp  Axes                           Pearson r  Spearman ρ
Quantum circuit  PrGP  1      Exact         Robust v. Freq                 0.926      0.996
Quantum circuit  PrGP  1      Exact         Robust v. log10(Freq)          0.998      0.996
Quantum circuit  PrGP  1      Exact         log10(Robust) v. log10(Freq)   0.993      0.996
Quantum circuit  PrGP  1      Experimental  Robust v. Freq                 0.912      0.987
Quantum circuit  PrGP  1      Experimental  Robust v. log10(Freq)          0.712      0.987
Quantum circuit  PrGP  1      Experimental  log10(Robust) v. log10(Freq)   0.983      0.987

TABLE S5. Pearson and Spearman correlation coefficients for all robustness versus frequency plots for the quantum circuit PrGP map whose robustness data were presented in the main text and in the log-log plot of Figure S15. This includes both exact and experimental results for realization/Trial 1, whose circuit is shown in Figure S14.
FIG. S14. Random circuit generated for quantum circuit trial 1 (main text), whose robustness data are plotted in the main text and in the remainder of this section. The genotype is the set of variable gates g = (G0, G1, G2, G3), so the length of the input sequence is ℓ = 4, drawn from an alphabet of k = 8 single-qubit gates: {Z, X, Y, H, S, S†, T, T†}.

FIG. S15. Plot of log10(robustness) versus log10(frequency) for the quantum circuit in trial 1 for experimental and exact data. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.

FIG. S16. Plots of (left) log10(frequency) versus normalized rank and (right) log10(frequency) versus log10(normalized rank) for quantum circuit trial 1 for experimental and exact data. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.

FIG. S17. Plots of transition probabilities versus frequency for quantum circuit trial 1 for (top) experimental and (bottom) exact data. For each model framework, plots either (left) include all transitions or (right) have the most robust transition removed. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.
VIII. VALIDATION TRIALS FOR QUANTUM CIRCUIT PrGP MAP

To validate the quantum circuit PrGP map results presented in the main text and Supplemental Material, six additional trials were conducted. A schematic of the random quantum circuit generated for the first of these validation trials is shown in Figure S18. Figure S19 presents robustness versus frequency, robustness versus log10(frequency), and log10(robustness) versus log10(frequency) for this quantum circuit PrGP map validation trial. As with the first quantum circuit PrGP map trial, these data strongly support the enhanced ρn ∝ log fn scaling. Again, we see a spread of phenotypes in the frequency domain due to superposition and/or entanglement, and many of the phenotypes are degenerate, with identical frequency and robustness. This degeneracy is broken in our experimental measurements, which exhibit measurement noise. Once again, the frequency and robustness of these logarithmically scaling phenotypes are suppressed relative to the exact case, as probability density is drawn toward additional phenotypes which are observed experimentally but were not observed in the exact case. These results illustrate our suggested biphasic robustness scaling, in which the low frequency phenotypes, introduced by measurement noise in the experimental trials, lie much closer to the random null expectation than the higher frequency phenotypes observed in the exact calculations, which instead scale with enhanced robustness, similar to what is seen in standard DGP maps. Figure S19 also presents a plot of the distribution of phenotype entropy S(g) across all genotypes g for exact and experimental quantum circuit PrGP maps. Notably, the experimental entropy distribution is shifted rightward relative to the exact result, due to measurement noise as well as a finite number of experimental trials.

In Table S6, we include the Pearson correlation coefficient r and Spearman rank correlation coefficient ρ for both exact and experimental quantum circuit PrGP results for each axis transformation presented in Figure S19. The primary features we point out are the high Pearson correlation r = 0.950 of the robustness versus log10(frequency) relationship for the exact phenotype probability vectors, and the relative decrease of the experimental Pearson r coefficients in the robustness versus log10(frequency) plot as compared to the exact plot. This suggests that the exact relationship exhibits behavior similar to the empirical ρn ∝ log fn trend observed in DGP studies, and that the experimental trials introduce measurement noise which induces a deviation from the exact results.

Figure S20 presents robustness versus frequency and robustness versus log10(frequency) plots, as well as schematics of the corresponding random quantum circuits, for validation trials 3-7. In each trial, the suggested biphasic robustness scaling is clear. Additionally, these trials support the enhanced ρn ∝ log fn scaling.
FIG. S18. Random circuit generated for quantum circuit trial 2 (validation), whose robustness and entropy data are plotted below as a validation trial. The genotype is the set of variable gates g = (G0, G1, G2, G3), so the length of the input sequence is ℓ = 4, drawn from an alphabet of k = 8 single-qubit gates: {Z, X, Y, H, S, S†, T, T†}.

FIG. S19. Plots of (leftmost) robustness versus frequency, (left middle) robustness versus log10(frequency), and (middle right) log10(robustness) versus log10(frequency) for quantum circuit trial 2 for experimental and exact data. Additionally, (rightmost) the density versus phenotype entropy for quantum circuit trial 2 is plotted. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.
System           Map   Trial  Exact or Exp  Axes                           Pearson r  Spearman ρ
Quantum circuit  PrGP  2      Exact         Robust v. Freq                 0.910      0.973
Quantum circuit  PrGP  2      Exact         Robust v. log10(Freq)          0.950      0.973
Quantum circuit  PrGP  2      Exact         log10(Robust) v. log10(Freq)   0.954      0.973
Quantum circuit  PrGP  2      Experimental  Robust v. Freq                 0.916      0.979
Quantum circuit  PrGP  2      Experimental  Robust v. log10(Freq)          0.837      0.979
Quantum circuit  PrGP  2      Experimental  log10(Robust) v. log10(Freq)   0.989      0.979

TABLE S6. Pearson and Spearman correlation coefficients for all robustness versus frequency plots for the quantum circuit PrGP map whose robustness data are shown above as a validation trial (i.e., Trial 2). This includes both exact and experimental results for Trial 2, whose circuit is shown in Figure S18.
FIG. S20. Plots of (left) robustness versus frequency and (middle) robustness versus log10(frequency) for quantum circuit trials 3-7, as well as (right) the corresponding random quantum circuits for Trials 3-7 (validation trials). The genotype is the set of variable gates g = (G0, G1, G2, G3), so the length of the input sequence is ℓ = 4, drawn from an alphabet of k = 8 single-qubit gates: {Z, X, Y, H, S, S†, T, T†}. The dashed line is the random null expectation for both PrGP and DGP maps, given by φmn = fm for all m and n.
[1] T. Jörg, O. C. Martin, and A. Wagner, Neutral network sizes of biological RNA molecules can be computed and are not atypically small, BMC Bioinformatics 9, 464 (2008).
[2] S. E. Ahnert, Structural properties of genotype–phenotype maps, Journal of The Royal Society Interface 14, 20170275 (2017).
[3] S. Manrubia, J. A. Cuesta, J. Aguirre, S. E. Ahnert, L. Altenberg, A. V. Cano, P. Catalán, R. Diaz-Uriarte, S. F. Elena, J. A. García-Martín, P. Hogeweg, B. S. Khatri, J. Krug, A. A. Louis, N. S. Martin, J. L. Payne, M. J. Tarnowski, and M. Weiß, From genotypes to organisms: State-of-the-art and perspectives of a cornerstone in evolutionary dynamics, Physics of Life Reviews 38, 55 (2021).
[4] S. F. Greenbury, S. Schaper, S. E. Ahnert, and A. A. Louis, Genetic Correlations Greatly Increase Mutational Robustness and Can Both Reduce and Enhance Evolvability, PLOS Computational Biology 12, e1004773 (2016).
[5] V. Mohanty and A. A. Louis, Robustness and Stability of Spin Glass Ground States to Perturbed Interactions, Phys. Rev. E, in press (2022), arXiv:2012.05437 [cond-mat.dis-nn].
h9AzT4oBgHgl3EQf4v4M/content/tmp_files/load_file.txt ADDED
The diff for this file is too large to render. See raw diff
 
hNE4T4oBgHgl3EQfrQ2w/vector_store/index.faiss ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a963ccfbb01dea768c068c2554d925ad537dfff7be6322c9ced7a2881b2d6e97
+ size 2818093
htE4T4oBgHgl3EQfrg2l/content/tmp_files/2301.05209v1.pdf.txt ADDED
@@ -0,0 +1,1870 @@
MNRAS 000, 1–12 (2022)    Preprint 13 January 2023    Compiled using MNRAS LATEX style file v3.0

Data-Driven Selection and Spectral Classification of White Dwarf Stars

Olivier Vincent★, P. Bergeron and P. Dufour
Département de Physique, Université de Montréal, C.P. 6128, Succ. Centre-Ville, Montréal, Québec H3C 3J7, Canada
★ E-mail: o.vincent@umontreal.ca

Accepted XXX. Received YYY; in original form ZZZ

ABSTRACT
The next generation of spectroscopic surveys is expected to provide spectra for hundreds of thousands of white dwarf (WD) candidates in the upcoming years. Currently, spectroscopic classification of white dwarfs is mostly done by visual inspection, requiring substantial amounts of expert attention. We propose a data-driven pipeline for fast, automatic selection and spectroscopic classification of WD candidates, trained using spectroscopically confirmed objects with available Gaia astrometry, photometry, and Sloan Digital Sky Survey (SDSS) spectra with signal-to-noise ratios ≥ 9. The pipeline selects WD candidates with improved accuracy and completeness over existing algorithms, classifies their primary spectroscopic type with ≳ 90% accuracy, and spectroscopically detects main sequence companions with similar performance. We apply our pipeline to the Gaia Data Release 3 cross-matched with the SDSS Data Release 17 (DR17), identifying 424 096 high-confidence WD candidates and providing the first catalogue of automated and quantifiable classification for 36 523 WD spectra. Both the catalogue and pipeline are made available online. Such a tool will prove particularly useful for the ongoing SDSS-V survey, allowing for rapid classification of thousands of spectra at every data release.

Key words: White Dwarfs – Methods: Data Analysis – Catalogues – Surveys
1 INTRODUCTION

The field of astronomy is entering an era of Big Data, with the volume of astronomical data doubling every 16 months, a pace predicted to continue for the next few years (Smith & Geach 2022). Classical methods that rely on human supervision and specialist expertise are rapidly becoming insufficient to handle this stream of opportunities, and concerns are arising that they may strongly delay important discoveries, or worse, miss them completely. Machine learning methods have emerged as a natural response to these concerns and have already become commonplace in many physical sciences (see Carleo et al. 2019; Smith & Geach 2022, for recent reviews). Stellar science has also recently seen interesting applications of machine learning, including main sequence star spectral classification (Sharma et al. 2020), stellar parameter inference (Ting et al. 2019; Chandra et al. 2020), and trigonometric parallax calibration (Leung & Bovy 2019).

Until now, the quantity of astronomical data available for the study of white dwarfs has remained small enough to be manageable using classical methods. Most of the spectroscopic data come from the SDSS (Gunn et al. 2006), which gradually provided optical spectroscopy along with broadband photometry for the majority of the ∼33 000 currently known white dwarfs (Harris et al. 2003; Kleinman et al. 2004; Eisenstein et al. 2006; Kleinman et al. 2013; Kepler et al. 2015, 2016; Fusillo et al. 2021). Newer surveys dramatically contrast with this figure, such as the Gaia survey (Gaia Collaboration et al. 2016), which measured astrometry for over a billion objects, resulting not only in an increase of about 3 orders of magnitude in the number of white dwarfs with parallax measurements (Bédard et al. 2017, and references therein), but also in the identification of ∼260 000 high-confidence white dwarf candidates (Fusillo et al. 2021). Other available data sources worth mentioning include the Panoramic Survey Telescope And Rapid Response System photometry (Pan-STARRS; Chambers et al. 2016), the Two Micron All Sky Survey near-infrared photometry (2MASS; Skrutskie et al. 2006), and the spectroscopic data from the Large Sky Area Multi-Object Fiber Spectroscopic Telescope (LAMOST; Cui et al. 2012).

The next generation of observatories and surveys (e.g. SDSS-V, DESI, 4MOST; Kollmeier et al. 2017; DESI Collaboration et al. 2016; de Jong et al. 2014) will provide spectroscopic data for a large number of white dwarf candidates, unlocking both unprecedented statistical analyses and detailed studies of white dwarfs. However, extracting the white dwarf observations from the billions of expected spectra will be an impossible task without the aid of automated tools. While machine learning methods for the selection of white dwarf candidates, as well as for the classification of DA versus non-DA, have started to surface (Gaia Collaboration et al. 2021; López-Sanjuan et al. 2022), the most recent spectroscopic catalogues are still built using visual inspection (Fusillo et al. 2021; Kepler et al. 2021). More generally, studies of large white dwarf samples usually involve a number of manual steps that require substantial amounts of time that experts could and should be spending on more important aspects of the data analysis (Caron et al. 2022).

As a first step towards addressing this lack of tools, we present a neural-network-based pipeline for the rapid and automated selection and spectroscopic classification of white dwarf candidates. The pipeline comprises three independent modules that identify white dwarf candidates based on Gaia astrometry and photometry, classify the main spectroscopic signature of white dwarf spectra, and detect the presence of spectroscopic contamination by a main sequence star. The entire pipeline is trained using human-labelled, spectroscopically confirmed objects, and the spectroscopic modules are trained and tested using SDSS Data Release 16 spectra (DR16; Ahumada et al. 2020). In the future, each module of the pipeline will be able to serve as a base classification model for various tasks and surveys. As a proof-of-concept, we produce a catalogue containing 1.3M Gaia objects cross-matched with the SDSS Data Release 17 (DR17; Abdurro'uf et al. 2022), the latest and last data release of SDSS-IV.

This paper is structured as follows. In Section 2, we describe the different components of the pipeline, as well as the methods and data used to train them. The performance of the pipeline on test data is presented in Section 3, where we also test the spectroscopic modules in different data quality regimes and on white dwarf spectra with multiple spectral signatures. We then showcase the pipeline by creating a catalogue using 1.3M white dwarf candidates from the Gaia survey cross-matched with the SDSS DR17 in Section 4. Finally, we summarize our work and provide concluding remarks in Section 6.
2 METHODOLOGY

2.1 Pipeline description

The white dwarf selection and spectral classification pipeline consists of three different modules: one for white dwarf candidate selection among Gaia objects, one for classification of the primary spectroscopic type¹, and one for the detection of spectral contamination from a main sequence companion. The pipeline is illustrated in Figure 1. Each module consists of 10 neural networks grouped together to form a deep ensemble; such ensembles are known to offer improved generalization and performance over single neural networks (Lee et al. 2015; Lakshminarayanan et al. 2016). Since neural networks typically have millions of parameters, there exists a large number of different parameter combinations that might sufficiently approximate the function the networks are trying to model. By ensembling them, we create a distribution of diverse functions from which we can compute statistics for our classifications (Fort et al. 2019). For each pipeline module, we take the mean and standard deviation of the predictions of its 10 networks as the final prediction and uncertainty, respectively. Within each module, the neural networks are built using a single, shared architecture (see Appendix A for more details), but are trained using different initializations and portions of the training data sets.
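To make the ensembling step concrete, here is a minimal sketch (our illustration, not the authors' code) of how the mean and standard deviation over a deep ensemble yield the prediction and uncertainty; the networks list and its callables are assumed:

    import numpy as np

    def ensemble_predict(networks, x):
        """Combine a deep ensemble as described in Section 2.1.

        networks: the 10 trained models of a module, each mapping a
                  preprocessed input batch to class probabilities of
                  shape (n_samples, n_classes).
        """
        # Stack per-network predictions: (n_networks, n_samples, n_classes).
        probs = np.stack([net(x) for net in networks])
        prediction = probs.mean(axis=0)   # final class probabilities
        uncertainty = probs.std(axis=0)   # spread between ensemble members
        return prediction, uncertainty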
The candidate selection module is an ensemble of binary classifiers that take the three Gaia magnitudes (𝐺, 𝐺BP, 𝐺RP), their flux errors, both proper motion components (𝜇𝛼 and 𝜇𝛿), and the parallax measurement, along with all their respective uncertainties. Our choice of input data is based on the white dwarf candidate Random Forest classifier of Gaia Collaboration et al. (2021), and differences between our classifiers are explored in Section 4. Additionally, we include phot_bp_rp_excess_factor as an input parameter, as we found it helpful for identifying binaries composed of a white dwarf and a main sequence star (WD+MS). We also tested the corrected excess factor proposed by Riello et al. (2021) and found no notable difference in performance compared to the uncorrected factor. In total, we use 13 Gaia input parameters to predict the probability 𝑃WD of an object being a white dwarf.

Spectral classification by the two other modules requires a spectral coverage of at least 3840 to 7000 Å for the determination of the primary spectroscopic type, which we increase to 9000 Å for the detection of contamination from a main sequence companion. The first spectroscopic module outputs the probability 𝑃class that the object belongs to one of the 13 following classes: DA, DB, DC, DO, DQ, hotDQ, DZ, DAH, PG1159, cataclysmic variable (CV), sdB, sdO, or sdOB. The white dwarf classes follow the classification system proposed by Sion et al. (1983) and Fusillo et al. (2021), whereas subdwarfs follow the system proposed by Geier et al. (2017). The neural networks of this module are trained assuming a multiclass problem, meaning the classes are assumed to be mutually exclusive, and the sum of probabilities must equal unity. We emphasize that this approach does not attempt to classify secondary spectroscopic features; it does, however, provide a consistent primary type for hybrid white dwarfs (see Section 3.3). The main sequence companion module, similar to the candidate selection module, is made of binary classifier networks that output a probability 𝑃MS that the spectrum is contaminated by a MS star.

¹ In this paper, we follow the terminology described in Sion et al. (1983), where the upper case letter following the D (for degenerate) indicates the primary spectroscopic type in the optical spectrum, and where the following upper case letters indicate the secondary spectroscopic features.
2.2 Data selection and processing

To train and test our networks, we made use of all objects with a confirmed spectral type in the Montreal White Dwarf Database (MWDD; Dufour et al. 2017), in the Gaia-SDSS catalogue of Fusillo et al. (2021, henceforth GF21), as well as in the subdwarf star catalogues of Geier et al. (2017) and Geier (2020). We downloaded all SDSS DR16 spectra and Gaia DR3 data associated with these objects from the Science Archive Server² and the Gaia Archive³. In order to minimize human error in the spectral classifications, we discarded spectra with a signal-to-noise ratio (SNR) lower than 9 between 4500 and 5500 Å; the discarded spectra are mostly those of DA and DB white dwarfs, as well as subdwarfs. Each module of the pipeline has further restrictions, described in Section 3.

All SDSS spectra go through the following preprocessing. We remove the sky emission lines around 5577, 6300, and 6363 Å by replacing a region of 9 pixels centered on those lines with values interpolated from the nearest neighbouring pixels. We then pseudo-continuum normalize the spectra using a running window of 50 pixels width, selecting pixels in the 85th percentile or higher, on which we fit a fourth-order Chebyshev polynomial. The pseudo-continuum pixels are restricted to the 3842-7000 Å range for the primary spectroscopic type classification module and to 3842-9000 Å for the main sequence companion detection module. Telluric pixels are also excluded.

As a final preprocessing step for the spectroscopic module inputs, we compute the average and standard deviation of all pixels of all continuum-normalized spectra within their respective training sets, and zero-center the spectra by subtracting the average and then dividing by the standard deviation. The same preprocessing procedure is applied to the Gaia parameters of the candidate selection module, using the mean and standard deviation of each parameter computed over the entire training set.

² https://www.sdss.org/
³ https://gea.esac.esa.int/archive/
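A rough sketch of the preprocessing just described (our assumptions: non-overlapping 50-pixel windows for the "running window", and NumPy's Chebyshev utilities; the paper does not specify its implementation):

    import numpy as np
    from numpy.polynomial import chebyshev as C

    def pseudo_continuum_normalize(wave, flux, window=50, pct=85, deg=4):
        """Percentile-based pseudo-continuum normalization (Section 2.2)."""
        keep = np.zeros(flux.size, dtype=bool)
        for start in range(0, flux.size, window):
            chunk = flux[start:start + window]
            # Continuum-like pixels: at or above the window's 85th percentile.
            keep[start:start + window] = chunk >= np.percentile(chunk, pct)
        coeffs = C.chebfit(wave[keep], flux[keep], deg)  # 4th-order Chebyshev
        return flux / C.chebval(wave, coeffs)

    def standardize(spectra, train_mean, train_std):
        """Zero-center using statistics computed once over the training set."""
        return (spectra - train_mean) / train_std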
Figure 1. Schema of the pipeline. For a given object, the Gaia parameters are sent to the candidate selection module to determine the probability of being a white dwarf (𝑃WD). If this probability meets certain criteria, the spectrum of the object is sent to the first spectroscopic classification module, where the probability that its primary spectroscopic type belongs to one of the 13 possible classes (𝑃class) is calculated. If the most probable class is either a DA, DB or DC, the spectrum is also sent to the WD+MS module to determine the probability of a main sequence companion (𝑃MS).
3 PIPELINE TRAINING AND VALIDATION

3.1 Module 1: Identification of white dwarf candidates

We begin by looking at the candidate selection module, which takes 13 Gaia parameters as input and outputs a probability 𝑃WD for the object to be a white dwarf (see Section 2). Our sample includes all objects in the MWDD, as well as in the GF21 catalogue, with an available spectral classification. These objects are split into two categories: white dwarfs and non-white dwarfs; the latter category also includes subdwarfs and main sequence stars. We apply the following cuts: 𝐺abs > 6 + 5(𝐺BP − 𝐺RP) to remove most main sequence stars, and parallax_over_error > 10⁻³ to remove any object with excessive parallax measurement error.
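Expressed as code, the two cuts could look as follows (a sketch; the file name and the parallax-based absolute magnitude helper are our assumptions, while the column names are standard Gaia archive fields):

    import numpy as np
    import pandas as pd

    gaia = pd.read_csv("gaia_sample.csv")  # hypothetical dump of the Gaia columns

    # Absolute G magnitude from the parallax (parallax in mas).
    gaia["g_abs"] = gaia["phot_g_mean_mag"] + 5 * np.log10(gaia["parallax"] / 100.0)

    colour = gaia["phot_bp_mean_mag"] - gaia["phot_rp_mean_mag"]
    cut = (gaia["g_abs"] > 6 + 5 * colour) & (gaia["parallax_over_error"] > 1e-3)
    sample = gaia[cut]  # most main sequence stars and bad parallaxes removed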
+ binary is also removed, leaving a total of 35 930 stars among which
232
+ 33 416 are spectroscopically confirmed white dwarfs, and 2514 are
233
+ confirmed non-white dwarfs. We train the neural networks using a
234
+ different random selection of 25 000 objects for each of them, validate
235
+ each one using a different random selection of 5500 objects, and test
236
+ them with their respective 5430 remaining objects.
237
+ Using a probability threshold of 0.5, the networks correctly iden-
238
+ tify, on average, 99.2% of white dwarfs (𝑃WD > 0.5) and 83.5%
239
+ of non-white dwarfs (𝑃WD ≤ 0.5) in their respective test sets. This
240
+ translates into about 1.4% contamination in objects classified as
241
+ white dwarfs and 0.9% white dwarfs being missed by the networks.
242
+ In order to minimize biases learned by individual networks, we as-
243
+ semble them and use the average probability as the final prediction.
244
+ We run the entire sample through the ensemble and show the re-
245
+ sulting distribution of 𝑃WD over the Hertzsprung-Russell diagram
246
+ (HRD) in the right panel of Figure 2. Objects located within the white
247
+ dwarf locus show high probabilities of being a white dwarf, rapidly
248
+ dropping as we move towards the main sequence.
249
+ We also show the HRD of misclassified objects in the left panel
250
+ of Figure 2 when applying the 𝑃WD = 0.5 threshold, highlighting
251
+ that most misclassified white dwarfs reside within the edge of the
252
+ main sequence locus. While it is unsurprising that the ensemble
253
+ shows confusion in such regions, we note that these misclassified
254
+ white dwarfs are mostly of hot spectral types (DOA, PG1159) or
255
+ have photometric effective temperatures above ≳ 20000 K (Fusillo
256
+ et al. 2021; Dufour et al. 2017). Consequently, the ensemble may be
257
+ less sensitive to very hot white dwarfs found outside the white dwarf
258
+ locus.
259
Figure 2. Gaia HRD of the sample used to test the candidate selection ensemble. Left: red dots represent confirmed white dwarfs for which the ensemble predicts a low 𝑃WD (≤ 0.5), while teal dots are non-WD objects predicted to have a high 𝑃WD (> 0.5). Objects with correct classifications are coloured in grey. Right: distribution of 𝑃WD over the entire HRD.
We compare the ensemble probabilities with those of GF21 in Figure 3 by plotting the differences between our 𝑃WD and theirs over the HRD. We find large differences (over 0.5) between the probabilities for 532 objects, primarily located at the edges of the white dwarf locus and of the main sequence tail. 471 of these objects are spectroscopically confirmed white dwarfs for which our ensemble predicts high probabilities (𝑃WD ≳ 0.85), while the GF21 catalogue indicates very low probabilities (𝑃WD ≲ 0.15). The remaining 61 objects have subdwarf spectroscopic classifications and show a mix of very high and low 𝑃WD for both our predictions and those of GF21. Our ensemble shows excellent performance and appears more robust for objects located in ambiguous regions of the HRD. We attribute this to the fact that the neural networks can learn highly non-linear features in a larger parameter space to make their predictions, rather than providing a simple density estimation of the HRD.
Figure 3. Comparison of the 𝑃WD predicted by our ensemble with those of Fusillo et al. (2021) over the HRD. Red points indicate that the network predicts higher probabilities than GF21, while blue points indicate the opposite.

3.2 Module 2: Primary spectroscopic type

This section focuses on the primary spectroscopic type classification module of our pipeline.
Table 1. Sample of spectra used to train the networks and summary of the ensemble predictions.

Label     | N     | N_agree | N_uncert | N_disagree
----------|-------|---------|----------|-----------
DA        | 17485 | 17281   | 93       | 111
DC        | 1718  | 1548    | 65       | 105
DB        | 1649  | 1625    | 7        | 17
DZ        | 439   | 424     | 5        | 10
DQ/DQpec  | 296   | 260     | 14       | 22
DAH       | 179   | 137     | 27       | 15
DO/DAO    | 142   | 104     | 12       | 26
hotDQ     | 73    | 70      | 2        | 1
PG1159    | 23    | 15      | 3        | 5
sdB       | 759   | 617     | 51       | 91
sdOB      | 389   | 203     | 67       | 119
sdO       | 255   | 118     | 35       | 102
CV        | 221   | 208     | 5        | 8
Total     | 23628 | 22610   | 386      | 632
We train the networks using spectra of non-hybrid objects from the GF21 Gaia-SDSS catalogue and from the subdwarf catalogues of Geier et al. (2017) and Geier (2020). The number of spectra for each class is listed in Table 1. In order to increase the size of the DO and DQ classes, we include DAO white dwarfs in the DO class, and DQpec white dwarfs in the DQ class. We do not include objects whose spectral classification is found only in the MWDD, as their types may have been assigned based on observations obtained by means other than the SDSS. We split the 23 628 spectra in Table 1 into 21 265 for the training set and 2363 for the validation set, ensuring the proportion of each spectral type remains the same. We do not use a testing set due to the very small number of PG1159, hotDQ, and DO stars, and instead cross-validate the networks by using a completely different validation set for each network, i.e. a different 10% of the dataset for each one.
+ sidered reliable or flagged as uncertain, requiring a visual inspection,
433
+ 0.0
434
+ 0.2
435
+ 0.4
436
+ 0.6
437
+ 0.8
438
+ 1.0
439
+ Threshold
440
+ 0.0
441
+ 0.2
442
+ 0.4
443
+ 0.6
444
+ 0.8
445
+ 1.0
446
+ F
447
+ DA
448
+ DB
449
+ DC
450
+ DO
451
+ DQ
452
+ hotDQ
453
+ DZ
454
+ PG1159
455
+ CV
456
+ DAH
457
+ sdB
458
+ sdO
459
+ sdOB
460
+ Figure 4. 𝐹𝛽 score as a function of classification threshold for each class
461
+ label. The 𝛽 factor is set to 0.5, making precision twice as important as recall
462
+ (see text).
463
In order to determine whether a spectral classification is to be considered reliable or flagged as uncertain (requiring a visual inspection), we rely on a prediction probability threshold based on the generalized 𝐹𝛽 score:

    𝐹𝛽 = (1 + 𝛽²) (Precision × Recall) / ((𝛽² × Precision) + Recall) ,    (1)

where

    Precision = TP/(TP + FP) ,    (2)
    Recall = TP/(TP + FN) .    (3)

Precision is a measure of purity for a given class, while recall is a measure of completeness; both are calculated using the counts of true positives (TP), false positives (FP), and false negatives (FN). Their relative importance can be weighted in the 𝐹𝛽 score using the 𝛽 parameter, where 𝛽 > 1 places more weight on recall, and 𝛽 < 1 places more weight on precision. As we aim to minimize the need for visual inspection of spectra, we consider precision twice as important as recall and set 𝛽 = 0.5. We calculate 𝐹𝛽 for each class for every individual network using their respective validation sets, and plot the average 𝐹𝛽 curve for each class in Figure 4. We find that a threshold of 0.6 provides the highest score for most classes. Depending on the case studied, different thresholds may prove optimal: for example, if the subdwarf classification is not of interest, a threshold of 0.5 may be more appropriate, while if DQ white dwarfs are the only objects of interest, a threshold of 0.75 may be more useful. In light of these results, we consider a spectrum to be classified if the highest prediction probability for a class is above the threshold 𝑃class ≥ 0.6. Objects with no class probability above this threshold are tagged for visual inspection; these are discussed at the end of this section.
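Equations (1)-(3) translate directly into a few lines of Python (our sketch; per-class counts would come from a validation set):

    def f_beta(tp: int, fp: int, fn: int, beta: float = 0.5) -> float:
        """Generalized F_beta score of Equations (1)-(3); beta = 0.5 makes
        precision (purity) twice as important as recall (completeness)."""
        precision = tp / (tp + fp)
        recall = tp / (tp + fn)
        b2 = beta ** 2
        return (1 + b2) * precision * recall / (b2 * precision + recall)

    # Example: a class with 95 true positives, 5 false positives, 20 false negatives.
    print(f"{f_beta(95, 5, 20):.3f}")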
Having defined what constitutes a confirmed classification, we now perform a cross-validation to verify the performance of the networks on objects with a non-hybrid spectral type (i.e., DA, DB, DQ, DZ, etc.). We calculate the confusion matrix for each of the ten networks with their respective validation sets, normalize them row-wise, and take their average, producing the confusion matrix shown in Figure 5.
Figure 5. Average confusion matrix of the primary spectroscopic type confidently predicted (𝑃class ≥ 0.6) by the networks for objects with a single known spectroscopic signature. The values are normalized row-wise.
The majority of white dwarf classes show ≳ 90% agreement between network predictions and human labels, while PG1159 and DAH show the lowest agreement, at ∼83%. The networks predict a PG1159 class for ∼13% of the human-labelled DO, and a DA class for ∼17% of the human-labelled DAH, lowering the scores of their respective types. Subdwarfs appear to be the most confused classes, as many sdO and sdOB are predicted to be sdB. Even so, all three classes show good agreement with human labels. In what follows, we ensemble the ten neural networks, predict classes for the entire spectroscopic sample, and review spectra belonging to the classes displaying the highest percentage of disagreement between predictions and labels. We list the number of disagreements for each class in Table 1 as 𝑁disagree, along with the number of agreeing predictions/labels (𝑁agree) and the number of spectra flagged for visual inspection (𝑁uncert).

Visual inspection of 15 objects classified as DAH by GF21 but with different predictions reveals that the ensemble tends to confuse DAH with low-SNR DA, especially if the blue part of the spectrum is of low quality. In low-SNR spectra, magnetic line splitting looks very similar to wide, but noisy, Balmer lines. Such confusion has been shown to be frequent in the study of magnetic white dwarfs by Hardy et al. (in prep.), who found 400 out of 651 white dwarf spectra to have been erroneously classified as magnetic in the previous literature. In our case, however, the human labels appear to be correct, and the ensemble indicates a 5-30% probability of these objects being DAH, which can be used as an indicator of weakly detectable magnetic splitting. There is one notable object with differing classifications, Gaia DR3 2849930668862492544, predicted to be a hotDQ, which was classified as a DQA in the in-depth analysis of DQ/DZ white dwarfs by Coutu et al. (2019).

A similar investigation of the 3 objects classified as PG1159 by GF21, but with different predictions, points to the presence of ionized helium in the spectra of these objects, a feature that the ensemble strongly associates with the DO class.
Table 2. Sample of white dwarfs with known secondary spectroscopic features.

Label   | N    | N_PIA | N_uncert | N_disagree
--------|------|-------|----------|-----------
DAB/DBA | 326  | 271   | 20       | 35
DAZ/DZA | 105  | 92    | 10       | 3
DBZ/DZB | 97   | 86    | 11       | 0
DBAZ    | 59   | 59    | 0        | 0
DABZ    | 20   | 17    | 3        | 0
DZBA    | 14   | 13    | 1        | 0
DZAB    | 5    | 4     | 1        | 0
DOZ     | 2    | 1     | 1        | 0
DQZ     | 1    | 0     | 1        | 0
DA+MS   | 783  | 750   | 22       | 11
DB+MS   | 37   | 32    | 1        | 4
DC+MS   | 23   | 16    | 4        | 3
Total   | 1472 | 1341  | 75       | 56
We emphasize the very small number of known PG1159 stars, and the fact that neural networks are data-driven algorithms that strongly depend on the number of available examples to learn from (He & Garcia 2009; Zhu et al. 2015). It is thus not surprising to see a lower performance for the class with the smallest number of spectra. Furthermore, PG1159 and DO stars are suspected to share a common spectral evolution pathway (Bédard et al. 2022), and so the decision boundary between the two is ill-defined by nature. Objects found on the fine line between DO and PG1159 can usually be identified by high prediction probabilities for both classes, typically ≳ 0.6 for the first and ≳ 0.2 for the second.

As for the apparent confusion between subdwarf types, we strongly suspect the culprit to be features common to the various types. Indeed, according to the classification scheme proposed by Geier et al. (2017), both sdOB and sdO subdwarfs may show a mix of hydrogen, neutral helium, and ionized helium lines. The considerable overlap of these features, along with their merely occasional presence, could easily confuse both our ensemble and a human classifier, resulting in errors not only in the predictions but in the labels as well. While a subtype classification has been proposed by Geier (2020) to separate hydrogen-rich from helium-rich spectra, the number of objects in each class would be too small for the networks to learn meaningful features.

The results discussed so far have been restricted to high-confidence predictions, i.e. those with a class probability above the threshold 𝑃class > 0.6. Among the 23 628 spectra used to train and validate the networks, 2.7% have their highest prediction falling below this threshold. We list the number of uncertain spectra per label in Table 1. The majority of cases are subdwarfs, consistent with the fact that their classes are the most confusing to the ensemble. Uncertain white dwarf classifications can be grouped into three broad categories: (1) the spectrum lies close to an ill-defined boundary between multiple classes, (2) the spectrum has low SNR in regions where important class-specific features are found, and (3) the spectrum may possess an unusual feature for its class. Categories (1) and (2) are self-explanatory, and the most affected classes share the same explanations as the misclassifications discussed above. Spectra belonging to category (3) include rare objects such as Gaia DR3 3985469616188225152, a DQ with oxygen lines (Gänsicke et al. 2010), or Gaia DR3 3731667388643923840, also a DQ but with metal traces (Coutu et al. 2019; Farihi et al. 2022). A simple approach to filter out the majority of uninteresting spectra with uncertain classifications would be to apply a SNR cut, keeping most spectra in categories (1) and (3).
3.3 White dwarfs with secondary spectroscopic features

We now turn to white dwarfs with secondary spectroscopic features, such as DAZ, DBA, etc. Since the networks are trained on objects with only primary spectroscopic types, it is important to assess their predictions when they are confronted with ambiguous data. Our white dwarf sample with known secondary spectroscopic features is listed in Table 2 and totals 1472 spectra, most of which are labelled DBA/DAB or WD+MS binaries. The second column of Table 2 lists the number of spectra with a predicted primary spectroscopic type matching one of the human labels of their respective class (Prediction In Any class; 𝑁PIA), while the third and fourth columns list the number of spectra flagged for visual inspection and with disagreeing predictions/labels, respectively. The ensemble predictions show excellent agreement with the human labels, with ∼91% of all spectra matching one of the possible types and only ∼3% disagreement. Upon visual inspection, nearly all spectra with predictions inconsistent with their labels were found to be misclassified in the GF21 catalogue.

Out of the 38 spectra labelled as having secondary spectroscopic features for which the ensemble predicts no matching primary type, only two appear to have been erroneously predicted by the networks. The ensemble prediction for the spectra of Gaia DR3 3781616827503753088 and Gaia DR3 1028779636740111872 points to a DC type instead of either the DA or DB that would match their proposed DBA label, probably due to the weakness of the lines. As a matter of fact, Gaia DR3 1028779636740111872 was classified as a DB+DC binary by Kleinman et al. (2013), and the helium lines are most likely diluted by the DC companion. 11 other spectra are labelled as DBA/DAB although they clearly show ionized helium lines, and the ensemble predicts a DO primary class for these spectra, which we confirm to be correct by cross-checking the predictions with the spectral types in the MWDD. 22 spectra are labelled as DBA/DAB/DAZ but are predicted to be subdwarfs by the ensemble, which we also confirm using the MWDD. The spectrum of Gaia DR3 1884548739436672640 shows carbon lines and is correctly predicted to be a DQ, although it is labelled as a DBA. The spectrum of Gaia DR3 1587462866571138048 is labelled as DZA, while the ensemble predicts a DB type, consistent with the DBZA spectral type in the MWDD. The predictions made by the ensemble for these last two objects are also consistent with the spectral types DQA and DBAZ given by Coutu et al. (2019). The ensemble thus seems to reliably provide a primary spectroscopic type when classifying white dwarfs with secondary spectroscopic features.

The situation is quite similar when classifying the 1472 spectra labelled as having a main sequence companion, as most spectra with predictions inconsistent with their labels were found to have wrong labels. We note that although there are differences between the predicted and labelled primary spectroscopic types for the cases discussed below, a main sequence companion is indeed always present. Out of the 18 spectra with disagreeing predictions and labels, the only spectrum with a possibly erroneous ensemble prediction belongs to the object Gaia DR3 2464385576553809792, which is predicted to be a DZ but labelled as a DC+MS. The spectrum is dominated by the MS companion, showing features unusual to the ensemble, which likely confuse it. Prediction uncertainties for this spectrum are very high, with the DZ type predicted with 61% probability, but with 37% uncertainty. Interestingly, the DC type is predicted with 24% probability, but with 34% uncertainty. Such uncertainties may be used to discern spurious high-confidence classifications, and even give a hint as to which class is the correct one. 9 of the 18 spectra, labelled as DA+MS but predicted to be CV, all show emission in at least one of their hydrogen lines. Two spectra of the object Gaia DR3 922604914151538816, labelled as DB+MS, show ionized helium lines and are predicted to be DO, consistent with the DO+MS classification in the MWDD. The spectrum of Gaia DR3 1219552974402511104 is labelled as DA+MS but is predicted to be a subdwarf, probably due to its narrow hydrogen lines. A spectrum of Gaia DR3 2836746940329352448 labelled as DC+MS is predicted to be a DB, consistent with the MWDD, and one available spectrum of Gaia DR3 733784442283662464, labelled DB+MS, does not show any obvious features, consistent with the ensemble prediction suggesting a DC. Finally, a spectrum of Gaia DR3 2536561952205972608, labelled as a DA+MS but predicted to be a DC, also does not seem to show any obvious features; however, higher-SNR spectra of the same object reveal it is indeed a DA+MS. These verifications lend support to the ensemble providing primary spectroscopic types that are more robust than visual inspection for WD+MS spectra.
3.4 Module 3: Main sequence companionship

The third and final module of the pipeline makes use of SDSS spectra to predict the probability of contamination by a main sequence companion. Since the redder part of white dwarf spectra often contains the most obvious signature of the presence of a main sequence companion, we rely on a wider wavelength coverage than for the main classification module, extending to 9000 Å. We use the GF21 catalogue to form two groups for our training sample: all white dwarfs labelled as having a main sequence companion form the positive group, while those with a DA, DB or DC type form the negative group, totalling 832 and 20 486 spectra, respectively.

We then train each neural network using the same approach as for the candidate selection module by randomly selecting, for each network, 16 000 and 2700 spectra for training and validation, respectively, keeping the remaining 2618 spectra for testing. Using a probability threshold of 0.5, the networks correctly identify, on average, 91.6% of WD+MS spectra and 99.8% of uncontaminated white dwarfs. About 0.4% of spectra labelled as single white dwarfs are predicted to have a companion, and about 4.9% of spectra labelled as having a companion are predicted to be single white dwarfs. We visually inspected the spectra with disagreeing predictions and labels, and found that 36 out of 76 spectra labelled as having no companion turn out to obviously have one, consistent with the very high probabilities (≳ 0.8) given by the networks. Out of 41 spectra predicted as having no companion but labelled as having one, 9 show obvious companion contamination. Among these, 4 are DB+MS and 1 is a DC+MS. We find no obvious clues as to why these objects were erroneously classified, but we suspect the type of white dwarf may be the cause, as DB and DC binaries are much less numerous and may be harder for the ensemble to recognize. We also notice 11 CVs and 5 subdwarfs that were mislabelled as WD+MS in the GF21 catalogue, for which the networks give very small probabilities of being a WD+MS, consistent with their real classification. We find the rest of the spectra with disagreeing predictions and labels too visually ambiguous to determine companionship by visual inspection alone.
+ The SNR of a spectrum is perhaps one of the most important factors
958
+ affecting the visibility of spectroscopic features, as well as the overall
959
+ spectral shape, making the same object look radically different when
960
+ observed at low and high SNR. Moreover, large surveys generally do
961
+ not provide uniform SNR distributions of spectra, and so classifica-
962
+ tion algorithms may struggle to classify correctly spectra of objects
963
+ MNRAS 000, 1–12 (2022)
964
+
965
+ Data-Driven Classification of WD Stars
966
+ 7
967
+ ]0-9]
968
+ ]9-18] ]18-27] ]27-36] ]36-45] ]45-54]
969
+ >54
970
+ Signal-to-noise
971
+ 0
972
+ 2000
973
+ 4000
974
+ 6000
975
+ 8000
976
+ 10000
977
+ 12000
978
+ Number of spectra
979
+ 0.0
980
+ 0.2
981
+ 0.4
982
+ 0.6
983
+ 0.8
984
+ 1.0
985
+ Weighted F
986
+ ]0-9]
987
+ ]9-18] ]18-27] ]27-36] ]36-45] ]45-54]
988
+ >54
989
+ Signal-to-noise
990
+ 0
991
+ 2000
992
+ 4000
993
+ 6000
994
+ 8000
995
+ 10000
996
+ 12000
997
+ Number of spectra
998
+ 0.0
999
+ 0.2
1000
+ 0.4
1001
+ 0.6
1002
+ 0.8
1003
+ 1.0
1004
+ Weighted F
1005
+ Figure 6. SNR histograms of spectra used to train and to test the spectro-
1006
+ scopic classification modules of our pipeline, overlaid with the class-weighted
1007
+ average 𝐹𝛽-score. The top panel is for the primary spectroscopic type clas-
1008
+ sification module, while the bottom is for the main sequence companion
1009
+ module.
1010
Below, we demonstrate that the two spectroscopic classification modules of our pipeline perform well over the entire SNR range of the SDSS spectra.

To evaluate the performance of the primary spectroscopic type module in different SNR bins, we plot the SNR histogram of all spectra in the upper panel of Figure 6, including the white dwarfs with secondary types and main sequence binaries discussed previously, and overlay the class-weighted average of the 𝐹𝛽-score. The primary spectroscopic type module shows an excellent score of 𝐹𝛽 ∼ 0.95 for spectra under SNR ∼45, with a slight drop to ∼0.91 at higher SNR. Though it may seem counter-intuitive that higher-SNR spectra are slightly more difficult to classify for the neural networks, one must keep in mind the small number of such spectra relative to the bulk at SNR 9-18, along with the fact that classes with few known objects - thus more difficult to classify - tend to have a higher SNR.

We find similar results for the main sequence companion module, for which we plot the histogram and class-weighted 𝐹𝛽-score in the lower panel of Figure 6, using the sample of spectra described in Section 3.4. The module also shows an excellent score of 𝐹𝛽 ≳ 0.9 over the entire SNR > 9 range, generally increasing with higher SNR. This trend differs from that found for the primary spectroscopic type module because the detection of MS companions becomes easier with better signal, but also because the classification is set up as a binary problem (WD vs WD+MS), removing the effect that might have been caused by rare classes whose spectra are mostly found at high SNR.
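Such a per-bin, class-weighted score can be computed, for example, with scikit-learn (an assumption on tooling; the paper does not name its implementation):

    import numpy as np
    from sklearn.metrics import fbeta_score

    def score_by_snr(y_true, y_pred, snr, edges=(0, 9, 18, 27, 36, 45, 54)):
        """Class-weighted F_0.5 in each SNR bin; the last bin is open-ended."""
        y_true, y_pred, snr = map(np.asarray, (y_true, y_pred, snr))
        bins = list(zip(edges, edges[1:])) + [(edges[-1], np.inf)]
        scores = {}
        for lo, hi in bins:
            mask = (snr > lo) & (snr <= hi)
            if mask.any():
                scores[(lo, hi)] = fbeta_score(
                    y_true[mask], y_pred[mask], beta=0.5, average="weighted"
                )
        return scores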
Table 3. Sample of low-SNR spectra used to test the main spectroscopic type classification module.

Label  | N    | N_agree | N_uncert | N_disagree
-------|------|---------|----------|-----------
DA     | 7531 | 7438    | 45       | 48
DC     | 1241 | 995     | 97       | 149
DB     | 331  | 321     | 5        | 5
DZ     | 362  | 315     | 20       | 27
DQ     | 47   | 41      | 4        | 2
DAH    | 15   | 10      | 0        | 5
DO     | 1    | 0       | 0        | 1
hotDQ  | 7    | 5       | 2        | 0
PG1159 | 0    | 0       | 0        | 0
sdB    | 20   | 2       | 6        | 12
sdOB   | 3    | 0       | 1        | 2
sdO    | 8    | 0       | 4        | 4
CV     | 75   | 72      | 1        | 2
Total  | 9734 | 9292    | 185      | 257
Even though we do not use low-SNR (≤ 9) spectra to train and test the neural networks of our pipeline, such spectra constitute ∼30% of all spectra in the GF21 catalogue, warranting at the very least a short performance assessment of the spectroscopic classification modules in this SNR regime. We verify the global performance by including a bin containing the SNR ≤ 9 spectra in the histograms displayed in Figure 6. Both modules display very high scores, on par with higher-SNR spectra: the primary spectroscopic type module shows a ∼0.96 score, and the main sequence companion module a 0.86 score. We further study the high score of the primary spectroscopic type module with its confusion matrix in Figure 7, which shows ≳ 90% agreement for most classes when applying the 𝑃class > 0.6 threshold. The only exception is for subdwarfs, for which the ensemble predicts a DA type for 27 out of the 31 spectra; these classes are known to be notoriously difficult to distinguish in the low-SNR regime. The confusion between DA and DAH also remains present, with 4 of the 15 labelled DAH being predicted as DA. A more detailed list of the low-SNR spectra used to test the main spectroscopic type module and their resulting classifications is provided in Table 3.

We conclude that the spectral classification modules of our pipeline are reliable over the entire SNR range, including noisier spectra (SNR < 9) on which the networks were not trained. We caution, however, that neural networks are known for producing overconfident predictions on out-of-distribution data (Nguyen et al. 2014; Goodfellow et al. 2014), and recommend that extra care be taken when interpreting results in low-SNR regimes.

4 GAIA-SDSS WHITE DWARF CATALOGUE

In this section, we use our pipeline to identify white dwarf candidates from a large sample of Gaia objects and classify them spectroscopically using SDSS DR17 spectra. The results are made available as an online catalogue, along with recommendations on how to use it, as well as the pipeline itself.
Figure 7. Primary spectroscopic type confusion matrix of confident predictions (𝑃class > 0.6) for SNR ≤ 9 spectra.
4.1 Candidate selection

As our starting point, we calculate the probability of being a white dwarf for the 1.3M objects found in the GF21 Gaia main catalogue. We apply a probability threshold of 𝑃WD > 0.75 and an uncertainty limit of 0.02, resulting in 424 096 white dwarf candidates. Of these candidates, 25 205 are spectroscopically confirmed white dwarfs, and 50 are non-WD according to the MWDD and/or the GF21 SDSS catalogue. We reclassify as candidates 131 non-WD objects found within the white dwarf locus in the Gaia HRD, since their spectra have very low SNR (≲ 5) and are too noisy for reliable visual classification. We show in Figure 8 the Gaia HRD of the candidates, as well as of the spectroscopically confirmed white dwarfs and non-WD objects. For visibility purposes, a random selection of 15% of the candidates and a quarter of the confirmed white dwarfs is displayed, while all non-WD objects are kept.

A large number of white dwarf candidates can be seen above the faint end of the white dwarf sequence, as delimited by the orange dashed line in Figure 8. This region is typically populated by WD+MS binaries (Rebassa-Mansergas et al. 2016, 2021), and here we look for clues to confirm whether this is the case. We find that 2583 out of the 4311 candidates above the line have a renormalized unit weight error (ruwe) greater than 1.1, a quantity representing the quality of the astrometric solution, for which a value between 1.1 ≲ ruwe ≲ 1.4 has been found to indicate possible movement perturbations caused by an unresolved companion (Belokurov et al. 2020). An additional 583 candidates show a red flux excess (phot_bp_rp_excess_factor ≳ 1.3) that is at least 0.2 larger than those of other objects with a similar absolute 𝐺 magnitude. Riello et al. (2021) found that many objects with large excess factors tend either to have emission lines in the wavelength range where the RP passband has a larger transmissivity with respect to the 𝐺 passband, or to be blended sources. High values of ruwe and phot_bp_rp_excess_factor may thus imply that the white dwarf candidates inside the region delimited by the orange dashed lines in Figure 8 are genuine white dwarf binary systems. For ease of selection (or removal), we include a binary flag above_locus in our catalogue for the 4317 objects located within the region.
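Downstream, these quantities make for simple pandas filters; a sketch (the catalogue file and probability column names are hypothetical, while the Gaia columns and the above_locus flag are as described above):

    import pandas as pd

    cat = pd.read_csv("wd_catalogue.csv")  # hypothetical export of the catalogue

    # High-confidence candidates: the Section 4.1 threshold and uncertainty cut.
    candidates = cat[(cat["p_wd"] > 0.75) & (cat["p_wd_unc"] < 0.02)]

    # Likely unresolved binaries above the WD sequence: ruwe range from
    # Belokurov et al. (2020), excess factor threshold from Riello et al. (2021).
    likely_binary = candidates["above_locus"] & (
        candidates["ruwe"].between(1.1, 1.4)
        | (candidates["phot_bp_rp_excess_factor"] > 1.3)
    )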
+ ure 8 are genuine white dwarf binary systems. For ease of selection
1384
+ 1.0
1385
+ 0.5
1386
+ 0.0
1387
+ 0.5
1388
+ 1.0
1389
+ 1.5
1390
+ 2.0
1391
+ GBP
1392
+ GRP
1393
+ 6
1394
+ 8
1395
+ 10
1396
+ 12
1397
+ 14
1398
+ 16
1399
+ MG
1400
+ Confirmed WD
1401
+ Confirmed non-WD
1402
+ Figure 8. Gaia HRD of white dwarf candidates selected among 1.3M Gaia
1403
+ objects. Candidates are shown in grey, while spectroscopically confirmed
1404
+ white dwarfs and non-white dwarfs are shown in blue and red, respectively.
1405
+ Objects found to the right of the region delimited by the orange dashed lines
1406
+ are likely WD+MS objects (see Section 4.1).
1407
+ (or removal), we include a binary flag above_locus in our catalogue
1408
+ for the 4317 objects located within the region.
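These binarity clues could be examined for the flagged objects along the following lines; this is a hedged sketch assuming the same hypothetical catalogue export as above, and the fixed excess-factor cut is a simplified stand-in for the magnitude-dependent comparison described in the text:

import pandas as pd

gaia = pd.read_csv("gaia_candidate_catalogue.csv")  # hypothetical file
above = gaia[gaia["above_locus"] == 1]

# Astrometric clue: possible perturbation by an unresolved companion.
noisy_astrometry = above["ruwe"] > 1.1
# Photometric clue: red flux excess (simplified fixed threshold).
red_excess = above["phot_bp_rp_excess_factor"] > 1.3

print(f"{(noisy_astrometry | red_excess).sum()} above-locus objects "
      "show at least one binarity clue")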
We compare our selection of candidates to the GF21 main catalogue by extracting all objects with 𝑃WD,GF21 > 0.75. We find our selection to contain 25 816 candidates not in the GF21 selection, including 869 spectroscopically confirmed white dwarfs and 28 non-WD objects, the former set mainly consisting of DA and DA+MS white dwarfs. There are 48 candidates present in the GF21 selection, but not in ours, including 28 confirmed non-WD objects and no confirmed white dwarfs.
We also compare our candidate selection within 100 parsecs of the Sun to the Gaia Catalogue of Nearby Stars (GCNS, Gaia Collaboration et al. 2021), which provides a probability for objects to be white dwarfs using a Random Forest algorithm. Apart from the choice of algorithm, the main difference between the two methods lies in the choice of training data. Our training set is restricted to the white dwarf region in the Gaia HRD through color cuts, while Gaia Collaboration et al. (2021) use the entire HRD space. Moreover, our non-WD training examples are solely spectroscopically confirmed objects, whereas the GCNS uses any object that is not a spectroscopically confirmed white dwarf as part of its non-WD training set, which may include yet-to-be-confirmed white dwarfs. We compare objects in the GCNS with a probability of being a white dwarf above 0.75 and find a single candidate not present in our own selection, but 366 candidates present in our selection and missing in the GCNS, among which 107 are spectroscopically confirmed white dwarfs and none are confirmed non-WD objects. Overall, our candidate selection appears to be more complete, while also being less contaminated, than other catalogues.
4.2 Spectroscopic classification

Optical spectra for the white dwarf candidates are obtained by calculating the position of the Gaia objects at the J2000 epoch using their proper motions, then by cross-matching them with the SDSS DR17, the latest and last data release of SDSS-IV (Blanton et al. 2017). This data release includes new observations through January 2021, as well as updates to some calibration files affecting all eBOSS spectra taken after the summer of 2017. Therefore, all spectra taken after MJD 58000 differ from their equivalent DR16 versions and can be considered as unseen data for the networks. We also supplement our sample with 3591 spectra from the MWDD and the GF21 SDSS catalogue missed by our cross-match procedure. We remove all spectra that do not have the full coverage of 3842-7000 Å required by our networks, leaving a total of 36 523 spectra belonging to 27 866 unique Gaia white dwarf candidates.
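A minimal sketch of the epoch propagation and sky match using astropy (not the authors' actual code; the coordinate arrays and the 2-arcsec matching radius are illustrative, and Gaia DR3 positions are assumed to be at the J2016.0 reference epoch):

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.time import Time

# Illustrative Gaia astrometry; in practice these arrays come from
# the candidate catalogue.
gaia = SkyCoord(ra=np.array([10.68]) * u.deg,
                dec=np.array([41.27]) * u.deg,
                pm_ra_cosdec=np.array([50.0]) * u.mas / u.yr,
                pm_dec=np.array([-30.0]) * u.mas / u.yr,
                distance=np.array([100.0]) * u.pc,
                obstime=Time("J2016.0"))

# Propagate the positions back to the J2000 epoch of the SDSS astrometry
# (no radial velocity is supplied, so it is implicitly taken as zero).
gaia_j2000 = gaia.apply_space_motion(Time("J2000.0"))

# Match against SDSS positions (illustrative coordinates and radius).
sdss = SkyCoord(ra=np.array([10.68]) * u.deg, dec=np.array([41.26]) * u.deg)
idx, sep, _ = gaia_j2000.match_to_catalog_sky(sdss)
matched = sep < 2.0 * u.arcsec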
Table 4. Primary spectral type classification predicted by our pipeline for white dwarf candidates in the Gaia-SDSS DR17 sample.

Class     𝑁^conf_Gaia   𝑁^uncert_Gaia   𝑁^conf_spec   𝑁^uncert_spec
DA        21434         178             28093         211
DB        1887          35              2758          43
DC        2007          341             2518          416
DO        68            5               102           5
DQ        265           21              362           26
hotDQ     131           22              181           26
DZ        896           80              1120          96
PG1159    12            0               17            1
CV        157           9               225           12
DAH       174           17              237           21
sdB       13            5               18            6
sdO       6             0               9             1
sdOB      9             7               11            8
We pass all the spectra through the primary spectroscopic type classification module and present the results in Table 4, where 𝑁^conf_Gaia is the number of unique Gaia objects whose highest SNR spectrum has a confident prediction above the 𝑃class > 0.6 threshold, and 𝑁^uncert_Gaia is the number of unique Gaia objects whose highest SNR spectrum has a prediction at or below the threshold. Also included are the numbers of spectra predicted to belong to each class, 𝑁^conf_spec and 𝑁^uncert_spec, following the same notation as the Gaia columns. About 97.5% of both spectra and unique Gaia objects were assigned a high-probability primary spectral type, reducing the number of spectra requiring visual inspection from 36 523 to 872 in less than a minute.
A surprising result from the automated classification is the identification of 131 hotDQ Gaia objects, a much larger number than what is currently known in the literature (Dufour et al. 2008; Koester & Kepler 2019; Fusillo et al. 2021). To our knowledge, 66 of these have already been identified as hotDQ by at least one study, 49 have previously been classified as a type other than hotDQ (usually DAH or DQ), and the remaining 16 appear to be new discoveries. The large number of hotDQ may also be due to differing definitions of this class. Indeed, GF21 do not state what features were used to distinguish hotDQ from other DQ white dwarfs, and seem to include warmDQ (Dufour et al. 2013) in the hotDQ class. In their analysis of carbon-atmosphere white dwarfs, Coutu et al. (2019) classify white dwarfs showing molecular carbon bands as DQ, neutral atomic carbon lines as warmDQ, and ionized carbon lines as hotDQ. Given these definitions, visual inspection of the spectra labelled and predicted as hotDQ reveals that some of the objects may actually be warmDQ. A more detailed analysis of these objects would allow us to confirm their true classes.
As a final step, all candidates classified as DA, DB, and DC white dwarfs by our neural networks are passed through the third classification module to determine whether they have a MS companion. Assuming a probability threshold of 0.5, we find 1380 spectra showing signs of companionship, a number comparable to those identified by GF21. To verify how the pipeline deals with WD+MS systems, we compare our results with the spectroscopic catalogue of WD+MS binaries in SDSS DR12 published by Rebassa-Mansergas et al. (2016). We first cross-match the 979 SDSS objects in their catalogue with Gaia DR3 and find 258 within the 1.3M objects in the GF21 main catalogue. The list of white dwarf candidates produced by our candidate selection module contains 211 of these, all of which were correctly classified as WD+MS by our pipeline.
Binary systems were excluded from the training sets of both the candidate selection and the main spectroscopic type modules. Consequently, our pipeline may not be the optimal tool for discovering such objects. However, the MS companion module could be used as a stand-alone classifier to identify new WD+MS binaries in samples with appropriate selection criteria. The module could also benefit from training on synthetic data, as current WD+MS samples are strongly biased towards relatively equal contributions from both members (Rebassa-Mansergas et al. 2021), and it may prove able to classify objects for which visual inspection is too ambiguous.
5 DATA AVAILABILITY

The results of Section 4 are available on the MWDD website⁴ and in the VizieR catalogue access tool as two catalogues. The first catalogue includes the probability of being a white dwarf for the 1.3M Gaia objects of Section 4.1, along with all Gaia parameters used for our analysis and discussion; see Table 5 for a list of column names and descriptions. The second catalogue contains the list of objects for which SDSS spectra were found, as well as their spectroscopic classification results, in addition to the same columns as in the Gaia candidate catalogue. We provide the fiberid, mjd and plateid as a way to identify the spectra in the SDSS database. Note that a Gaia object may have multiple spectra, and vice versa. A list of the columns unique to the Gaia-SDSS catalogue is shown in Table 6.
Granular control over the completeness and contamination rate for both the white dwarf candidate selection and the spectroscopic classifications can be achieved by imposing different prediction probability thresholds or prediction uncertainty limits. In principle, the prediction probability and uncertainty could be combined to look for spectra that show secondary spectroscopic signatures, though this has not been tested here. For a good balance between completeness and contamination, we recommend the following thresholds: 𝑃WD > 0.75 and an uncertainty limit of 0.02 for candidate selection, 𝑃class > 0.6 for the primary spectroscopic type with the highest prediction probability, and 𝑃MS > 0.5 for MS companion detection. We also remind the reader to be cautious when interpreting predictions for spectra with low SNR (≲ 9), as the neural networks may be overconfident in their predictions (see Section 3).
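As an illustration only, the recommended spectroscopic thresholds could be applied to a hypothetical export of the Gaia-SDSS catalogue (Table 6) along these lines:

import pandas as pd

spec = pd.read_csv("gaia_sdss_catalogue.csv")  # hypothetical file
classes = ["DA", "DB", "DC", "DO", "DQ", "hotDQ", "DZ",
           "PG1159", "CV", "DAH", "sdB", "sdO", "sdOB"]
prob_cols = [f"P_{c}" for c in classes]

# Primary type = class with the highest predicted probability,
# kept only when that probability clears the 0.6 threshold.
spec["primary_type"] = spec[prob_cols].idxmax(axis=1).str[2:]
confident = spec[spec[prob_cols].max(axis=1) > 0.6]

# Probable WD+MS systems among the DA, DB and DC classifications.
wd_ms = confident[confident["primary_type"].isin(["DA", "DB", "DC"])
                  & (confident["P_ms"] > 0.5)]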
The modules of the pipeline are made available to try and use on the MWDD website⁵. A description of the required inputs and file formats can be found there. Future versions of the pipeline and machine learning tools will also be uploaded on this page. By making these publicly available, we aim to facilitate the transition from the traditionally manual approaches of white dwarf analysis to rapid, automated statistical tools for the entire community, and we encourage other teams to do the same. As the field of astronomy enters a new era of Big Data, collaborative efforts will be more important than ever to ensure science is not hampered by the sheer quantity of observations, and to allow astronomers to focus on the interesting cases waiting to be discovered.

⁴ montrealwhitedwarfdatabase.org
⁵ montrealwhitedwarfdatabase.org/MLTools
Table 5. Columns unique to our Gaia candidate catalogue.

Column Header   Description
source_id       Unique source identifier in Gaia DR3
P_wd            Probability of being a white dwarf (see Section 2)
P_wd_u          Uncertainty on the probability of being a white dwarf
above_locus     Binary flag indicating whether the object is above the white dwarf locus and may be a WD+MS binary (see Section 4.1)
Table 6. Columns unique to the Gaia-SDSS DR17 catalogue. All columns in the Gaia candidate catalogue are also included, but not shown here.

Column Header   Description
P_{class}       Probability of the spectrum being of primary spectroscopic type {class} (see Section 2 for the 13 possible classes)
P_{class}_u     Uncertainty on the probability of the spectrum being of primary spectroscopic type {class}
P_ms            Probability of the presence of a MS companion in the spectrum (see Section 2)
P_ms_u          Uncertainty on the probability of the presence of a MS companion in the spectrum
6 CONCLUSION

In this paper, we presented a fully automated, data-driven pipeline for white dwarf candidate selection and spectroscopic classification based on neural network ensembles. The pipeline is composed of three modules that can be used independently for a variety of purposes. The first module calculates the probability of being a white dwarf given Gaia photometric and astrometric data, and correctly identified > 99% of white dwarfs in the test set, with a contamination rate of ∼1.4%. The second module predicts the primary spectroscopic type of a white dwarf given an optical spectrum, with > 90% precision for most classes according to cross-validation tests. The last module calculates the probability of main sequence star contamination being present in the spectrum, with > 91% precision on its test set. The two spectroscopic modules were trained with SDSS DR16 spectra.

We applied our pipeline to 1.3M Gaia objects located in or near the white dwarf locus in the Gaia HRD and found 424 096 high-probability white dwarf candidates, for which we cross-matched 36 523 SDSS DR17 spectra, creating the first white dwarf catalogue with quantifiable spectroscopic classifications. The entire process is orders of magnitude faster than the current manual inspection approach, taking about 10 minutes on a Mac M1 laptop, with about 9 minutes taken by the candidate selection module. In addition to the benefits of quantifiable classifications and speed, neural networks remove the need to manually select the relevant features, by learning which ones best distinguish one class from the others.

The pipeline presented here will be particularly useful for the SDSS-V, as the spectroscopic classification modules are already trained on data observed by the same instruments. The pipeline can also serve as a base model for other surveys (e.g., DESI, 4MOST, LAMOST), where fine-tuning and transfer learning methods can be applied to adapt the spectroscopic modules to the new data distribution; a sketch of this idea is given below. Deep ensembles seem to offer benefits in out-of-distribution settings (Ovadia et al. 2019; Gustafsson et al. 2020), although with some limitations (Rahaman & Thiery 2021), and may require only small adjustments to perform well on data from surveys other than SDSS. Future work will aim to provide secondary spectroscopic feature identification and improved classification for rare classes such as PG1159 and hotDQ. In particular, the addition of synthetic spectra to augment the training data offers a promising avenue that will be investigated. Such machine learning tools will soon become indispensable, as observations from the next generation of spectroscopic surveys will provide millions of spectra for hundreds of thousands of white dwarf candidates.
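As a rough sketch of the transfer learning idea under stated assumptions (the saved model file and the new-survey arrays are hypothetical placeholders), one could freeze the convolutional feature extractor of the spectral type module and retrain only the dense head at a reduced learning rate:

import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model("spectral_type_module.h5")  # hypothetical

# Keep the learned spectral features; retrain only the dense layers.
for layer in model.layers:
    if isinstance(layer, tf.keras.layers.Conv1D):
        layer.trainable = False

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
              loss="categorical_crossentropy")

# Placeholder stand-ins for labelled spectra from the new survey.
new_spectra = np.random.rand(64, 3000, 1).astype("float32")
new_labels = tf.keras.utils.to_categorical(np.random.randint(0, 13, 64), 13)
model.fit(new_spectra, new_labels, epochs=10, batch_size=32)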
ACKNOWLEDGEMENTS

This work was supported in part by NSERC Canada and by the Fund FRQ-NT (Québec). This work has made use of data from the European Space Agency (ESA) mission Gaia (https://www.cosmos.esa.int/gaia), processed by the Gaia Data Processing and Analysis Consortium (DPAC, https://www.cosmos.esa.int/web/gaia/dpac/consortium). Funding for the DPAC has been provided by national institutions, in particular the institutions participating in the Gaia Multilateral Agreement. Funding for the Sloan Digital Sky Survey (https://www.sdss.org) has been provided by the Alfred P. Sloan Foundation, the U.S. Department of Energy Office of Science, and the Participating Institutions. SDSS-IV acknowledges support and resources from the Center for High-Performance Computing at the University of Utah, and is managed by the Astrophysical Research Consortium for the Participating Institutions of the SDSS Collaboration. This research has also made use of the NASA Astrophysics Data System Bibliographic Services; the Montreal White Dwarf Database (Dufour et al. 2017); and the SIMBAD database, operated at the Centre de Données astronomiques de Strasbourg (Wenger et al. 2000).
DATA AVAILABILITY

A full description of the data and software availability is provided in Section 5.
REFERENCES

Abadi M., et al., 2015, TensorFlow: Large-Scale Machine Learning on Heterogeneous Systems, https://www.tensorflow.org/
Abdurro'uf et al., 2022, ApJS, 259, 35
Ahumada R., et al., 2020, ApJS, 249, 3
Bédard A., Bergeron P., Fontaine G., 2017, ApJ, 848, 11
Bédard A., Bergeron P., Brassard P., 2022, ApJ, 930, 8
Belokurov V., et al., 2020, MNRAS, 496, 1922
Blanton M. R., et al., 2017, AJ, 154, 28
Carleo G., Cirac I., Cranmer K., Daudet L., Schuld M., Tishby N., Vogt-Maranto L., Zdeborová L., 2019, Reviews of Modern Physics, 91, 045002
Caron A., Bergeron P., Blouin S., Leggett S. K., 2022, arXiv e-prints, p. arXiv:2212.08014
Chambers K. C., et al., 2016, arXiv e-prints, p. arXiv:1612.05560
Chandra V., Hwang H.-C., Zakamska N. L., Budavári T., 2020, MNRAS, 497, 2688
Coutu S., Dufour P., Bergeron P., Blouin S., Loranger E., Allard N. F., Dunlap B. H., 2019, ApJ, 885, 74
Cui X.-Q., et al., 2012, Research in Astronomy and Astrophysics, 12, 1197
DESI Collaboration et al., 2016, arXiv e-prints, p. arXiv:1611.00036
Dufour P., Fontaine G., Liebert J., Schmidt G. D., Behara N., 2008, ApJ, 683, 978
Dufour P., Vornanen T., Bergeron P., Fontaine G., Berdyugin A., 2013, in Krzesiński J., Stachowski G., Moskalik P., Bajan K., eds, Astronomical Society of the Pacific Conference Series Vol. 469, 18th European White Dwarf Workshop. p. 167
Dufour P., Blouin S., Coutu S., Fortin-Archambault M., Thibeault C., Bergeron P., Fontaine G., 2017, in Tremblay P. E., Gaensicke B., Marsh T., eds, Astronomical Society of the Pacific Conference Series Vol. 509, 20th European White Dwarf Workshop. p. 3 (arXiv:1610.00986)
Eisenstein D. J., et al., 2006, ApJS, 167, 40
Farihi J., Dufour P., Wilson T. G., 2022, arXiv e-prints, p. arXiv:2208.05990
Fort S., Hu H., Lakshminarayanan B., 2019, arXiv e-prints, p. arXiv:1912.02757
Fusillo N. P. G., et al., 2021, MNRAS, 508, 3877
Gaia Collaboration et al., 2016, A&A, 595, A1
Gaia Collaboration et al., 2021, A&A, 649, A6
Gänsicke B. T., Koester D., Girven J., Marsh T. R., Steeghs D., 2010, Science, 327, 188
Geier S., 2020, A&A, 635, A193
Geier S., Østensen R. H., Nemeth P., Gentile Fusillo N. P., Gänsicke B. T., Telting J. H., Green E. M., Schaffenroth J., 2017, A&A, 600, A50
Goodfellow I. J., Shlens J., Szegedy C., 2014, arXiv e-prints, p. arXiv:1412.6572
Gunn J. E., et al., 2006, AJ, 131, 2332
Gustafsson F. K., Danelljan M., Schön T. B., 2020, in 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW). pp 1289-1298
Harris H. C., et al., 2003, AJ, 126, 1023
He H., Garcia E. A., 2009, IEEE Transactions on Knowledge and Data Engineering, 21, 1263
Kepler S. O., et al., 2015, MNRAS, 446, 4078
Kepler S. O., et al., 2016, MNRAS, 455, 3413
Kepler S. O., Koester D., Pelisoli I., Romero A. D., Ourique G., 2021, MNRAS, 507, 4646
Kingma D. P., Ba J., 2014, arXiv e-prints, p. arXiv:1412.6980
Kleinman S. J., et al., 2004, ApJ, 607, 426
Kleinman S. J., et al., 2013, ApJS, 204, 5
Koester D., Kepler S. O., 2019, A&A, 628, A102
Kollmeier J. A., et al., 2017, arXiv e-prints, p. arXiv:1711.03234
Krizhevsky A., Sutskever I., Hinton G. E., 2012, Communications of the ACM, 60, 84
Lakshminarayanan B., Pritzel A., Blundell C., 2016, arXiv e-prints, p. arXiv:1612.01474
Lee S., Purushwalkam S., Cogswell M., Crandall D., Batra D., 2015, arXiv e-prints, p. arXiv:1511.06314
Leung H. W., Bovy J., 2019, MNRAS, 489, 2079
López-Sanjuan C., et al., 2022, A&A, 658, A79
Maas A. L., Hannun A. Y., Ng A. Y., 2013, in Proceedings of the 30th International Conference on Machine Learning, Workshop on Deep Learning for Audio, Speech and Language Processing
Nguyen A., Yosinski J., Clune J., 2014, arXiv e-prints, p. arXiv:1412.1897
Ovadia Y., et al., 2019, arXiv e-prints, p. arXiv:1906.02530
Rahaman R., Thiery A. H., 2021, in Advances in Neural Information Processing Systems (NeurIPS)
Rebassa-Mansergas A., Ren J. J., Parsons S. G., Gänsicke B. T., Schreiber M. R., García-Berro E., Liu X. W., Koester D., 2016, MNRAS, 458, 3808
Rebassa-Mansergas A., et al., 2021, MNRAS, 506, 5201
Riello M., et al., 2021, A&A, 649, A3
Sharma K., Kembhavi A., Kembhavi A., Sivarani T., Abraham S., Vaghmare K., 2020, MNRAS, 491, 2280
Sion E. M., Greenstein J. L., Landstreet J. D., Liebert J., Shipman H. L., Wegner G. A., 1983, ApJ, 269, 253
Skrutskie M. F., et al., 2006, AJ, 131, 1163
Smith M. J., Geach J. E., 2022, arXiv e-prints, p. arXiv:2211.03796
Srivastava N., Hinton G. E., Krizhevsky A., Sutskever I., Salakhutdinov R., 2014, J. Mach. Learn. Res., 15, 1929
Ting Y.-S., Conroy C., Rix H.-W., Cargile P., 2019, ApJ, 879, 69
Zhu X., Vondrick C., Fowlkes C., Ramanan D., 2015, arXiv e-prints, p. arXiv:1503.01508
de Jong R. S., et al., 2014, in Ramsay S. K., McLean I. S., Takami H., eds, Society of Photo-Optical Instrumentation Engineers (SPIE) Conference Series Vol. 9147, Ground-based and Airborne Instrumentation for Astronomy V. p. 91470M, doi:10.1117/12.2055826
APPENDIX A: NEURAL NETWORK ARCHITECTURES

The architecture details for our three pipeline modules are briefly described here. All modules are implemented using the tensorflow (version 2.10) Python library (Abadi et al. 2015). For every network, regardless of the module, we use an initial learning rate of 0.01 and the ReduceLROnPlateau callback with a factor of 0.33, a min_delta of 10⁻³, and a patience of 3. We use the tensorflow implementation of the Adam optimizer (Kingma & Ba 2014).
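For concreteness, a minimal tf.keras sketch of this common training configuration (everything else about the training loop is omitted):

import tensorflow as tf

optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(factor=0.33,
                                                 min_delta=1e-3,
                                                 patience=3)
# Used later as: model.fit(..., callbacks=[reduce_lr])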
Starting with the candidate selection module, each network is composed of 5 fully-connected layers, with 56 hidden units in the first layer and double that number in each successive layer, followed by a single-unit fully-connected layer that outputs the probability of an object being a white dwarf. The output layer has a sigmoid activation function, while all other layers use a LeakyReLU activation (Maas et al. 2013) with the leak parameter set to 0.1, along with dropout (Srivastava et al. 2014) set to 0.5. We train the networks for 100 epochs using a binary cross-entropy loss.
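A sketch of one such network consistent with this description (not the exact training script); the number of input features is a placeholder, since it is not restated here:

import tensorflow as tf
from tensorflow.keras import layers

def candidate_net(n_features=12):  # placeholder feature count
    model = tf.keras.Sequential([tf.keras.Input(shape=(n_features,))])
    units = 56
    for _ in range(5):  # five fully-connected layers, doubling in width
        model.add(layers.Dense(units))
        model.add(layers.LeakyReLU(alpha=0.1))
        model.add(layers.Dropout(0.5))
        units *= 2
    model.add(layers.Dense(1, activation="sigmoid"))  # P(white dwarf)
    model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
                  loss="binary_crossentropy")
    return model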
The primary spectroscopic type module neural networks use a mix of fully-connected and convolutional (Krizhevsky et al. 2012) layers. Each network has 4 convolutional layers with a kernel size of 5, 14 feature maps, and a stride of 2, followed by a fully-connected layer with 100 hidden units and a final fully-connected layer with 13 units for the output. The output layer has a sigmoid activation function, while all other layers use a LeakyReLU activation with the leak parameter set to 0.1, along with dropout set to 0.4. We also apply dropout to the input with a 0.2 probability. Each network is trained for 30 epochs using a categorical cross-entropy loss.
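A corresponding sketch for one spectral type network, under the same caveats; the input length (number of flux bins) is a placeholder:

import tensorflow as tf
from tensorflow.keras import layers

def spectral_type_net(n_bins=3000):  # placeholder spectrum length
    model = tf.keras.Sequential([tf.keras.Input(shape=(n_bins, 1))])
    model.add(layers.Dropout(0.2))  # dropout applied to the input
    for _ in range(4):  # four convolutional layers
        model.add(layers.Conv1D(filters=14, kernel_size=5, strides=2))
        model.add(layers.LeakyReLU(alpha=0.1))
        model.add(layers.Dropout(0.4))
    model.add(layers.Flatten())
    model.add(layers.Dense(100))
    model.add(layers.LeakyReLU(alpha=0.1))
    model.add(layers.Dropout(0.4))
    model.add(layers.Dense(13, activation="sigmoid"))  # one unit per class
    model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
                  loss="categorical_crossentropy")
    return model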
The main sequence companion detection module neural networks have 3 fully-connected layers, with 512 hidden units in the first layer and half that number in each successive layer, followed by a single-unit fully-connected output layer. The output layer has a sigmoid activation function, while all other layers use a LeakyReLU activation with the leak parameter set to 0.1, dropout set to 0.5, and L1L2 kernel regularization with default parameters. We also apply dropout to the input with a 0.2 probability. We train the networks for 100 epochs using a binary cross-entropy loss.
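And a sketch for one companion-detection network; here the "default parameters" of the L1L2 regularizer are taken to mean the tf.keras l1_l2() defaults, which is an assumption:

import tensorflow as tf
from tensorflow.keras import layers, regularizers

def companion_net(n_bins=3000):  # placeholder spectrum length
    model = tf.keras.Sequential([tf.keras.Input(shape=(n_bins,))])
    model.add(layers.Dropout(0.2))  # dropout applied to the input
    for units in (512, 256, 128):  # widths halve at each layer
        model.add(layers.Dense(units,
                               kernel_regularizer=regularizers.l1_l2()))
        model.add(layers.LeakyReLU(alpha=0.1))
        model.add(layers.Dropout(0.5))
    model.add(layers.Dense(1, activation="sigmoid"))  # P(MS companion)
    model.compile(optimizer=tf.keras.optimizers.Adam(0.01),
                  loss="binary_crossentropy")
    return model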
This paper has been typeset from a TEX/LATEX file prepared by the author.